ext | sha | content
---|---|---|
py | 1a54353d8b2382efac9f42dc57e4d284334d9481 | # coding=utf-8
# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LXMERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from ...activations import ACT2FN, gelu
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_lxmert import LxmertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
_CONFIG_FOR_DOC = "LxmertConfig"
_TOKENIZER_FOR_DOC = "LxmertTokenizer"
LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"unc-nlp/lxmert-base-uncased",
]
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return gelu(x)
@dataclass
class LxmertModelOutput(ModelOutput):
"""
Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relationship"
encoder)
Args:
language_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the language encoder.
vision_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the visual encoder.
pooled_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
by a Linear layer and a Tanh activation function.
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
language_output: Optional[torch.FloatTensor] = None
vision_output: Optional[torch.FloatTensor] = None
pooled_output: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
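# Illustrative sketch (not part of the original module): with ``return_dict=True`` a forward pass of
# ``LxmertModel`` (defined below) returns an ``LxmertModelOutput`` whose fields are accessed by name.
# The shapes in the comments restate the docstring above; ``model`` and its inputs are placeholders.
#
#     outputs = model(input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, return_dict=True)
#     outputs.language_output   # (batch_size, sequence_length, hidden_size)
#     outputs.vision_output     # (batch_size, num_visual_features, hidden_size)
#     outputs.pooled_output     # (batch_size, hidden_size)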
@dataclass
class LxmertForQuestionAnsweringOutput(ModelOutput):
"""
Output type of :class:`~transformers.LxmertForQuestionAnswering`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Cross-entropy loss of the question answering objective (classification).
question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`, `optional`):
Prediction scores of question answering objective (classification).
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LxmertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.LxmertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling, cross-modality matching, visual-feature prediction,
and question answering losses; each term is included only when its corresponding labels are provided.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
cross_relationship_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the textual matching objective (classification) head (scores of True/False
continuation before SoftMax).
question_answering_score: (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, n_qa_answers)`):
Prediction scores of question answering objective (classification).
language_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for input features + one for the output of each cross-modality
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
language_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
vision_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
cross_relationship_score: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
language_attentions: Optional[Tuple[torch.FloatTensor]] = None
vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required for using the pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
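# Usage sketch for the TF-weight loader above (illustrative; the checkpoint path is a placeholder and the
# config is assumed to match the checkpoint):
#
#     config = LxmertConfig()
#     model = LxmertModel(config)
#     model = load_tf_weights_in_lxmert(model, config, "/path/to/lxmert_tf_checkpoint.ckpt")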
class LxmertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LxmertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask (precomputed for all layers in the LxmertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
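# Shape walkthrough for the attention module above (descriptive only; hidden_size == num_heads * attention_head_size):
#   hidden_states: (batch, seq_q, hidden_size) -> query_layer:     (batch, num_heads, seq_q, attention_head_size)
#   context:       (batch, seq_k, ctx_dim)     -> key/value_layer: (batch, num_heads, seq_k, attention_head_size)
#   attention_scores / attention_probs:           (batch, num_heads, seq_q, seq_k)
#   context_layer (returned):                     (batch, seq_q, num_heads * attention_head_size)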
class LxmertAttentionOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertCrossAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertSelfAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, attention_mask, output_attentions=False):
# Self attention attends to itself, thus keys and queries are the same (input_tensor).
output = self.self(
input_tensor,
input_tensor,
attention_mask,
output_attentions=output_attentions,
)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class LxmertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = LxmertSelfAttentionLayer(config)
self.intermediate = LxmertIntermediate(config)
self.output = LxmertOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs[1:] # add attentions if we output them
return outputs
class LxmertXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = LxmertCrossAttentionLayer(config)
# Self-attention Layers
self.lang_self_att = LxmertSelfAttentionLayer(config)
self.visn_self_att = LxmertSelfAttentionLayer(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = LxmertIntermediate(config)
self.lang_output = LxmertOutput(config)
self.visn_inter = LxmertIntermediate(config)
self.visn_output = LxmertOutput(config)
def cross_att(
self,
lang_input,
lang_attention_mask,
visual_input,
visual_attention_mask,
output_x_attentions=False,
):
# Cross Attention
lang_att_output = self.visual_attention(
lang_input,
visual_input,
ctx_att_mask=visual_attention_mask,
output_attentions=output_x_attentions,
)
visual_att_output = self.visual_attention(
visual_input,
lang_input,
ctx_att_mask=lang_attention_mask,
output_attentions=False,
)
return lang_att_output, visual_att_output
def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
# Self Attention
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
return lang_att_output[0], visual_att_output[0]
def output_fc(self, lang_input, visual_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visual_inter_output = self.visn_inter(visual_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visual_output = self.visn_output(visual_inter_output, visual_input)
return lang_output, visual_output
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=False,
):
lang_att_output, visual_att_output = self.cross_att(
lang_input=lang_feats,
lang_attention_mask=lang_attention_mask,
visual_input=visual_feats,
visual_attention_mask=visual_attention_mask,
output_x_attentions=output_attentions,
)
attention_probs = lang_att_output[1:]
lang_att_output, visual_att_output = self.self_att(
lang_att_output[0],
lang_attention_mask,
visual_att_output[0],
visual_attention_mask,
)
lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
return (
(
lang_output,
visual_output,
attention_probs[0],
)
if output_attentions
else (lang_output, visual_output)
)
class LxmertVisualFeatureEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = config.visual_feat_dim
pos_dim = config.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visual_feats, visual_pos):
x = self.visn_fc(visual_feats)
x = self.visn_layer_norm(x)
y = self.box_fc(visual_pos)
y = self.box_layer_norm(y)
output = (x + y) / 2
output = self.dropout(output)
return output
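# Illustrative input shapes for the visual feature encoder above (batch_size and num_boxes are placeholders;
# visual_feats are e.g. 2048-dim ROI features and visual_pos are box coordinates normalized to [0, 1],
# as described in the inputs docstring further below):
#
#     visual_feats = torch.randn(batch_size, num_boxes, config.visual_feat_dim)
#     visual_pos = torch.rand(batch_size, num_boxes, config.visual_pos_dim)
#     encoded = LxmertVisualFeatureEncoder(config)(visual_feats, visual_pos)  # (batch_size, num_boxes, hidden_size)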
class LxmertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = LxmertVisualFeatureEncoder(config)
self.config = config
# Number of layers
self.num_l_layers = config.l_layers
self.num_x_layers = config.x_layers
self.num_r_layers = config.r_layers
# Layers
# Using self.layer instead of self.l_layer to support loading BERT weights.
self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_pos,
visual_attention_mask=None,
output_attentions=None,
):
vision_hidden_states = ()
language_hidden_states = ()
vision_attentions = () if output_attentions or self.config.output_attentions else None
language_attentions = () if output_attentions or self.config.output_attentions else None
cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
visual_feats = self.visn_fc(visual_feats, visual_pos)
# Run language layers
for layer_module in self.layer:
l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
lang_feats = l_outputs[0]
language_hidden_states = language_hidden_states + (lang_feats,)
if language_attentions is not None:
language_attentions = language_attentions + (l_outputs[1],)
# Run relational layers
for layer_module in self.r_layers:
v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
visual_feats = v_outputs[0]
vision_hidden_states = vision_hidden_states + (visual_feats,)
if vision_attentions is not None:
vision_attentions = vision_attentions + (v_outputs[1],)
# Run cross-modality layers
for layer_module in self.x_layers:
x_outputs = layer_module(
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=output_attentions,
)
lang_feats, visual_feats = x_outputs[:2]
vision_hidden_states = vision_hidden_states + (visual_feats,)
language_hidden_states = language_hidden_states + (lang_feats,)
if cross_encoder_attentions is not None:
cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
visual_encoder_outputs = (
vision_hidden_states,
vision_attentions if output_attentions else None,
)
lang_encoder_outputs = (
language_hidden_states,
language_attentions if output_attentions else None,
)
return (
visual_encoder_outputs,
lang_encoder_outputs,
cross_encoder_attentions if output_attentions else None,
)
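# The encoder returns nested tuples; a sketch of how LxmertModel (below) consumes them:
#
#     visual_encoder_outputs, lang_encoder_outputs, cross_attentions = encoder_outputs
#     vision_hidden_states, vision_attentions = visual_encoder_outputs
#     language_hidden_states, language_attentions = lang_encoder_outputs
#     lang_feats = language_hidden_states[-1]    # output of the last cross-modality layer (language stream)
#     visual_feats = vision_hidden_states[-1]    # output of the last cross-modality layer (vision stream)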
class LxmertPooler(nn.Module):
def __init__(self, config):
super(LxmertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class LxmertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(LxmertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class LxmertLMPredictionHead(nn.Module):
def __init__(self, config, lxmert_model_embedding_weights):
super(LxmertLMPredictionHead, self).__init__()
self.transform = LxmertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
lxmert_model_embedding_weights.size(1),
lxmert_model_embedding_weights.size(0),
bias=False,
)
self.decoder.weight = lxmert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LxmertVisualAnswerHead(nn.Module):
def __init__(self, config, num_labels):
super().__init__()
hid_dim = config.hidden_size
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_labels),
)
def forward(self, hidden_states):
return self.logit_fc(hidden_states)
class LxmertVisualObjHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LxmertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
if config.visual_attr_loss:
visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
}
self.visual_losses = visual_losses
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict(
{key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
class LxmertPreTrainingHeads(nn.Module):
def __init__(self, config, lxmert_model_embedding_weights):
super(LxmertPreTrainingHeads, self).__init__()
self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class LxmertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LxmertConfig
load_tf_weights = load_tf_weights_in_lxmert
base_model_prefix = "lxmert"
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
LXMERT_START_DOCSTRING = r"""
The LXMERT model was proposed in `LXMERT: Learning Cross-Modality Encoder Representations from Transformers
<https://arxiv.org/abs/1908.07490>`__ by Hao Tan and Mohit Bansal. It's a vision and language transformer model,
pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome,
using a combination of masked language modeling, region-of-interest feature regression, and cross-entropy losses
for question answering, attribute prediction, and object tag prediction.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.LxmertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LXMERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LxmertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
visual_feats (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. They are ROI-pooled object features extracted from bounding boxes
with a Faster R-CNN model.
These are currently not provided by the transformers library.
visual_pos (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding (by index) to the visual features above. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
These are currently not provided by the transformers library.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
visual_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
LXMERT_START_DOCSTRING,
)
class LxmertModel(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = LxmertEmbeddings(config)
self.encoder = LxmertEncoder(config)
self.pooler = LxmertPooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LxmertModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if visual_feats is None:
raise ValueError("`visual_feats` cannot be `None`")
if visual_pos is None:
raise ValueError("`visual_pos` cannot be `None`")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Process the visual attention mask
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0
else:
extended_visual_attention_mask = None
# Positional Word Embeddings
embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
# Run Lxmert encoder
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
visual_feats=visual_feats,
visual_pos=visual_pos,
visual_attention_mask=extended_visual_attention_mask,
output_attentions=output_attentions,
)
visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
vision_hidden_states = visual_encoder_outputs[0]
language_hidden_states = lang_encoder_outputs[0]
all_attentions = ()
if output_attentions:
language_attentions = lang_encoder_outputs[1]
vision_attentions = visual_encoder_outputs[1]
cross_encoder_attentions = encoder_outputs[2]
all_attentions = (
language_attentions,
vision_attentions,
cross_encoder_attentions,
)
hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
visual_output = vision_hidden_states[-1]
lang_output = language_hidden_states[-1]
pooled_output = self.pooler(lang_output)
if not return_dict:
return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
return LxmertModelOutput(
pooled_output=pooled_output,
language_output=lang_output,
vision_output=visual_output,
language_hidden_states=language_hidden_states if output_hidden_states else None,
vision_hidden_states=vision_hidden_states if output_hidden_states else None,
language_attentions=language_attentions if output_attentions else None,
vision_attentions=vision_attentions if output_attentions else None,
cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
)
@add_start_docstrings(
"""Lxmert Model with a specified pretraining head on top. """,
LXMERT_START_DOCSTRING,
)
class LxmertForPreTraining(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Configuration
self.config = config
self.num_qa_labels = config.num_qa_labels
self.visual_loss_normalizer = config.visual_loss_normalizer
# Use of pretraining tasks
self.task_mask_lm = config.task_mask_lm
self.task_obj_predict = config.task_obj_predict
self.task_matched = config.task_matched
self.task_qa = config.task_qa
# Lxmert backbone
self.lxmert = LxmertModel(config)
# Pre-training heads
self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
if self.task_obj_predict:
self.obj_predict_head = LxmertVisualObjHead(config)
if self.task_qa:
self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
# Weight initialization
# Initialize weights and apply final processing
self.post_init()
# Loss functions
self.loss_fcts = {
"l2": SmoothL1Loss(reduction="none"),
"visual_ce": CrossEntropyLoss(reduction="none"),
"ce": CrossEntropyLoss(),
}
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {
"shape": (-1,),
"num": config.num_object_labels,
"loss": "visual_ce",
}
if config.visual_attr_loss:
visual_losses["attr"] = {
"shape": (-1,),
"num": config.num_attr_labels,
"loss": "visual_ce",
}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
"loss": "l2",
}
self.visual_losses = visual_losses
def resize_num_qa_labels(self, num_labels):
"""
Build a resized question answering linear layer from the current one. Increasing the size will add newly
initialized weights; reducing the size will remove weights from the end.
Args:
num_labels (:obj:`int`, `optional`):
New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
weights at the end. Reducing the size will remove weights from the end. If not provided or :obj:`None`,
just returns a pointer to the qa labels :obj:`torch.nn.Linear` module of the model without doing
anything.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
"""
cur_qa_logit_layer = self.get_qa_logit_layer()
if num_labels is None or cur_qa_logit_layer is None:
return
new_qa_logit_layer = self._resize_qa_labels(num_labels)
self.config.num_qa_labels = num_labels
self.num_qa_labels = num_labels
return new_qa_logit_layer
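# Illustrative use of resize_num_qa_labels (the label count is an arbitrary example, and the checkpoint is
# assumed to have been trained with a QA head, i.e. task_qa=True):
#
#     model = LxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")
#     model.resize_num_qa_labels(1536)   # swaps the final Linear of the answer head, keeping overlapping rows
#     assert model.config.num_qa_labels == 1536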
def _resize_qa_labels(self, num_labels):
cur_qa_logit_layer = self.get_qa_logit_layer()
new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
self._set_qa_logit_layer(new_qa_logit_layer)
return self.get_qa_logit_layer()
def get_qa_logit_layer(self) -> nn.Module:
"""
Returns the linear layer that produces question answering logits.
Returns:
:obj:`nn.Module`: A torch module mapping the question answering prediction hidden states or :obj:`None` if
LXMERT does not have a visual answering head.
"""
if hasattr(self, "answer_head"):
return self.answer_head.logit_fc[-1]
def _set_qa_logit_layer(self, qa_logit_layer):
self.answer_head.logit_fc[-1] = qa_logit_layer
def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
if num_labels is None:
return cur_qa_logit_layer
cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
if cur_qa_labels == num_labels:
return cur_qa_logit_layer
# Build new linear output
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
else:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
# initialize all new labels
self._init_weights(new_qa_logit_layer)
# Copy labels from the previous weights
num_labels_to_copy = min(cur_qa_labels, num_labels)
new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
return new_qa_logit_layer
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
labels=None,
obj_labels=None,
matched_label=None,
ans=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
obj_labels (``Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]``, `optional`):
Each key is named after one of the visual losses, and the two elements of the tuple are of shape
``(batch_size, num_features)`` and ``(batch_size, num_features, visual_feature_dim)``, holding the label
ids and the label scores respectively
matched_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing whether or not the text input matches the image (classification) loss. Input
should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
ans (``torch.Tensor`` of shape ``(batch_size,)``, `optional`):
Index of the correct answer for the question answering (classification) objective.
Returns:
"""
if "masked_lm_labels" in kwargs:
warnings.warn(
"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("masked_lm_labels")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
device = input_ids.device if input_ids is not None else inputs_embeds.device
lxmert_output = self.lxmert(
input_ids=input_ids,
visual_feats=visual_feats,
visual_pos=visual_pos,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
visual_attention_mask=visual_attention_mask,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
lang_output, visual_output, pooled_output = (
lxmert_output[0],
lxmert_output[1],
lxmert_output[2],
)
lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
if self.task_qa:
answer_score = self.answer_head(pooled_output)
else:
answer_score = pooled_output[0][0]
total_loss = (
None
if (labels is None and matched_label is None and obj_labels is None and ans is None)
else torch.tensor(0.0, device=device)
)
if labels is not None and self.task_mask_lm:
masked_lm_loss = self.loss_fcts["ce"](
lang_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
total_loss += masked_lm_loss
if matched_label is not None and self.task_matched:
matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
total_loss += matched_loss
if obj_labels is not None and self.task_obj_predict:
total_visual_loss = torch.tensor(0.0, device=device)
visual_prediction_scores_dict = self.obj_predict_head(visual_output)
for key, key_info in self.visual_losses.items():
label, mask_conf = obj_labels[key]
output_dim = key_info["num"]
loss_fct_name = key_info["loss"]
label_shape = key_info["shape"]
weight = self.visual_loss_normalizer
visual_loss_fct = self.loss_fcts[loss_fct_name]
visual_prediction_scores = visual_prediction_scores_dict[key]
visual_loss = visual_loss_fct(
visual_prediction_scores.view(-1, output_dim),
label.view(*label_shape),
)
if visual_loss.dim() > 1: # Regression Losses
visual_loss = visual_loss.mean(1)
visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
total_visual_loss += visual_loss
total_loss += total_visual_loss
if ans is not None and self.task_qa:
answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
total_loss += answer_loss
if not return_dict:
output = (
lang_prediction_scores,
cross_relationship_score,
answer_score,
) + lxmert_output[3:]
return ((total_loss,) + output) if total_loss is not None else output
return LxmertForPreTrainingOutput(
loss=total_loss,
prediction_logits=lang_prediction_scores,
cross_relationship_score=cross_relationship_score,
question_answering_score=answer_score,
language_hidden_states=lxmert_output.language_hidden_states,
vision_hidden_states=lxmert_output.vision_hidden_states,
language_attentions=lxmert_output.language_attentions,
vision_attentions=lxmert_output.vision_attentions,
cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
)
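# Hedged sketch of a pre-training style call, using only the masked-LM and matching objectives; ``inputs``,
# ``visual_feats`` and ``visual_pos`` are assumed to be built as in the earlier sketch, and the label values
# below are dummies following the shapes documented in the forward docstring.
#
#     masked_ids = inputs["input_ids"].clone()      # token ids with some positions replaced by the mask token
#     labels = torch.full_like(masked_ids, -100)    # -100 = ignored; put the original ids at masked positions
#     matched_label = torch.tensor([1])             # 1 = the sentence matches the image
#     out = model(input_ids=masked_ids, visual_feats=visual_feats, visual_pos=visual_pos,
#                 labels=labels, matched_label=matched_label, return_dict=True)
#     out.loss, out.prediction_logits, out.cross_relationship_score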
@add_start_docstrings(
"""Lxmert Model with a visual-answering head on top for downstream QA tasks""",
LXMERT_START_DOCSTRING,
)
class LxmertForQuestionAnswering(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Configuration
self.config = config
self.num_qa_labels = config.num_qa_labels
self.visual_loss_normalizer = config.visual_loss_normalizer
# Lxmert backbone
self.lxmert = LxmertModel(config)
self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
# Weight initialization
# Initialize weights and apply final processing
self.post_init()
# Loss function
self.loss = CrossEntropyLoss()
def resize_num_qa_labels(self, num_labels):
"""
Build a resized question answering linear layer from the current one. Increasing the size will add newly
initialized weights; reducing the size will remove weights from the end.
Args:
num_labels (:obj:`int`, `optional`):
New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
weights at the end. Reducing the size will remove weights from the end. If not provided or :obj:`None`,
just returns a pointer to the qa labels :obj:`torch.nn.Linear` module of the model without doing
anything.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
"""
cur_qa_logit_layer = self.get_qa_logit_layer()
if num_labels is None or cur_qa_logit_layer is None:
return
new_qa_logit_layer = self._resize_qa_labels(num_labels)
self.config.num_qa_labels = num_labels
self.num_qa_labels = num_labels
return new_qa_logit_layer
def _resize_qa_labels(self, num_labels):
cur_qa_logit_layer = self.get_qa_logit_layer()
new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
self._set_qa_logit_layer(new_qa_logit_layer)
return self.get_qa_logit_layer()
def get_qa_logit_layer(self) -> nn.Module:
"""
Returns the linear layer that produces question answering logits.
Returns:
:obj:`nn.Module`: A torch module mapping the question answering prediction hidden states, or :obj:`None` if
Lxmert does not have the visual answering head.
"""
if hasattr(self, "answer_head"):
return self.answer_head.logit_fc[-1]
def _set_qa_logit_layer(self, qa_logit_layer):
self.answer_head.logit_fc[-1] = qa_logit_layer
def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
if num_labels is None:
return cur_qa_logit_layer
cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
if cur_qa_labels == num_labels:
return cur_qa_logit_layer
# Build new linear output
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
else:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
# initialize all new labels
self._init_weights(new_qa_logit_layer)
# Copy labels from the previous weights
num_labels_to_copy = min(cur_qa_labels, num_labels)
new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
return new_qa_logit_layer
@add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LxmertForQuestionAnsweringOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
visual_feats=None,
visual_pos=None,
attention_mask=None,
visual_attention_mask=None,
token_type_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (``torch.Tensor`` of shape ``(batch_size,)``, `optional`):
Index of the correct answer for the question answering (classification) objective.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
lxmert_output = self.lxmert(
input_ids=input_ids,
visual_feats=visual_feats,
visual_pos=visual_pos,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
visual_attention_mask=visual_attention_mask,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
pooled_output = lxmert_output[2]
answer_score = self.answer_head(pooled_output)
loss = None
if labels is not None:
loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
if not return_dict:
output = (answer_score,) + lxmert_output[3:]
return (loss,) + output if loss is not None else output
return LxmertForQuestionAnsweringOutput(
loss=loss,
question_answering_score=answer_score,
language_hidden_states=lxmert_output.language_hidden_states,
vision_hidden_states=lxmert_output.vision_hidden_states,
language_attentions=lxmert_output.language_attentions,
vision_attentions=lxmert_output.vision_attentions,
cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
)
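# Hedged usage sketch for the QA head above (the answer index is an arbitrary example; visual_feats and
# visual_pos are assumed to come from an external Faster R-CNN feature extractor, as documented earlier):
#
#     qa_model = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-base-uncased")
#     out = qa_model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos,
#                    labels=torch.tensor([42]), return_dict=True)
#     predicted_answer_index = out.question_answering_score.argmax(-1)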
|
py | 1a543551217ae3434dda3ac2cb517fb400820217 | """Write a program that reads two integers and compares them, showing one of these messages on screen:
– The first value is larger
– The second value is larger
– There is no larger value; the two are equal"""
n1 = int(input('Digite o primeiro número inteiro: '))
n2 = int(input('Digite o segundo número inteiro: '))
CYANO = "\033[1;36m"
END = "\033[0m"
YELLOW = "\033[1;93m"
if n1 > n2:
print(f'O primeiro nº digitado, {YELLOW}{n1}{END}, é {CYANO}MAIOR{END} que o segundo nº, {YELLOW}{n2}{END}.')
elif n2 > n1:
print(f'O primeiro nº digitado, {YELLOW}{n1}{END}, é {CYANO}MENOR{END} que o segundo nº, {YELLOW}{n2}{END}.')
else:
print(f'Os dois números digitados são {CYANO}IGUAIS{END}.')
|
py | 1a5435cb18b7be9b591e4f203a228bb87c5080c3 | # MenuTitle: Make Kerning Display
# -*- coding: utf-8 -*-
__doc__ = """
Open tab containing Kerning strings for the selected glyphs.
"""
import re
from collections import defaultdict, OrderedDict
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from vanilla import (
Window,
TextBox,
RadioGroup,
Button,
CheckBox,
)
import _kerningStrings
Glyphs.clearLog()
# Glyphs.showMacroWindow()
quotations = [
('/parenleft', '/parenright'),
('/bracketleft', '/bracketright'),
('/braceleft', '/braceright'),
('/quoteleft', '/quoteright'),
('/quotedblleft', '/quotedblright'),
('/quotesinglbase', '/quoteleft'),
('/quotedblbase', '/quotedblleft'),
('/quotedblbase', '/quotedblright'),
('/quoteright', '/quoteright'),
('/guillemetleft', '/guillemetright'),
('/guilsinglleft', '/guilsinglright'),
('/guillemetright', '/guillemetleft'),
('/guilsinglright', '/guilsinglleft')
]
# punctuations = ['period', 'comma', 'colon', 'semicolon', 'hyphen']
punctuations = {}
punctuations['dflt'] = '. , : ; -'.split(' ')
punctuations['greek'] = ["/comma", "/period", "/anoteleia", "/questiongreek"]
punctuations['armenian'] = ["/comma-arm", '/period-arm', '/hyphen-arm', "/emphasis-arm", "/exclam-arm", "/question-arm", "/abbreviation-arm", ]
class makeDisplay(object):
def __init__(self):
self.verboten = {
'right': ['napostrophe', 'Omegadasiavaria'],
'left': ['ldot', 'Ldot', 'ldot.sc', 'sigmafinal'],
'both': ['*.tf', '*.tosf', '.notdef', 'NULL', 'CR']
}
self.category = None
self.messages = []
self.interpolated_fonts = dict()
self.use_real = True
self.use_selection = False
self.ignore_red = False
self.current_glyph = None
self.leftside_kerning_groups = None
self.rightside_kerning_groups = None
self.all_kern_categories = self.get_all_kern_categories()
self.categories_leftside = self.get_categorised_glyphs('left')
self.categories_rightside = self.get_categorised_glyphs('right')
item_height = 24.0
w_width = 300.0
w_height = item_height * (7 + len(self.all_kern_categories))
margin = 10
next_y = margin
col_1_width = w_width - (margin * 2)
item_height = 24
radio_height = item_height * len(self.all_kern_categories)
self.w = Window((w_width, w_height), "Make Kerning Strings")
self.w.text_1 = TextBox((margin, next_y, w_width, item_height), "Kern with:", sizeStyle='regular')
next_y += item_height
self.w.radioCategories = RadioGroup((margin, next_y, col_1_width, radio_height), self.all_kern_categories, sizeStyle='regular')
self.w.radioCategories.set(0)
next_y += radio_height + margin
self.w.use_real = CheckBox((margin, next_y, col_1_width, item_height), "Use real words", value=True, sizeStyle='regular')
next_y += item_height
        self.w.use_selected = CheckBox((margin, next_y, col_1_width, item_height), "Use the selected glyphs verbatim", value=False, sizeStyle='regular')
next_y += item_height
self.w.ignore_red = CheckBox((margin, next_y, col_1_width, item_height), "Ignore red marked glyphs", value=False, sizeStyle='regular')
next_y += item_height + margin
self.w.gobutton = Button((margin + (col_1_width / 4), next_y, col_1_width / 2, item_height), 'Make Strings', callback=self.makeitso)
self.w.setDefaultButton(self.w.gobutton)
self.w.center()
self.w.open()
# self.makeitso(None)
def sbuttonCallback(self, sender):
self.s.close()
@staticmethod
def has_smallcaps():
for g in Glyphs.font.glyphs:
if g.subCategory == 'Smallcaps':
return True
return False
def get_all_kern_categories(self):
kcats = [
'Uppercase',
'Lowercase',
]
        if self.has_smallcaps():  # call the staticmethod; the bare method reference is always truthy
kcats.append('Smallcaps')
kcats += [
'Quotes',
'Number',
'Punctuation',
'Other',
]
return kcats
def get_canonincal_kerning_glyph(self, layer, pair_side):
g = layer.parent
if self.use_selection:
return g
if pair_side == 'left':
g = Glyphs.font.glyphs[layer.parent.rightKerningGroup] or layer.parent
if pair_side == 'right':
g = Glyphs.font.glyphs[layer.parent.leftKerningGroup] or layer.parent
if g is None:
g = layer.parent
return g
@staticmethod
def make_list_unique(this_list):
unique_list = []
for x in this_list:
if x in unique_list or x is None:
continue
unique_list.append(x)
return unique_list
def get_categorised_glyphs(self, side):
# cats = defaultdict(lambda: defaultdict(list))
cats = dict((k, defaultdict(list)) for k in self.all_kern_categories)
for g in [x for x in Glyphs.font.glyphs if self.is_elligable(x)]:
l = cats.get(g.category, cats.get(g.subCategory, cats['Other']))
l[g.script].append(self.get_canonincal_kerning_glyph(g.layers[0], side))
for cat in cats.keys():
for script in cats[cat].keys():
cats[cat][script] = self.make_list_unique(cats[cat][script])
return cats
def get_string(self, left_g, right_g):
string = None
if self.category == 'Quotes':
cat = left_g.subCategory if left_g.subCategory != 'Other' else left_g.category
pattern = _kerningStrings.patterns.get(left_g.script, _kerningStrings.patterns.get('latin')).get(cat + '-Quotes', '')
strings = [pattern.format(right=right_g.name, left=left_g.name, qL=quote_pair[0], qR=quote_pair[1]).replace(' /', '/') for quote_pair in _kerningStrings.quotations]
string = ' '.join(strings)
if not string and self.use_real:
base_name_left, _, suffix_left = left_g.name.partition('.')
base_name_right, _, suffix_right = right_g.name.partition('.')
potentials = [
base_name_left + base_name_right,
base_name_left + '/' + base_name_right,
'/' + base_name_left + ' ' + base_name_right,
'/' + base_name_left + '/' + base_name_right,
]
for s in potentials:
string = _kerningStrings.strings.get(s)
if string:
break
print(s)
if not string:
pattern = self.get_pattern(left_g, right_g)
string = pattern.format(right=right_g.name, left=left_g.name).replace(' /', '/')
if not string:
string = '/' + left_g.name + '/' + right_g.name
return string
def get_category_for_glyph(self, glyph):
if glyph.category in self.all_kern_categories:
return glyph.category
if glyph.subCategory in self.all_kern_categories:
return glyph.subCategory
        if glyph.subCategory == 'Currency':  # Glyphs uses the spelling 'Currency'
return 'Number'
return 'Other'
def get_pattern(self, main_glyph, other_glyph):
scripts_patterns = _kerningStrings.patterns.get(main_glyph.script, {})
# print(self.get_category_for_glyph(main_glyph))
# print(self.get_category_for_glyph(main_glyph) + '-' + self.get_category_for_glyph(other_glyph), self.all_kern_categories)
pattern = scripts_patterns.get(self.get_category_for_glyph(main_glyph) + '-' + self.get_category_for_glyph(other_glyph), '')
if self.category == 'Number':
suffix = ''.join(main_glyph.name.partition('.')[1:])
else:
suffix = ''
try:
pattern = pattern.format(
suffix=suffix,
left='{left}',
right='{right}',
)
except KeyError:
pass
return pattern
def is_elligable(self, glyph, side='both'):
if self.ignore_red and glyph.color == 0:
return False
if not glyph.export:
return False
for vgn in self.verboten[side]:
if re.match(vgn.replace('.', '\\.').replace('*', '.*'), glyph.name):
return False
return True
def makeitso(self, sender):
try:
self.w.close()
except AttributeError:
pass
self.category = self.all_kern_categories[self.w.radioCategories.get()]
self.use_real = self.w.use_real.get()
self.use_selection = self.w.use_selected.get()
self.ignore_red = self.w.ignore_red.get()
all_strings = []
if self.category == 'Quotes':
left_of_string_glyphs = self.make_list_unique([self.get_canonincal_kerning_glyph(sl, 'right') for sl in Glyphs.font.selectedLayers if self.is_elligable(sl.parent, 'right')])
right_of_string_glyphs = self.make_list_unique([self.get_canonincal_kerning_glyph(sl, 'left') for sl in Glyphs.font.selectedLayers if self.is_elligable(sl.parent, 'left')])
pairs = zip_longest(left_of_string_glyphs, right_of_string_glyphs)
for p in pairs:
gl, gr = p
if gl is None:
gl = gr if gr in left_of_string_glyphs else left_of_string_glyphs[0]
if gr is None:
                    gr = gl if gl in right_of_string_glyphs else right_of_string_glyphs[0]
kerning_string = self.get_string(gl, gr)
if kerning_string not in all_strings:
all_strings.append(kerning_string)
else:
# Holds kerning key glyphs that have been seen already, to avoid duplicates
processed_main_glyphs_left = OrderedDict()
processed_main_glyphs_right = OrderedDict()
# print([(k, self.categories_rightside[k].keys()) for k in self.categories_rightside.keys()])
for sl in Glyphs.font.selectedLayers:
# Process the selected glyph on the left side
main_g_left = self.get_canonincal_kerning_glyph(sl, 'left')
pair_strings_left = []
if self.is_elligable(main_g_left, 'left'):
if main_g_left.name not in processed_main_glyphs_left.keys():
processed_main_glyphs_left[main_g_left.name] = [sl.parent.name]
try:
if sl.parent.script:
other_glyphs_rightside = self.categories_rightside[self.category].get(sl.parent.script, self.categories_rightside[self.category].get(None))
else:
other_glyphs_rightside = self.categories_rightside[self.category].get(None, self.categories_rightside[self.category].get('latin'))
except KeyError:
other_glyphs_rightside = []
# print(self.category, self.categories_rightside.keys())
print(sl.parent.script, self.category, self.categories_rightside[self.category].keys())
for g in other_glyphs_rightside:
if not self.is_elligable(g, 'right'):
continue
other_g = self.get_canonincal_kerning_glyph(g.layers[sl.associatedMasterId], 'right')
kerning_string_left = self.get_string(main_g_left, other_g)
if kerning_string_left not in pair_strings_left:
pair_strings_left.append(kerning_string_left)
else:
processed_main_glyphs_left[main_g_left.name].append(sl.parent.name)
if pair_strings_left:
pair_strings_left.insert(0, main_g_left.name)
# Process the selected glyph on the right side
main_g_right = self.get_canonincal_kerning_glyph(sl, 'right')
pair_strings_right = []
if self.is_elligable(main_g_right, 'right'):
if main_g_right.name not in processed_main_glyphs_right.keys():
processed_main_glyphs_right[main_g_right.name] = [sl.parent.name]
if self.category == 'Quotes':
other_glyphs_leftside = [main_g_right]
main_g_right = self.get_canonincal_kerning_glyph(sl, 'left')
else:
if sl.parent.script:
other_glyphs_leftside = self.categories_leftside[self.category].get(sl.parent.script, self.categories_leftside[self.category].get(None, []))
else:
other_glyphs_leftside = self.categories_leftside[self.category].get(None, self.categories_leftside[self.category].get('latin', []))
for g in other_glyphs_leftside:
if not self.is_elligable(g, 'left'):
continue
other_g = self.get_canonincal_kerning_glyph(g.layers[sl.associatedMasterId], 'left')
kerning_string_right = self.get_string(other_g, main_g_right)
if kerning_string_right not in pair_strings_right:
pair_strings_right.append(kerning_string_right)
else:
processed_main_glyphs_right[main_g_right.name].append(sl.parent.name)
if pair_strings_right:
pair_strings_right.insert(0, main_g_right.name)
left_string = ' '.join(self.make_list_unique(pair_strings_left))
right_string = ' '.join(self.make_list_unique(pair_strings_right))
if all([left_string, right_string]):
pair_strings = '\n'.join([left_string, right_string])
else:
pair_strings = left_string or right_string
# print(':', pair_strings, ':')
if pair_strings:
all_strings.append(pair_strings)
Glyphs.font.newTab('\n\n'.join(all_strings))
Glyphs.font.currentTab.previewInstances = 'live'
Glyphs.font.currentTab.scale = 0.065
Glyphs.font.currentTab.textCursor = 3
Glyphs.font.tool = 'TextTool'
# Glyphs.showMacroWindow()
makeDisplay()
print('Done.')
|
py | 1a5435f8953f28141d82b6df43775bd6a94e7f4a | import unittest
from copy import deepcopy
from unittest.mock import patch
from weaviate import ReferenceBatchRequest, ObjectsBatchRequest
from test.util import check_error_message
class TestBatchReferencesObject(unittest.TestCase):
def batch_size_test(self, batch: ReferenceBatchRequest, expected_size: int):
"""
Test each parameter to have the same expected size.
Parameters
----------
batch : ReferenceBatchRequest
The reference batch.
expected_size : int
Expected size.
"""
# test __len__
self.assertEqual(len(batch), expected_size)
# test _from_object_class_names
self.assertEqual(len(batch._from_object_class_names), expected_size)
# test _from_object_ids
self.assertEqual(len(batch._from_object_ids), expected_size)
# test _from_object_properties
self.assertEqual(len(batch._from_object_properties), expected_size)
# test _to_object_ids
self.assertEqual(len(batch._to_object_ids), expected_size)
def test_add_and___len__(self):
"""
Test the `add` method.
"""
batch = ReferenceBatchRequest()
# invalid calls
## error messages
type_error_message = 'All arguments must be of type string'
with self.assertRaises(TypeError) as error:
batch.add(10, "some_str", "some_str", "some_str")
check_error_message(self, error, type_error_message)
with self.assertRaises(TypeError) as error:
batch.add("some_str", batch, "some_str", "some_str")
check_error_message(self, error, type_error_message)
with self.assertRaises(TypeError) as error:
batch.add("some_str", "some_str", True, "some_str")
check_error_message(self, error, type_error_message)
with self.assertRaises(TypeError) as error:
batch.add("some_str", "some_str", "some_str", 1.0)
check_error_message(self, error, type_error_message)
# valid calls
        ## test with a correctly formatted URL
batch = ReferenceBatchRequest()
# test __len__
self.batch_size_test(batch, 0)
batch.add("04a4b17d-6beb-443a-b1bc-835b0dd4e660",
"Alpha",
"a",
"fc7eb129-f138-457f-b727-1b29db191a67",
)
self.batch_size_test(batch, 1)
self.assertEqual(batch._from_object_ids[0], "04a4b17d-6beb-443a-b1bc-835b0dd4e660")
self.assertEqual(batch._to_object_ids[0], "fc7eb129-f138-457f-b727-1b29db191a67")
self.assertEqual(batch._from_object_class_names[0], "Alpha")
self.assertEqual(batch._from_object_properties[0], "a")
batch.add("04a4b17d-6beb-443a-b1bc-835b0dd4e661",
"Beta",
"b",
"fc7eb129-f138-457f-b727-1b29db191a68",
)
self.batch_size_test(batch, 2)
# previously added reference
self.assertEqual(batch._from_object_ids[0], "04a4b17d-6beb-443a-b1bc-835b0dd4e660")
self.assertEqual(batch._to_object_ids[0], "fc7eb129-f138-457f-b727-1b29db191a67")
self.assertEqual(batch._from_object_class_names[0], "Alpha")
self.assertEqual(batch._from_object_properties[0], "a")
# currently added reference
self.assertEqual(batch._from_object_ids[1], "04a4b17d-6beb-443a-b1bc-835b0dd4e661")
self.assertEqual(batch._to_object_ids[1], "fc7eb129-f138-457f-b727-1b29db191a68")
self.assertEqual(batch._from_object_class_names[1], "Beta")
self.assertEqual(batch._from_object_properties[1], "b")
def test_get_request_body(self):
"""
Test the `get_request_body` method.
"""
batch = ReferenceBatchRequest()
# no references
expected_return = []
body = batch.get_request_body()
self.assertEqual(body, expected_return)
# add a reference
batch.add("fd5af656-7d86-40da-9577-845c98e75543", "Griptape", "color",
"1c51b14d-1652-4225-8dfc-7f4079616f65")
body = batch.get_request_body()
expected_return.append({
"from": "weaviate://localhost/Griptape/fd5af656-7d86-40da-9577-845c98e75543/color",
"to": "weaviate://localhost/1c51b14d-1652-4225-8dfc-7f4079616f65"
})
self.assertEqual(body, expected_return)
# add another reference
batch.add("fd5af656-7d86-40da-9577-845c98e75511", "Griptape", "length",
"1c51b14d-1652-4225-8dfc-7f4079616f66")
body = batch.get_request_body()
expected_return.append({
"from": "weaviate://localhost/Griptape/fd5af656-7d86-40da-9577-845c98e75511/length",
"to": "weaviate://localhost/1c51b14d-1652-4225-8dfc-7f4079616f66"
})
self.assertEqual(body, expected_return)
class TestAddObjects(unittest.TestCase):
def test_add_and___len__(self):
"""
Test the `add` method.
"""
batch = ObjectsBatchRequest()
# invalid calls
## error messages
data_type_error_message = "Object must be of type dict"
class_type_error_message = "Class name must be of type str"
# wrong data_object
with self.assertRaises(TypeError) as error:
batch.add(None, "Class")
check_error_message(self, error, data_type_error_message)
with self.assertRaises(TypeError) as error:
batch.add(224345, "Class")
check_error_message(self, error, data_type_error_message)
# wrong class_name
with self.assertRaises(TypeError) as error:
batch.add({'name': 'Optimus Prime'}, None)
check_error_message(self, error, class_type_error_message)
with self.assertRaises(TypeError) as error:
batch.add({'name': 'Optimus Prime'}, ["Transformer"])
check_error_message(self, error, class_type_error_message)
# valid calls
self.assertEqual(len(batch), 0)
expected_return = []
# add an object without 'uuid' and 'vector'
obj = {
'class': "Philosopher",
'properties': {"name": "Socrates"}
}
expected_return.append({
'class': "Philosopher",
'properties': {"name": "Socrates"}
})
batch.add(obj['properties'], obj['class'])
self.assertEqual(len(batch), 1)
self.assertEqual(batch._objects, expected_return)
# change obj and check if batch does not reflect this change
obj['properties']['name'] = 'Test'
self.assertEqual(batch._objects, expected_return)
# add an object without 'vector'
obj = {
'class': "Chemist",
'properties': {"name": "Marie Curie"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf92"
}
expected_return.append({
'class': "Chemist",
'properties': {"name": "Marie Curie"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf92"
})
batch.add(obj['properties'], obj['class'], obj['id'])
self.assertEqual(len(batch), 2)
self.assertEqual(batch._objects, expected_return)
# change obj and check if batch does not reflect this change
obj['properties']['name'] = 'Test'
self.assertEqual(batch._objects, expected_return)
# add an object without 'uuid'
obj = {
'class': "Writer",
'properties': {"name": "Stephen King"},
'vector': [1, 2, 3]
}
expected_return.append({
'class': "Writer",
'properties': {"name": "Stephen King"},
'vector': [1, 2, 3]
})
batch.add(obj['properties'], obj['class'], vector=obj['vector'])
self.assertEqual(len(batch), 3)
self.assertEqual(batch._objects, expected_return)
# change obj and check if batch does not reflect this change
obj['properties']['name'] = 'Test'
self.assertEqual(batch._objects, expected_return)
# add an object with all arguments
obj = {
'class': "Inventor",
'properties': {"name": "Nikola Tesla"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf93",
'vector': [1, 2, 3]
}
expected_return.append({
'class': "Inventor",
'properties': {"name": "Nikola Tesla"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf93",
'vector': [1, 2, 3]
})
batch.add(obj['properties'], obj['class'], obj['id'], obj['vector'])
self.assertEqual(len(batch), 4)
self.assertEqual(batch._objects, expected_return)
# change obj and check if batch does not reflect this change
obj['properties']['name'] = 'Test'
self.assertEqual(batch._objects, expected_return)
def test_get_request_body(self):
"""
Test the `get_request_body` method.
"""
batch = ObjectsBatchRequest()
expected_return = []
self.assertEqual(batch.get_request_body(), {"fields": ["ALL"], "objects": expected_return})
# add an object without 'uuid' and 'vector'
obj = {
'class': "Philosopher",
'properties': {"name": "Socrates"}
}
expected_return.append({
'class': "Philosopher",
'properties': {"name": "Socrates"}
})
batch.add(obj['properties'], obj['class'])
self.assertEqual(batch.get_request_body(), {"fields": ["ALL"], "objects": expected_return})
# add an object without 'vector'
obj = {
'class': "Chemist",
'properties': {"name": "Marie Curie"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf92"
}
expected_return.append({
'class': "Chemist",
'properties': {"name": "Marie Curie"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf92"
})
batch.add(obj['properties'], obj['class'], obj['id'])
self.assertEqual(batch.get_request_body(), {"fields": ["ALL"], "objects": expected_return})
# add an object without 'uuid'
obj = {
'class': "Writer",
'properties': {"name": "Stephen King"},
'vector': [1, 2, 3]
}
expected_return.append({
'class': "Writer",
'properties': {"name": "Stephen King"},
'vector': [1, 2, 3]
})
batch.add(obj['properties'], obj['class'], vector=obj['vector'])
self.assertEqual(batch.get_request_body(), {"fields": ["ALL"], "objects": expected_return})
# add an object with all arguments
obj = {
'class': "Inventor",
'properties': {"name": "Nikola Tesla"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf93",
'vector': [1, 2, 3]
}
expected_return.append({
'class': "Inventor",
'properties': {"name": "Nikola Tesla"},
'id': "d087b7c6-a115-5c89-8cb2-f25bdeb9bf93",
'vector': [1, 2, 3]
})
batch.add(obj['properties'], obj['class'], obj['id'], obj['vector'])
self.assertEqual(batch.get_request_body(), {"fields": ["ALL"], "objects": expected_return})
|
py | 1a54362247c0796b3115ffe8f84f8780745fd716 | """Nautobot Jobs for the Device Lifecycle plugin."""
from .cve_tracking import GenerateVulnerabilities
from .lifecycle_reporting import DeviceSoftwareValidationFullReport, InventoryItemSoftwareValidationFullReport
jobs = [DeviceSoftwareValidationFullReport, InventoryItemSoftwareValidationFullReport, GenerateVulnerabilities]
|
py | 1a5436929443acba8e665aa18ec08c5a6e32ef29 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListMachineLearningComputeNodesResult',
'AwaitableListMachineLearningComputeNodesResult',
'list_machine_learning_compute_nodes',
]
@pulumi.output_type
class ListMachineLearningComputeNodesResult:
"""
Compute node information related to a AmlCompute.
"""
def __init__(__self__, compute_type=None, next_link=None, nodes=None):
if compute_type and not isinstance(compute_type, str):
raise TypeError("Expected argument 'compute_type' to be a str")
pulumi.set(__self__, "compute_type", compute_type)
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if nodes and not isinstance(nodes, list):
raise TypeError("Expected argument 'nodes' to be a list")
pulumi.set(__self__, "nodes", nodes)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> str:
"""
The type of compute
Expected value is 'AmlCompute'.
"""
return pulumi.get(self, "compute_type")
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The continuation token.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def nodes(self) -> Sequence['outputs.AmlComputeNodeInformationResponseResult']:
"""
The collection of returned AmlCompute nodes details.
"""
return pulumi.get(self, "nodes")
class AwaitableListMachineLearningComputeNodesResult(ListMachineLearningComputeNodesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListMachineLearningComputeNodesResult(
compute_type=self.compute_type,
next_link=self.next_link,
nodes=self.nodes)
def list_machine_learning_compute_nodes(compute_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListMachineLearningComputeNodesResult:
"""
Compute node information related to a AmlCompute.
:param str compute_name: Name of the Azure Machine Learning compute.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['computeName'] = compute_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200101:listMachineLearningComputeNodes', __args__, opts=opts, typ=ListMachineLearningComputeNodesResult).value
return AwaitableListMachineLearningComputeNodesResult(
compute_type=__ret__.compute_type,
next_link=__ret__.next_link,
nodes=__ret__.nodes)
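# Minimal usage sketch (illustrative; the resource names below are placeholders):
#
#   result = list_machine_learning_compute_nodes(
#       compute_name="my-aml-compute",
#       resource_group_name="my-resource-group",
#       workspace_name="my-workspace")
#   pulumi.export("amlComputeNodes", result.nodes)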
|
py | 1a543826dbfcf03b516c3ca49ed55ea8f18ecaab | from output.models.ms_data.element.elem_t064_xsd.elem_t064 import (
A,
B,
Ca,
EA,
ECa,
RA,
RCa,
UnionA,
UnionAb,
Root,
Sa1,
Sa2,
Sa3,
Test1,
Test2,
Test3,
Test4,
Test5,
)
__all__ = [
"A",
"B",
"Ca",
"EA",
"ECa",
"RA",
"RCa",
"UnionA",
"UnionAb",
"Root",
"Sa1",
"Sa2",
"Sa3",
"Test1",
"Test2",
"Test3",
"Test4",
"Test5",
]
|
py | 1a54384d20074d30a170fd187ffd3801193b08da | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core import serializers
from django.core.management import call_command
from django.db import migrations, models
import os
fixture_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../fixtures'))
def load_fixture(fixture_filename):
fixture_file = os.path.join(fixture_dir, fixture_filename)
fixture = open(fixture_file, 'rb')
objects = serializers.deserialize('json', fixture, ignorenonexistent=True)
for obj in objects:
obj.save()
fixture.close()
class Migration(migrations.Migration):
def load_data(apps, schema_editor):
load_fixture("awardees.json")
load_fixture("countries.json")
load_fixture("material_types.json")
load_fixture("languages.json")
load_fixture("institutions.json")
load_fixture("ethnicities.json")
load_fixture("labor_presses.json")
load_fixture("countries.json")
dependencies = [
("core", "0002_auto_20160713_1509"),
]
operations = [
migrations.RunPython(load_data)
]
|
py | 1a54399fbfa1b9708a074197db54193b1d45b3bd | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from docutils import nodes
from functools import partial
from sphinx.util.docfields import _is_single_paragraph
from sphinx.util import docfields
from sphinx import directives, addnodes
from sphinx import addnodes
from sphinx.addnodes import desc, desc_signature
from .utils import transform_node as _transform_node
from .nodes import remarks
TYPE_SEP_PATTERN = r'(\[|\]|, |\(|\))'  # raw string avoids invalid-escape warnings
def _get_desc_data(node):
assert node.tagname == 'desc'
if node.attributes['domain'] != 'py':
print(
'Skipping Domain Object (%s)' % node.attributes['domain']
)
return None, None
try:
module = node[0].attributes['module']
full_name = node[0].attributes['fullname'].split('.')[-1]
except KeyError as e:
print("[docfx_yaml] There maybe some syntax error in docstring near: " + node.astext())
raise e
try:
uid = node[0].attributes['ids'][0]
except Exception:
uid = '{module}.{full_name}'.format(module=module, full_name=full_name)
print('Non-standard id: %s' % uid)
return full_name, uid
def _is_desc_of_enum_class(node):
assert node.tagname == 'desc_content'
if node[0] and node[0].tagname == 'paragraph' and node[0].astext() == 'Bases: enum.Enum':
return True
return False
def _hacked_transform(typemap, node):
"""
Taken from docfields.py from sphinx.
This does all the steps around gathering data,
but doesn't actually do the node transformations.
"""
entries = []
groupindices = {}
types = {}
# step 1: traverse all fields and collect field types and content
for field in node:
fieldname, fieldbody = field
try:
# split into field type and argument
fieldtype, fieldarg = fieldname.astext().split(None, 1)
except ValueError:
# maybe an argument-less field type?
fieldtype, fieldarg = fieldname.astext(), ''
typedesc, is_typefield = typemap.get(fieldtype, (None, None))
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
fieldname[0] = nodes.Text(new_fieldname)
entries.append(field)
continue
typename = typedesc.name
# collect the content, trying not to keep unnecessary paragraphs
if _is_single_paragraph(fieldbody):
content = fieldbody.children[0].children
else:
content = fieldbody.children
# if the field specifies a type, put it in the types collection
if is_typefield:
# filter out only inline nodes; others will result in invalid
# markup being written out
content = [n for n in content if isinstance(n, nodes.Inline) or
isinstance(n, nodes.Text)]
if content:
types.setdefault(typename, {})[fieldarg] = content
continue
# also support syntax like ``:param type name:``
if typedesc.is_typed:
try:
argtype, argname = fieldarg.split(None, 1)
except ValueError:
pass
else:
types.setdefault(typename, {})[argname] = \
[nodes.Text(argtype)]
fieldarg = argname
translatable_content = nodes.inline(fieldbody.rawsource,
translatable=True)
translatable_content.source = fieldbody.parent.source
translatable_content.line = fieldbody.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in groupindices:
group = entries[groupindices[typename]]
else:
groupindices[typename] = len(entries)
group = [typedesc, []]
entries.append(group)
entry = typedesc.make_entry(fieldarg, [translatable_content])
group[1].append(entry)
else:
entry = typedesc.make_entry(fieldarg, [translatable_content])
entries.append([typedesc, entry])
return (entries, types)
def patch_docfields(app):
"""
Grab syntax data from the Sphinx info fields.
This is done by monkeypatching into the DocFieldTransformer,
which is what Sphinx uses to transform the docutils ``nodes.field``
into the sphinx ``docfields.Field`` objects.
See usage in Sphinx
`here <https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/__init__.py#L180>`_.
This also performs the RST doctree to Markdown transformation on the content,
using the :class:`docfx_yaml.writers.MarkdownWriter`.
"""
transform_node = partial(_transform_node, app)
def get_data_structure(entries, types, field_object):
"""
Get a proper docfx YAML data structure from the entries & types
"""
data = {
'parameters': [],
'variables': [],
'exceptions': [],
'return': {},
'references': [],
}
def make_param(_id, _description, _type=None, _required=None):
ret = {
'id': _id,
'description': _description.strip(" \n\r\t")
}
if _type:
ret['type'] = _type
if _required is not None:
ret['isRequired'] = _required
return ret
def transform_para(para_field):
if isinstance(para_field, addnodes.pending_xref):
return transform_node(para_field)
else:
return para_field.astext()
def resolve_type(data_type):
# Remove @ ~ and \n for cross reference in parameter/return value type to apply to docfx correctly
data_type = re.sub('[@~\n]', '', data_type)
# Add references for docfx to resolve ref if type contains TYPE_SEP_PATTERN
_spec_list = []
_spec_fullnames = re.split(TYPE_SEP_PATTERN, data_type)
_added_reference = {}
if len(_spec_fullnames) > 1:
_added_reference_name = ''
for _spec_fullname in _spec_fullnames:
if _spec_fullname != '':
_spec = {}
_spec['name'] = _spec_fullname.split('.')[-1]
_spec['fullName'] = _spec_fullname
if re.match(TYPE_SEP_PATTERN, _spec_fullname) is None:
_spec['uid'] = _spec_fullname
_spec_list.append(_spec)
_added_reference_name += _spec['name']
_added_reference = {
'uid': data_type,
'name': _added_reference_name,
'fullName': data_type,
'spec.python': _spec_list
}
return data_type, _added_reference
def extract_exception_desc(field_object):
ret = []
if len(field_object) > 0:
for field in field_object:
if 'field_name' == field[0].tagname and field[0].astext() == 'Raises':
assert field[1].tagname == 'field_body'
field_body = field[1]
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
for child in children:
if isinstance (child, nodes.paragraph):
pending_xref_index = child.first_child_matching_class(addnodes.pending_xref)
if pending_xref_index is not None:
pending_xref = child[pending_xref_index]
raise_type_index = pending_xref.first_child_matching_class(nodes.literal)
if raise_type_index is not None:
raise_type = pending_xref[raise_type_index]
ret.append({'type': pending_xref['reftarget'], 'desc': raise_type.astext()})
return ret
for entry in entries:
if isinstance(entry, nodes.field):
# pass-through old field
pass
else:
fieldtype, content = entry
fieldtypes = types.get(fieldtype.name, {})
if fieldtype.name == 'exceptions':
for _type, _description in content:
data['exceptions'].append({
'type': _type,
'description': transform_node(_description[0]).strip(" \n\r\t")
})
if fieldtype.name == 'returntype':
for returntype_node in content[1]:
returntype_ret = transform_node(returntype_node)
if returntype_ret:
# Support or in returntype
for returntype in re.split('[ \n]or[ \n]', returntype_ret):
returntype, _added_reference = resolve_type(returntype)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
data['return'].setdefault('type', []).append(returntype)
if fieldtype.name == 'returnvalue':
returnvalue_ret = transform_node(content[1][0])
if returnvalue_ret:
data['return']['description'] = returnvalue_ret.strip(" \n\r\t")
if fieldtype.name in ['parameter', 'variable', 'keyword']:
for field, node_list in content:
_id = field
_description = transform_node(node_list[0])
if field in fieldtypes:
_type = u''.join(transform_para(n) for n in fieldtypes[field])
else:
_type = None
_para_types = []
if fieldtype.name == 'parameter' or fieldtype.name == 'keyword':
if _type:
# Support or in parameter type
for _s_type in re.split('[ \n]or[ \n]', _type):
_s_type, _added_reference = resolve_type(_s_type)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
_para_types.append(_s_type)
_data = make_param(_id=_id, _type=_para_types, _description=_description, _required=False if fieldtype.name == 'keyword' else True)
data['parameters'].append(_data)
if fieldtype.name == 'variable':
if _type:
# Support or in variable type
for _s_type in re.split('[ \n]or[ \n]', _type):
_s_type, _added_reference = resolve_type(_s_type)
if _added_reference:
if len(data['references']) == 0:
data['references'].append(_added_reference)
elif any(r['uid'] != _added_reference['uid'] for r in data['references']):
data['references'].append(_added_reference)
_para_types.append(_s_type)
_data = make_param(_id=_id, _type=_para_types, _description=_description)
data['variables'].append(_data)
ret_list = extract_exception_desc(field_object)
for ret in ret_list:
# only use type in exceptions
data.setdefault('exceptions', []).append({
'type': ret['type']
})
return data
class PatchedDocFieldTransformer(docfields.DocFieldTransformer):
@staticmethod
def type_mapping(type_name):
mapping = {
"staticmethod": "method",
"classmethod": "method",
"exception": "class"
}
return mapping[type_name] if type_name in mapping else type_name
def __init__(self, directive):
self.directive = directive
super(PatchedDocFieldTransformer, self).__init__(directive)
def transform_all(self, node):
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
summary = []
data = {}
name, uid = _get_desc_data(node.parent)
for child in node:
if isinstance(child, remarks):
remarks_string = transform_node(child)
data['remarks'] = remarks_string
elif isinstance(child, addnodes.desc):
if child.get('desctype') == 'attribute':
attribute_map = {} # Used for detecting duplicated attributes in intermediate data and merge them
for item in child:
if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):
# capture attributes data and cache it
data.setdefault('added_attribute', [])
item_ids = item.get('ids', [''])
if len(item_ids) == 0: # find a node with no 'ids' attribute
curuid = item.get('module', '') + '.' + item.get('fullname', '')
# generate its uid by module and fullname
else:
curuid = item_ids[0]
if len(curuid) > 0:
parent = curuid[:curuid.rfind('.')]
name = item.children[0].astext()
if curuid in attribute_map:
if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed
attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())
# concat the description of duplicated nodes
else:
attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']
else:
if _is_desc_of_enum_class(node):
addedData = {
'uid': curuid,
'id': name,
'parent': parent,
'langs': ['python'],
'name': name,
'fullName': curuid,
'type': item.parent.get('desctype'),
'module': item.get('module'),
'syntax': {
'content': item.astext(),
'return': {
'type': [parent]
}
}
}
else:
addedData = {
'uid': curuid,
'class': parent,
'langs': ['python'],
'name': name,
'fullName': curuid,
'type': 'attribute',
'module': item.get('module'),
'syntax': {
'content': item.astext()
}
}
attribute_map[curuid] = addedData
else:
raise Exception('ids of node: ' + repr(item) + ' is missing.')
# no ids and no duplicate or uid can not be generated.
if 'added_attribute' in data:
data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list
# Don't recurse into child nodes
continue
elif isinstance(child, nodes.field_list):
(entries, types) = _hacked_transform(self.typemap, child)
_data = get_data_structure(entries, types, child)
data.update(_data)
elif isinstance(child, addnodes.seealso):
data['seealso'] = transform_node(child)
elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():
# Remove the admonition node
child_copy = child.deepcopy()
child_copy.pop(0)
data['example'] = transform_node(child_copy)
else:
content = transform_node(child)
# skip 'Bases' in summary
if not content.startswith('Bases: '):
summary.append(content)
if "desctype" in node.parent and node.parent["desctype"] == 'class':
data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.
if summary:
data['summary'] = '\n'.join(summary)
# Don't include empty data
for key, val in data.copy().items():
if not val:
del data[key]
data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent["desctype"]) if "desctype" in node.parent else 'unknown'
self.directive.env.docfx_info_field_data[uid] = data
super(PatchedDocFieldTransformer, self).transform_all(node)
directives.DocFieldTransformer = PatchedDocFieldTransformer
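# Illustrative note (an assumption, not part of this module): a Sphinx extension using
# this patch would typically call patch_docfields(app) from its setup() hook so that the
# patched DocFieldTransformer is installed before any reStructuredText is parsed, e.g.
#
#   def setup(app):
#       patch_docfields(app)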
|
py | 1a543ac0fd2aa29c23146e71d7f317f9cafd4958 | from django.urls import path
from . import views
urlpatterns = [
    path('', views.learn),
    path('dj/', views.learn_django),
    path('py/', views.learn_python),
] |
py | 1a543b92029bd93e82dfc1d7744bb19bc84c26a9 | import os
GRID_FOLDER = "gpw-v4-national-identifier-grid-rev11_30_sec_asc/"
GRID_LOOKUP = "gpw_v4_national_identifier_grid_rev11_lookup.txt"
DATA_FOLDER = os.path.expanduser("~") + "/.sedac_gpw_parser/"
def id_lookup(searchterm, lookup_file=DATA_FOLDER+GRID_FOLDER+GRID_LOOKUP,
verbose=True):
success = False
names_ids = []
searchterm = searchterm.lower().replace(" ", "")
with open(lookup_file, "r") as infile:
infile.readline()
for line in infile:
line = line.split("\t")
country_id = line[0]
country_name = line[3]
if searchterm.lower() in country_name.lower().replace(" ", ""):
if verbose:
print(country_name, ":", country_id)
success = True
names_ids.append((country_name, int(country_id)))
if not success:
if verbose:
print("No country found for search term:", searchterm)
return names_ids
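if __name__ == "__main__":
    # Minimal usage sketch: assumes the GPW national identifier grid lookup file has
    # already been downloaded to DATA_FOLDER. Prints every country matching the term
    # and returns (name, id) tuples; the search term here is just an example.
    matches = id_lookup("united")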
|
py | 1a543c365a0d319dec0119aedc73617dfaf2bbce | # __author__ = 'ktc312'
# -*- coding: utf-8 -*-
# coding: utf-8
import urllib2 as ul
from bs4 import BeautifulSoup
import csv
import os
import pandas as pd
import time
import data_cleaning
data_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tw_perm_data_analysis/')
# Construct the visadoor search url
base_url = str('http://visadoor.com/greencards/index?country=Taiwan&submit=Search')
def raw_data_rows_to_csv(list_data, file_name):
with open(data_path + "data/" + file_name, "wb") as f:
writer = csv.writer(f)
writer.writerows(list_data)
# get last_year
def get_last_year():
col_names = ['id', 'Decision_Date', 'Employer', 'City_State', 'Case_Status', 'Job_Title', 'Wage_Offer']
tw_perm_df = pd.read_csv(data_path + 'data/TW_PERM.csv', names=col_names, dtype=str, skiprows=1)
data_cleaning.convert_datetime(tw_perm_df, 'Decision_Date')
sorted_df = tw_perm_df.sort_values('Decision_Date', ascending=True)
return str(sorted_df.iloc[[-1]]['Decision_Date']).split('-')[0][-4:]
get_last_year()
# get cases found
def get_cases_found(last_year):
cases_found_in_page = 0
test_search_term = '&year=' + last_year
soup = BeautifulSoup(ul.urlopen(base_url + test_search_term, data=None, timeout=5), "html.parser")
cases_found_class = soup.findAll("div", {"class": "col-sm-5"})
for div in cases_found_class:
cases_found_in_page = int(str(div).split('<h4>')[1].split(' ')[3])
return cases_found_in_page
# get page count
def get_page_count(cases):
if cases <= 1000:
count = 1
else:
count = (cases/1000) + 1
return count
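# Example (this script targets Python 2, so "/" here is integer division):
# get_page_count(800) -> 1, get_page_count(2500) -> (2500 / 1000) + 1 = 3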
# get data
def scrape_data(last_year, page_count):
i = 0
encode_raw_data = []
while i < page_count:
search_term = '&year=' + last_year + '&page=' + str(i+1)
soup = BeautifulSoup(ul.urlopen(base_url + search_term, data=None, timeout=5), "html.parser")
# get data table
raw_data = []
table = soup.find('table', attrs={'class': 'table table-bordered table-striped table-hover'})
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
cols = [ele.text.strip() for ele in cols]
for col in cols:
raw_data.append(col)
for u_item in raw_data:
encode_raw_data.append(u_item.encode('UTF8'))
time.sleep(5)
i += 1
i = 0
encode_raw_data_rows = []
while i < len(encode_raw_data):
encode_raw_data_rows.append(encode_raw_data[i:i+7])
i += 7
raw_data_rows_to_csv(encode_raw_data_rows, 'temp_new_data.csv')
col_names = ['id', 'Decision_Date', 'Employer', 'City_State', 'Case_Status', 'Job_Title', 'Wage_Offer']
new_df = pd.read_csv(data_path + 'data/temp_new_data.csv', names=col_names, dtype=str, skiprows=1)
return new_df
# get the latest data
def download_data():
last_yr = get_last_year()
pages = get_page_count(get_cases_found(last_yr))
return scrape_data(last_yr, pages)
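# Minimal usage sketch (assumes network access and an existing data/TW_PERM.csv;
# the output filename below is a placeholder):
#
#   new_df = download_data()
#   new_df.to_csv(data_path + 'data/TW_PERM_new.csv', index=False)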
|
py | 1a543d408a82d1f2a5821cf86d77428b3e5735b3 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 13:46:42 2019
@author: Zaki
"""
from sympy.parsing import sympy_parser
from pint import UnitRegistry
import numpy
import sympy
ureg = UnitRegistry()
Q = ureg.Quantity
LENGTH = '[length]'
INDUCTANCE = '[length] ** 2 * [mass] / [current] ** 2 / [time] ** 2'
CAPACITANCE = '[current] ** 2 * [time] ** 4 / [length] ** 2 / [mass]'
RESISTANCE = '[length] ** 2 * [mass] / [current] ** 2 / [time] ** 3'
DIMENSIONLESS = 'dimensionless'
LENGTH_UNIT = 'meter'
INDUCTANCE_UNIT = 'nH'
CAPACITANCE_UNIT = 'fF'
RESISTANCE_UNIT = 'ohm'
DIMENSIONLESS_UNIT = ''
### List handling
# Useful function to manipulate to_move entities and ports
def find_last_list(list_entities):
# return the last list of a set of nested lists
if isinstance(list_entities, list):
if len(list_entities)==0:
return list_entities
else:
if isinstance(list_entities[-1], list):
return find_last_list(list_entities[-1])
else:
return list_entities
else:
raise TypeError('There are no list')
def find_penultimate_list(list_entities):
# return the last list of a set of nested lists
if isinstance(list_entities, list):
if len(list_entities)==0:
return False
else:
if isinstance(list_entities[-1], list):
if len(list_entities[-1])==0:
return list_entities
else:
if isinstance(list_entities[-1][-1], list):
return find_penultimate_list(list_entities[-1])
else:
return list_entities
else:
return False
else:
raise TypeError('There are no list')
def add_to_corresponding_list(elt, nested_list, added_elt):
# return the last list of a set of nested lists
if isinstance(nested_list, list):
if elt in nested_list:
index = nested_list.index(elt)
nested_list.insert(index+1, added_elt)
return True
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
if add_to_corresponding_list(elt, elt_list, added_elt):
break
else:
return False
return True
else:
pass#raise TypeError('Argument is not a list')
def general_remove(elt, nested_list):
# same as list.remove(elt) but for a nested list
if isinstance(nested_list, list):
if elt in nested_list:
nested_list.remove(elt)
return True
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
success = general_remove(elt, elt_list)
if success:
break
else:
raise TypeError('Argument is not a list')
def find_corresponding_list(elt, nested_list):
# return the last list of a set of nested lists
if isinstance(nested_list, list):
if elt in nested_list:
return nested_list
else:
for elt_list in nested_list:
if isinstance(elt_list, list):
found_list = find_corresponding_list(elt, elt_list)
if found_list:
break
else:
return False
return found_list
else:
return None
### Naming
def gen_name(name):
# routine to mimic the default naming procedure of HFSS when object
# already exists
end = ''
for ii in name[::-1]:
if ii.isdigit():
end+=ii
else:
break
if end=='':
return name+'1'
number = int(end[::-1])
if number==0:
return name+'1'
else:
prefix = name[:-len(str(number))]
suffix = str(number+1)
return prefix+suffix
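# Examples (added for clarity): gen_name('cylinder') -> 'cylinder1',
# gen_name('cylinder1') -> 'cylinder2', gen_name('box12') -> 'box13'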
def check_name(_class, name):
end = ''
for ii, char in enumerate(name[::-1]):
if char.isdigit():
end+=char
else:
break
else:
ii += 1
if end == '':
radical = name
number = 0
else:
radical = name[:-ii]
number = int(end[::-1])
new_name = name
while(new_name in _class.dict_instances.keys()):
number+=1
new_name = radical+str(number)
if new_name != name:
print("%s: changed '%s' name into '%s'"%(_class.__name__, name, new_name))
return new_name
### Litteral Expressions
def equal_float(float1, float2):
if abs(float1)>1e-10:
rel_diff = abs((float1-float2)/float1)
if rel_diff<1e-5:
return True
else:
return False
elif abs(float2)>1e-10:
rel_diff = abs((float1-float2)/float2)
if rel_diff<1e-5:
return True
else:
return False
else:
return True
def simplify_arith_expr(expr):
try:
out = repr(sympy_parser.parse_expr(str(expr)))
return out
except Exception:
print("Couldn't parse", expr)
raise
def extract_value_unit(expr, units):
"""
:type expr: str
:type units: str
:return: float
"""
try:
return Q(expr).to(units).magnitude
except Exception:
try:
return float(expr)
except Exception:
return expr
def extract_value_dim(expr):
"""
type expr: str
"""
return str(Q(expr).dimensionality)
def parse_entry(*entries, marker=True):
#should take a list of tuple of list... of int, float or str...
parsed = []
for entry in entries:
if not isinstance(entry, list) and not isinstance(entry, tuple):
parsed.append(extract_value_unit(entry, LENGTH_UNIT))
else:
if isinstance(entry, list):
if isinstance(entry, Vector):
parsed.append(Vector(parse_entry(*entry, marker=False)))
else:
parsed.append(parse_entry(*entry, marker=False))
elif isinstance(entry, tuple):
parsed.append(tuple(parse_entry(*entry, marker=False)))
else:
raise TypeError('Not foreseen type: %s'%(type(entry)))
if len(parsed)==1 and marker:
return parsed[0]
else:
return parsed
def rem_unit(other):
try:
value = extract_value_unit(other, LENGTH_UNIT)
return value
except Exception:
return other
def _val(elt):
if isinstance(elt, (int, float, numpy.int64, numpy.float64, numpy.int32, numpy.float32)):
return elt
else:
return float(elt.evalf(subs=variables))
def val(*entries, marker=True):
#should take a list of tuple of list... of int, float or str...
parsed = []
for entry in entries:
if not isinstance(entry, (list, tuple, Vector)):
parsed.append(_val(entry))
else:
if isinstance(entry, Vector):
parsed.append(Vector(val(*entry, marker=False)))
elif isinstance(entry, list):
parsed.append(val(*entry, marker=False))
elif isinstance(entry, tuple):
parsed.append(tuple(val(*entry, marker=False)))
else:
raise TypeError('Not foreseen type: %s'%(type(entry)))
if len(parsed)==1 and marker:
return parsed[0]
else:
return parsed
def way(vec):
if vec[1] != 0:
if abs(vec[0]/vec[1])<1e-2:
if vec[1]>0:
return Vector(0,1)
elif vec[1]<0:
return Vector(0,-1)
if vec[0] != 0 :
if abs(vec[1]/vec[0])<1e-2:
if vec[0]>0:
return Vector(1,0)
elif vec[0]<0:
return Vector(-1,0)
variables = {}
def store_variable(symbol, value): # put value in SI
if isinstance(value, str):
if LENGTH == extract_value_dim(value):
unit = LENGTH_UNIT
if INDUCTANCE == extract_value_dim(value):
unit = INDUCTANCE_UNIT
if CAPACITANCE == extract_value_dim(value):
unit = CAPACITANCE_UNIT
if RESISTANCE == extract_value_dim(value):
unit = RESISTANCE_UNIT
if DIMENSIONLESS == extract_value_dim(value):
unit = DIMENSIONLESS_UNIT
value = extract_value_unit(value, unit)
variables[symbol] = value
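# Example (added for clarity): store_variable('track', '42um') stores
# variables['track'] = 4.2e-05 (metres), while store_variable('Lj', '12nH') stores
# variables['Lj'] = 12.0 (kept in nH, the INDUCTANCE_UNIT defined above).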
class Vector(numpy.ndarray):
"""
Vector is a custom 3D vector class, alowing for opperations optimized to
interface properly with HFSS.
The class can be instenciate as a 2D vector, how ever, it will effectively
creat a 3D vector with 0 for z axis.
"""
def __new__(cls, vec, vec_y=None, vec_z=None):
"""
Init of the 3D vector:
If vec_y, and vec_z are None, then vec must a len=2 or len=3 iterable.
If vec_y is not None, and vec_z is, then creat a vector [vec, vec_y, 0].
If vec_y and vec_z are not None, then creat a vector [vec, vec_y, vec_z].
"""
if vec_y is not None:
vec = [vec, vec_y, 0]
if(vec_z is not None):
vec[2] = vec_z
try:
if(not (len(vec)==2 or len(vec)==3)):
raise TypeError('vec can only be 2 or 3D, not %iD' % (len(vec)))
except:
raise TypeError('vec must be iterable')
if(len(vec) == 2):
vec = [vec[0], vec[1], 0]
obj = numpy.asarray(vec).view(cls)
return obj
@staticmethod
def check(elt):
"""
Utility function to check if an element is compatible with vectors
        operations. It only requires the element to be iterable and of len=3.
Args:
elt: The element to be tested
Returns:
            Boolean, True if elt is compatible with Vector operations, False
otherwise.
"""
try:
return len(elt)==3
except:
return False
def __eq__(self, other):
val_self = val(self)
val_other = val(other)
bool_result = (equal_float(val_self[0], val_other[0]) and
equal_float(val_self[1], val_other[1]) and
equal_float(val_self[2], val_other[2]))
return bool_result
def index(self, elt):
val_self = val(self)
val_elt = val(elt)
for ii, item in enumerate(val_self):
if item == val_elt:
break
else:
return -1
return ii
# def __add__(self, other):
# if Vector.check(other):
# return Vector([self[0]+other[0], self[1]+other[1], self[2]+other[2]])
# else:
# try:
# return Vector([self[0]+other, self[1]+other, self[2]+other])
# except:
# raise TypeError('Could not perform add operation')
# def __radd__(self, other):
# return self + other
# def __sub__(self, other) :
# if Vector.check(other):
# return Vector([self[0]-other[0], self[1]-other[1], self[2]-other[2]])
# else:
# try:
# Vector([self[0]-other, self[1]-other, self[2]-other])
# except:
# raise TypeError('Could not perform sub operation')
# def __neg__(self):
# return Vector([-self[0], -self[1], -self[2]])
# def __rsub__(self, other):
# return -self + other
# def __mul__(self, other):
# if Vector.check(other):
# return Vector([self[0]*other[0], self[1]*other[1], self[2]*other[2]])
# else:
# try:
# return Vector([other*self[0], other*self[1], other*self[2]])
# except:
# raise TypeError('Could not perform mul operation')
# def __rmul__(self, other):
# return self * other
# def __truediv__(self, other):
# if Vector.check(other):
# return Vector([self[0]/other[0], self[1]/other[1], self[2]/other[2]])
# else:
# try:
# return Vector([self[0]/other, self[1]/other, self[2]/other])
# except:
# raise TypeError('Could not perform div operation')
# def __rtruediv__(self, other):
# self / other
# def dot(self, other):
# if Vector.check(other):
# return self[0]*other[0]+self[1]*other[1]+self[2]*other[2]
# else:
# raise TypeError('Could not perform dot operation')
def cross(self, other):
"""
        This function returns the cross product between self and other.
Args:
other: of type Vector
Returns:
type Vector, self x other
"""
if(Vector.check(other) and Vector.check(other)):
return Vector(self[1]*other[2]-self[2]*other[1],
-(self[0]*other[2]-self[2]*other[0]),
self[0]*other[1]-self[1]*other[0])
else:
raise TypeError('Could not perform dot operation')
def scalar_cross(self, other, ref=None):
"""
This function is a bit cryptic. It computes the signed magnitude of
the cross product between self and other, assuming they both are in
        the plane orthogonal to ref.
Args:
other: a Vector
ref: an other Vector, if None, assumed to be [0, 0, 1]
Returns:
dot((self x other), ref)
"""
if(ref is None):
ref = Vector(0, 0, 1)
if(Vector.check(other) and Vector.check(ref)):
return self.cross(other).dot(ref)
else:
raise TypeError('Could not perform dot operation')
def norm(self):
return (self[0]**2+self[1]**2+self[2]**2)**0.5
def abs(self):
return Vector([abs(self[0]), abs(self[1]), abs(self[2])])
def unit(self):
norm = self.norm()
return Vector([self[0]/norm, self[1]/norm, self[2]/norm])
def orth(self):
return Vector([-self[1], self[0]])
# def as_nda(self):
# return numpy.array([self[0], self[1], self[2]], dtype=object)
def rot(self, other, ref=None):
'''
        This function is just completely cryptic, Ulysse wrote it a long time ago.
Here is what it is doing: we assume that self is expressed in x=(100), y=(010), z=(001)
This function returns the coordinates of self in x'=other,y'=-(other x ref), z'=ref
In other words, this function computes a 3D change of coordinates.
Note:
            This function has been written assuming other and ref are given orthogonal.
            Hence, if that is not the case, it can have unexpected behaviors.
Args:
other: type Vector, the new x reference vector (x')
ref: type Vector, the new z reference vector (z'), if None, taken to be (0,0,1)
Returns:
self expressed in the new coordinate system.
'''
if(ref is None):
ref = Vector([0, 0, 1])
else:
ref = Vector(ref)
other = Vector(other)
if(Vector.check(other) and Vector.check(ref)):
other = Vector(other).unit()
ortho = -other.cross(ref)
return (Vector([self.dot(other.refx()), self.dot(other.orth().refy()), 0])*ref[2] +
Vector([self.dot(other.orth().refx()), 0, self.dot(other.refz())])*ref[1] +
Vector([0, self.dot(other.refy()), self.dot(other.orth().refz())])*ref[0])
else:
raise TypeError('other must be a Vector')
def px(self):
return Vector([self[0], 0, 0])
def py(self):
return Vector([0, self[1], 0])
def pz(self):
return Vector([0, 0, self[2]])
def refx(self, offset=0):
return Vector([self[0], -self[1]+2*offset, self[2]])
def refy(self, offset=0):
return Vector([-self[0]+2*offset, self[1], self[2]])
def refz(self, offset=0):
return Vector([self[0], self[1], -self[2]+2*offset])
# if(__name__ == "__main__"):
# x = Vector([1, 0, 0])
# y = Vector([0, -1, 0])
# print(x.rot(y))
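# A few usage examples (added for clarity; the results follow from the definitions above):
#   Vector(3, 4)                         # becomes the 3D vector [3, 4, 0]
#   Vector(3, 4).norm()                  # 5.0
#   Vector(1, 0).cross(Vector(0, 1))     # [0, 0, 1]
#   Vector(1, 0).orth()                  # [0, 1, 0]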
def coor2angle(x, y=None):
if(y is None):
x, y = x
norm = (x**2+y**2)**0.5
if(x != 0 and abs(y/x) < 1):
angle = numpy.arcsin(y/norm)
if(x<0):
angle = numpy.pi - numpy.arcsin(y/norm)
else:
angle = numpy.arccos(x/norm)
if(y<0):
angle = - numpy.arccos(x/norm) + 2*numpy.pi
return angle%(2*numpy.pi)
# if(__name__=="__main__"):
# import matplotlib.pyplot as plt
# plt.figure()
# for theta in numpy.arange(0, 2*numpy.pi, 0.05):
# x, y = numpy.cos(theta), numpy.sin(theta)
# plt.plot(theta, coor2angle(x, y), 'o')
# plt.show() |
py | 1a543d61d2d60640fe39fd0c06163ed3fd09cfab | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils_
"""
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from zipfile import ZipFile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from utils_.downloads import gsutil_getsize
from utils_.metrics import box_iou, fitness
# Settings
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
def set_logging(name=None, verbose=True):
# Sets level and returns logger
for h in logging.root.handlers:
logging.root.removeHandler(h) # remove all handlers associated with the root logger object
rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
return logging.getLogger(name)
LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
# Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
class WorkingDirectory(contextlib.ContextDecorator):
# Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
def __init__(self, new_dir):
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
def try_except(func):
# try-except function. Usage: @try_except decorator
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
return handler
def methods(instance):
# Get class/instance methods
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def print_args(name, opt):
# Print argparser arguments
LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
# cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
import torch.backends.cudnn as cudnn
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
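# A small illustrative sketch of intersect_dicts() with made-up tensors: only keys present in
# both dicts, not matching an 'exclude' substring, and with equal shapes survive (values from da).
def _demo_intersect_dicts():
    da = {'conv.weight': torch.zeros(3, 3), 'head.anchors': torch.zeros(2), 'extra.bias': torch.zeros(1)}
    db = {'conv.weight': torch.ones(3, 3), 'head.anchors': torch.ones(3)}  # anchors shape mismatch
    return intersect_dicts(da, db, exclude=('extra',))  # -> {'conv.weight': da['conv.weight']} only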
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
# Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
env = os.getenv(env_var)
if env:
path = Path(env) # use environment variable
else:
cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
return path
def is_writeable(dir, test=False):
# Return True if directory has write permissions, test opening a file with write permissions if test=True
if test: # method 1
file = Path(dir) / 'tmp.txt'
try:
with open(file, 'w'): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance?
try:
import google.colab
return True
except ImportError:
return False
def is_pip():
# Is file in a pip package?
return 'site-packages' in Path(__file__).resolve().parts
def is_ascii(s=''):
# Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode('ascii', 'ignore')) == len(s)
def is_chinese(s='人工智能'):
# Is string composed of any Chinese characters?
return re.search('[\u4e00-\u9fff]', s)
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def file_size(path):
# Return file/dir size (MB)
path = Path(path)
if path.is_file():
return path.stat().st_size / 1E6
elif path.is_dir():
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
else:
return 0.0
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
@try_except
@WorkingDirectory(ROOT)
def check_git_status():
# Recommend 'git pull' if code is out of date
msg = ', for updates see https://github.com/ultralytics/yolov5'
print(colorstr('github: '), end='')
assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
assert not is_docker(), 'skipping check (Docker image)' + msg
assert check_online(), 'skipping check (offline)' + msg
cmd = 'git fetch && git config --get remote.origin.url'
url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch
branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
def check_python(minimum='3.6.2'):
# Check current python version vs. required python version
check_version(platform.python_version(), minimum, name='Python ', hard=True)
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string
if hard:
assert result, s # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
@try_except
def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
with file.open() as f:
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
    n = 0  # number of packages updated
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
s = f"{prefix} {r} not found and is required by YOLOv5"
if install:
print(f"{s}, attempting auto-update...")
try:
assert check_online(), f"'pip install {r}' skipped (offline)"
print(check_output(f"pip install '{r}'", shell=True).decode())
n += 1
except Exception as e:
print(f'{prefix} {e}')
else:
print(f'{s}. Please install and rerun your command.')
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
# Verify image size is a multiple of stride s in each dimension
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=('.yaml', '.yml')):
# Search/download YAML file (if necessary) and return path, checking suffix
return check_file(file, suffix)
def check_file(file, suffix=''):
# Search/download file (if necessary) and return path
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if Path(file).is_file() or file == '': # exists
return file
elif file.startswith(('http:/', 'https:/')): # download
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
if Path(file).is_file():
print(f'Found {url} locally at {file}') # file already exists
else:
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
return file
else: # search
files = []
for d in 'data', 'models', 'utils_': # search directories
files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
assert len(files), f'File not found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
with open(data, errors='ignore') as f:
data = yaml.safe_load(f) # dictionary
# Parse yaml
path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'
for k in 'train', 'val', 'test':
if data.get(k): # prepend path
data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and autodownload: # download script
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} to {f}...')
torch.hub.download_url_to_file(s, f)
Path(root).mkdir(parents=True, exist_ok=True) # create root
ZipFile(f).extractall(path=root) # unzip
Path(f).unlink() # remove zip
r = None # success
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s, {'yaml': data}) # return None
print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
else:
raise Exception('Dataset not found.')
return data # dictionary
def url2file(url):
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
return file
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
# Multi-threaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if Path(url).is_file(): # exists in current path
Path(url).rename(f) # move to dir
elif not f.exists():
print(f'Downloading {url} to {f}...')
if curl:
os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
else:
torch.hub.download_url_to_file(url, f, progress=True) # torch download
if unzip and f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir) # unzip
elif f.suffix == '.gz':
os.system(f'tar xfz {f} --directory {f.parent}') # unzip
if delete:
f.unlink() # remove zip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns nearest x divisible by divisor
if isinstance(divisor, torch.Tensor):
divisor = int(divisor.max()) # to int
return math.ceil(x / divisor) * divisor
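# Illustrative only: make_divisible() rounds *up* to the nearest multiple of the divisor,
# e.g. an image size of 641 with stride 32 becomes 672, while 640 is already divisible.
def _demo_make_divisible():
    assert make_divisible(640, 32) == 640
    assert make_divisible(641, 32) == 672
    return make_divisible(100, 8)  # -> 104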
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
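# Illustrative only: one_cycle() returns f(x) = y1 + (y2 - y1) * (1 - cos(pi * x / steps)) / 2,
# so f(0) == y1, f(steps) == y2 and f(steps / 2) is the midpoint. A typical (assumed, not shown
# in this file) use is as the lr_lambda of torch.optim.lr_scheduler.LambdaLR.
def _demo_one_cycle():
    f = one_cycle(y1=0.0, y2=1.0, steps=100)
    assert abs(f(0)) < 1e-9 and abs(f(100) - 1.0) < 1e-9 and abs(f(50) - 0.5) < 1e-9
    return f(25)  # ~0.1464, the slow start of the cosine ramp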
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
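# A tiny worked example with made-up labels for a 3-class problem: class 0 appears twice,
# class 1 once, class 2 never. Inverse-frequency weights are 1/2, 1/1, 1/1 (empty bins count
# as 1), which normalize to 0.2, 0.4, 0.4 after dividing by their sum of 2.5.
def _demo_class_weights():
    labels = [np.array([[0, 0.5, 0.5, 0.1, 0.1],
                        [0, 0.2, 0.2, 0.1, 0.1],
                        [1, 0.8, 0.8, 0.1, 0.1]])]
    w = labels_to_class_weights(labels, nc=3)
    assert torch.allclose(w.float(), torch.tensor([0.2, 0.4, 0.4]))
    return w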
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
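# Illustrative only: xyxy2xywh() and xywh2xyxy() are exact inverses, so a made-up corner-format
# box [x1, y1, x2, y2] round-trips through center format [xc, yc, w, h] unchanged.
def _demo_box_roundtrip():
    xyxy = np.array([[10.0, 20.0, 50.0, 80.0]])
    xywh = xyxy2xywh(xyxy)  # -> [[30., 50., 40., 60.]]
    assert np.allclose(xywh2xyxy(xywh), xyxy)
    return xywh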
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
if clip:
clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
if isinstance(boxes, torch.Tensor): # faster individually
boxes[:, 0].clamp_(0, shape[1]) # x1
boxes[:, 1].clamp_(0, shape[0]) # y1
boxes[:, 2].clamp_(0, shape[1]) # x2
boxes[:, 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=(), max_det=300):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
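# A minimal sketch of calling non_max_suppression() on a fake single-image, two-class prediction
# of shape (batch, n_boxes, 5 + nc); every number below is made up. The two candidate boxes
# overlap heavily, so only the higher-confidence one survives NMS.
def _demo_nms():
    pred = torch.zeros(1, 2, 7)  # 1 image, 2 candidate boxes, 2 classes
    pred[0, 0] = torch.tensor([100, 100, 50, 50, 0.9, 0.8, 0.2])  # xywh, obj conf, 2 cls scores
    pred[0, 1] = torch.tensor([102, 101, 50, 50, 0.8, 0.7, 0.3])
    out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
    return out[0]  # (1, 6) tensor: xyxy, conf, cls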
def strip_optimizer(f='best.pt', s=''): # from utils_.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(results, hyp, save_dir, bucket):
evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f'gs://{bucket}/evolve.csv'
if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
# Log to evolve.csv
s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
with open(evolve_csv, 'a') as f:
f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
# Print to screen
print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')
# Save yaml
with open(evolve_yaml, 'w') as f:
data = pd.read_csv(evolve_csv)
data = data.rename(columns=lambda x: x.strip()) # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  # index of the best-fitness generation
f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
f'# Best generation: {i}\n' +
f'# Last generation: {len(data) - 1}\n' +
'# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
'# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to YOLO outputs
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('example%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x224x224)
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
# Variables
NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm
|
py | 1a543d9ed67e08c2d90f2716b25d00bc61bdfff2 | import os
import time
import pytest
import pdb
from VariantCalling import GATK
# test_GATK_UG.py
def test_run_ug(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file using a log file
"""
#create timestamp for log file:
timestr = time.strftime("%Y%m%d_%H%M%S")
outfile = gatk_object.run_caller(program='UnifiedGenotyper',
prefix="{0}/outdir/test".format(datadir),
log_file="{0}/outdir/gatk_ug_{1}.log".format(datadir, timestr))
assert os.path.isfile(outfile) is True
def test_run_ug_nocompress(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file generating an uncompressed VCF
"""
outfile = gatk_object.run_caller(program='UnifiedGenotyper',
prefix="{0}/outdir/test".format(datadir),
compress=False)
assert os.path.isfile(outfile) is True
def test_run_ug_with_ivals(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file and set the interval to
analyze on the command line
"""
outfile = gatk_object.run_caller(program='UnifiedGenotyper',
prefix="{0}/outdir/test1".format(datadir),
L='chr1:10000-20000')
assert os.path.isfile(outfile) is True
def test_run_ug_with_params(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file using some optional params
"""
outfile = gatk_object.run_caller(program='UnifiedGenotyper',
prefix="{0}/outdir/test2".format(datadir), glm='INDEL',
out_mode='EMIT_ALL_SITES')
assert os.path.isfile(outfile) is True
def test_run_ug_with_verbose(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file using verbose=True
"""
outfile = gatk_object.run_caller(program='UnifiedGenotyper',
prefix="{0}/outdir/test2".format(datadir),
verbose=True)
assert os.path.isfile(outfile) is True
def test_run_ug_multithreaded(gatk_object, datadir, clean_tmp):
"""
Test function to run GATK UG on a BAM file using more than one thread
"""
    outfile = gatk_object.run_caller(program='UnifiedGenotyper',
                                     prefix="{0}/outdir/test2".format(datadir),
                                     nt=2)
    assert os.path.isfile(outfile) is True
|
py | 1a543e61afe1e5fb028ae03754da9f1b4be430ae | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
import adopy
import numpy as np
import pytest
import shutil
import os
@pytest.fixture
def steadyflofile(tmpdir):
datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
flofilename = r'flairs.FLO'
flofile = os.path.join(datadir, flofilename)
testfile = tmpdir.join(flofilename)
shutil.copyfile(flofile, testfile)
return testfile
@pytest.fixture
def transientflofile(tmpdir):
datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
flofilename = r'flairs1_2007.flo'
flofile = os.path.join(datadir, flofilename)
testfile = tmpdir.join(flofilename)
shutil.copyfile(flofile, testfile)
return testfile
class TestSteadyFloFile(object):
def test_read(self, steadyflofile):
with adopy.open_flo(steadyflofile, transient=False) as src:
flo = src.as_dict()
assert flo['PHI1'].values.shape == (136365,)
class TestTransientFloFile(object):
def test_read(self, transientflofile):
with adopy.open_flo(transientflofile, transient=True) as src:
flo = src.read() |
py | 1a543f0696018ac7f1587b513ddb8ce6d85d3866 | """Test asyncpraw.models.user."""
import pytest
from asynctest import mock
from asyncpraw.exceptions import RedditAPIException
from asyncpraw.models import Multireddit, Redditor, Subreddit
from .. import IntegrationTest
class TestUser(IntegrationTest):
async def test_blocked(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = await self.reddit.user.blocked()
assert len(blocked) > 0
assert all(isinstance(user, Redditor) for user in blocked)
async def test_blocked_fullname(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = next(iter(await self.reddit.user.blocked()))
assert blocked.fullname.startswith("t2_")
assert not blocked.fullname.startswith("t2_t2")
async def test_contributor_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.contributor_subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
async def test_friends(self):
self.reddit.read_only = False
with self.use_cassette():
friends = await self.reddit.user.friends()
assert len(friends) > 0
assert all(isinstance(friend, Redditor) for friend in friends)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
friend = await self.reddit.user.friends(user=await self.reddit.user.me())
assert isinstance(friend, Redditor)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_not_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
with pytest.raises(RedditAPIException):
await self.reddit.user.friends(user="fake__user_user_user")
async def test_karma(self):
self.reddit.read_only = False
with self.use_cassette():
karma = await self.reddit.user.karma()
assert isinstance(karma, dict)
for subreddit in karma:
assert isinstance(subreddit, Subreddit)
keys = sorted(karma[subreddit].keys())
assert ["comment_karma", "link_karma"] == keys
async def test_me(self):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
assert isinstance(me, Redditor)
me.praw_is_cached = True
me = await self.reddit.user.me()
assert me.praw_is_cached
@mock.patch("asyncio.sleep", return_value=None)
async def test_me__bypass_cache(self, _):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
me.praw_is_cached = True
me = await self.reddit.user.me(use_cache=False)
assert not hasattr(me, "praw_is_cached")
async def test_multireddits(self):
self.reddit.read_only = False
with self.use_cassette():
multireddits = await self.reddit.user.multireddits()
assert isinstance(multireddits, list)
assert multireddits
assert all(isinstance(x, Multireddit) for x in multireddits)
async def test_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
|
py | 1a54403575c2c4c156b4fd6bbdf726cf7205244d | #!/usr/bin/python3
# Generates a table of Unicode NFD-NFC normalizations
# public domain - <[email protected]>
import unicodedata
print("/* automatically generated */\n")
nfd_tbl = []
for c in range(128, 0x530):
# create an NFD-normalized string for this character
s = unicodedata.normalize("NFD", chr(c))
if len(s) != 1:
# NFC string
nfc_s = "\\x%x" % c
# convert the NFD string to hex
nfd_s = "".join([ "\\x%x" % ord(pc) for pc in s ])
nfd_tbl.append([nfc_s, nfd_s, len(s), unicodedata.name(chr(c)), s])
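# Example of the decomposition being tabulated (illustrative): U+00E9 'é' normalizes under NFD
# to the two-character sequence 'e' + U+0301 (combining acute accent).
assert unicodedata.normalize("NFD", "\u00e9") == "e\u0301"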
print("#define NFD_TBL_SIZE %d\n" % len(nfd_tbl))
print("struct unicode_nfd_tbl_e {")
print(" wchar_t nfc;")
print(" wchar_t *nfd;")
print(" int size;")
print("} unicode_nfd_tbl[NFD_TBL_SIZE] = {")
for e in nfd_tbl:
print(" { L'%s', L\"%s\", %d }, /* %s */" % (e[0], e[1], e[2], e[3]))
print("};")
nfc_tbl = sorted(nfd_tbl, key=lambda nfd: nfd[4])
print("struct unicode_nfc_tbl_e {")
print(" wchar_t *nfd;")
print(" wchar_t nfc;")
print(" int size;")
print("} unicode_nfc_tbl[NFD_TBL_SIZE] = {")
for e in nfc_tbl:
print(" { L\"%s\", L'%s', %d }, /* %s */" % (e[1], e[0], e[2], e[3]))
print("};")
|
py | 1a54403803d76fb03f2caf5198d84c48b51b924c | # Generated by Django 2.2.1 on 2019-05-05 03:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0004_sublessonuserdata'),
]
operations = [
migrations.AddField(
model_name='sublessonuserdata',
name='tries',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
|
py | 1a5440401bf523a9d84c6f9aca2d95c5f515f8fe | #!/usr/bin/env python3
# Connect the iPad (ground station) to your computer and find the DJI GO
# flight log. Upload it to https://www.phantomhelp.com/LogViewer, download
# it as csv, and copy it next to the flight movie and srt file.
# Extract the srt form of the subtitles from the dji movie (the caption
# setting needs to be turned on when the movie is recorded):
#
# ffmpeg -txt_format text -i input_file.MOV output_file.srt
import argparse
import cv2
import datetime
import skvideo.io # pip3 install scikit-video
import math
import fractions
import json
from matplotlib import pyplot as plt
import numpy as np
import os
import pyexiv2
import re
import sys
from scipy import interpolate # strait up linear interpolation, nothing fancy
from rcUAS import wgs84
from props import PropertyNode
import props_json
import djilog
parser = argparse.ArgumentParser(description='extract and geotag dji movie frames.')
parser.add_argument('--video', required=True, help='input video')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='down',
help='approximate camera mounting orientation')
parser.add_argument('--interval', type=float, default=1.0, help='extraction interval')
parser.add_argument('--distance', type=float, help='max extraction distance interval')
parser.add_argument('--start-time', type=float, help='begin frame grabbing at this time.')
parser.add_argument('--end-time', type=float, help='end frame grabbing at this time.')
parser.add_argument('--start-counter', type=int, default=1, help='first image counter')
parser.add_argument('--ground', type=float, help='ground altitude in meters')
parser.add_argument('--djicsv', help='name of dji exported csv log file from the flight, see https://www.phantomhelp.com/logviewer/upload/')
args = parser.parse_args()
r2d = 180.0 / math.pi
match_ratio = 0.75
scale = 0.4
filter_method = 'homography'
tol = 3.0
overlap = 0.20
djicsv = djilog.djicsv()
djicsv.load(args.djicsv)
class Fraction(fractions.Fraction):
"""Only create Fractions from floats.
>>> Fraction(0.3)
Fraction(3, 10)
>>> Fraction(1.1)
Fraction(11, 10)
"""
def __new__(cls, value, ignore=None):
"""Should be compatible with Python 2.6, though untested."""
return fractions.Fraction.from_float(value).limit_denominator(99999)
def dms_to_decimal(degrees, minutes, seconds, sign=' '):
"""Convert degrees, minutes, seconds into decimal degrees.
>>> dms_to_decimal(10, 10, 10)
10.169444444444444
>>> dms_to_decimal(8, 9, 10, 'S')
-8.152777777777779
"""
return (-1 if sign[0] in 'SWsw' else 1) * (
float(degrees) +
float(minutes) / 60 +
float(seconds) / 3600
)
def decimal_to_dms(decimal):
"""Convert decimal degrees into degrees, minutes, seconds.
>>> decimal_to_dms(50.445891)
[Fraction(50, 1), Fraction(26, 1), Fraction(113019, 2500)]
>>> decimal_to_dms(-125.976893)
[Fraction(125, 1), Fraction(58, 1), Fraction(92037, 2500)]
"""
remainder, degrees = math.modf(abs(decimal))
remainder, minutes = math.modf(remainder * 60)
return [Fraction(n) for n in (degrees, minutes, remainder * 60)]
# find affine transform between matching keypoints in pixel
# coordinate space. fullAffine=True means unconstrained to
# include best warp/shear. fullAffine=False means limit the
# matrix to only best rotation, translation, and scale.
def findAffine(src, dst, fullAffine=False):
affine_minpts = 7
#print("src:", src)
#print("dst:", dst)
if len(src) >= affine_minpts:
# affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
affine, status = \
cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32))
else:
affine = None
#print str(affine)
return affine
def decomposeAffine(affine):
if affine is None:
return (0.0, 0.0, 0.0, 1.0, 1.0)
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
rotate_deg = math.atan2(-b,a) * 180.0/math.pi
if rotate_deg < -180.0:
rotate_deg += 360.0
if rotate_deg > 180.0:
rotate_deg -= 360.0
return (rotate_deg, tx, ty, sx, sy)
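# A quick worked example (values made up): an affine built from a 30 degree rotation, uniform
# scale 2.0 and translation (5, 7) is recovered, up to floating point, by decomposeAffine().
def _demo_decompose_affine():
    ang = math.radians(30.0)
    affine = np.array([[2.0 * math.cos(ang), -2.0 * math.sin(ang), 5.0],
                       [2.0 * math.sin(ang), 2.0 * math.cos(ang), 7.0]])
    rot, tx, ty, sx, sy = decomposeAffine(affine)
    assert abs(rot - 30.0) < 1e-6 and abs(sx - 2.0) < 1e-6 and abs(sy - 2.0) < 1e-6
    return rot, tx, ty, sx, sy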
def filterMatches(kp1, kp2, matches):
mkp1, mkp2 = [], []
idx_pairs = []
used = np.zeros(len(kp2), np.bool_)
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * match_ratio:
#print " dist[0] = %d dist[1] = %d" % (m[0].distance, m[1].distance)
m = m[0]
# FIXME: ignore the bottom section of movie for feature detection
#if kp1[m.queryIdx].pt[1] > h*0.75:
# continue
if not used[m.trainIdx]:
used[m.trainIdx] = True
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
idx_pairs.append( (m.queryIdx, m.trainIdx) )
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs, idx_pairs, mkp1
def filterFeatures(p1, p2, K, method):
inliers = 0
total = len(p1)
space = ""
status = []
M = None
if len(p1) < 7:
# not enough points
return None, np.zeros(total), [], []
if method == 'homography':
M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
elif method == 'fundamental':
M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
elif method == 'essential':
M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
elif method == 'none':
M = None
status = np.ones(total)
newp1 = []
newp2 = []
for i, flag in enumerate(status):
if flag:
newp1.append(p1[i])
newp2.append(p2[i])
p1 = np.float32(newp1)
p2 = np.float32(newp2)
inliers = np.sum(status)
total = len(status)
#print '%s%d / %d inliers/matched' % (space, np.sum(status), len(status))
return M, status, np.float32(newp1), np.float32(newp2)
# pathname work
abspath = os.path.abspath(args.video)
basename, ext = os.path.splitext(abspath)
srtname = basename + ".srt"
dirname = basename + "_frames"
print("basename:", basename)
print("srtname:", srtname)
print("dirname:", dirname)
local_config = os.path.join(dirname, "camera.json")
config = PropertyNode()
if args.camera:
# seed the camera calibration and distortion coefficients from a
# known camera config
print('Setting camera config from:', args.camera)
props_json.load(args.camera, config)
config.setString('name', args.camera)
props_json.save(local_config, config)
elif os.path.exists(local_config):
# load local config file if it exists
props_json.load(local_config, config)
K_list = []
for i in range(9):
K_list.append( config.getFloatEnum('K', i) )
K = np.copy(np.array(K_list)).reshape(3,3)
dist = []
for i in range(5):
dist.append( config.getFloatEnum("dist_coeffs", i) )
# check for required input files
if not os.path.isfile(args.video):
print("%s doesn't exist, aborting ..." % args.video)
quit()
if os.path.isfile(basename + ".srt"):
srtname = basename + ".srt"
elif os.path.isfile(basename + ".SRT"):
srtname = basename + ".SRT"
else:
print("SRT (caption) file doesn't exist, aborting ...")
quit()
# output directory
os.makedirs(dirname, exist_ok=True)
# setup feature detection
detector = cv2.SIFT_create(nfeatures=1000)
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
flann_params = { 'algorithm': FLANN_INDEX_KDTREE,
'trees': 5 }
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
srt = djilog.djisrt()
srt.load(srtname)
# fetch video metadata
metadata = skvideo.io.ffprobe(args.video)
#print(metadata.keys())
#print(json.dumps(metadata["video"], indent=4))
fps_string = metadata['video']['@avg_frame_rate']
(num, den) = fps_string.split('/')
fps = float(num) / float(den)
codec = metadata['video']['@codec_long_name']
w = int(metadata['video']['@width'])
h = int(metadata['video']['@height'])
print('fps:', fps)
print('codec:', codec)
print('output size:', w, 'x', h)
# extract frames
print("Opening ", args.video)
reader = skvideo.io.FFmpegReader(args.video, inputdict={}, outputdict={})
meta = os.path.join(dirname, "image-metadata.txt")
f = open(meta, 'w')
print("writing meta data to", meta)
last_time = -1000000
counter = 0
img_counter = args.start_counter
last_lat = 0
last_lon = 0
kp_list_ref = []
des_list_ref = []
for frame in reader.nextFrame():
frame = frame[:,:,::-1] # convert from RGB to BGR (to make opencv happy)
time = float(counter) / fps
counter += 1
print("frame:", counter, "time:", "%.3f" % time)
if args.start_time and time < args.start_time:
continue
if args.end_time and time > args.end_time:
break
if srt.need_interpolate:
lat_deg = srt.interp_lats(time)
lon_deg = srt.interp_lons(time)
alt_m = srt.interp_heights(time) + args.ground
else:
if counter - 1 >= len(srt.times):
print("MORE FRAMES THAN SRT ENTRIS")
continue
time_str = srt.times[counter - 1]
lat_deg = srt.lats[counter - 1]
lon_deg = srt.lons[counter - 1]
alt_m = srt.heights[counter - 1]
# compute unix version of timestamp (here in local tz)
main_str, t1, t2 = time_str.split(",")
fraction = (float(t1)*1000 + float(t2)) / 1000000
print("dt:", time_str)
date_time_obj = datetime.datetime.strptime(main_str, '%Y-%m-%d %H:%M:%S')
unix_sec = float(date_time_obj.strftime('%s')) + fraction
print("from local:", unix_sec)
record = djicsv.query(unix_sec)
roll = record['roll']
pitch = record['pitch']
yaw = record['yaw']
if yaw < 0: yaw += 360.0
if abs(lat_deg) < 0.001 and abs(lon_deg) < 0.001:
continue
write_frame = False
# by distance camera has moved
(c1, c2, dist_m) = wgs84.geo_inverse(lat_deg, lon_deg, last_lat, last_lon)
print("dist:", dist_m)
#if time >= last_time + args.interval and dist_m >= args.distance:
if args.distance and dist_m >= args.distance:
write_frame = True
# by visual overlap
method = cv2.INTER_AREA
frame_scale = cv2.resize(frame, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imshow('frame', frame_scale)
gray = cv2.cvtColor(frame_scale, cv2.COLOR_BGR2GRAY)
(h, w) = gray.shape
kp_list = detector.detect(gray)
kp_list, des_list = detector.compute(gray, kp_list)
if not (des_list_ref is None) and not (des_list is None) and len(des_list_ref) and len(des_list):
matches = matcher.knnMatch(des_list, trainDescriptors=des_list_ref, k=2)
p1, p2, kp_pairs, idx_pairs, mkp1 = filterMatches(kp_list, kp_list_ref, matches)
M, status, newp1, newp2 = filterFeatures(p1, p2, K, filter_method)
filtered = []
for i, flag in enumerate(status):
if flag:
filtered.append(mkp1[i])
affine = findAffine(p2, p1, fullAffine=False)
if affine is None:
write_frame = True
else:
(rot, tx, ty, sx, sy) = decomposeAffine(affine)
xperc = abs(tx) / w
yperc = abs(ty) / h
perc = math.sqrt(xperc*xperc + yperc*yperc)
print("pixel dist:", tx, ty, "%.1f%% %.1f%%" % (xperc*100, yperc*100))
if perc >= overlap:
write_frame = True
else:
# first frame
write_frame = True
cv2.waitKey(1)
if write_frame:
print("WRITE FRAME")
file = os.path.join(dirname, "img_%04d" % img_counter + ".jpg")
img_counter += 1
cv2.imwrite(file, frame)
# geotag the image
exif = pyexiv2.ImageMetadata(file)
exif.read()
print(lat_deg, lon_deg, alt_m)
exif['Exif.Image.DateTime'] = time_str
GPS = 'Exif.GPSInfo.GPS'
exif[GPS + 'AltitudeRef'] = '0' if alt_m >= 0 else '1'
exif[GPS + 'Altitude'] = Fraction(alt_m)
exif[GPS + 'Latitude'] = decimal_to_dms(lat_deg)
exif[GPS + 'LatitudeRef'] = 'N' if lat_deg >= 0 else 'S'
exif[GPS + 'Longitude'] = decimal_to_dms(lon_deg)
exif[GPS + 'LongitudeRef'] = 'E' if lon_deg >= 0 else 'W'
exif[GPS + 'MapDatum'] = 'WGS-84'
exif.write()
head, tail = os.path.split(file)
f.write("%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f,%.2f\n" % (tail, lat_deg, lon_deg, alt_m, yaw, pitch, roll, time))
# by distance
last_lat = lat_deg
last_lon = lon_deg
# by time
last_time = time
# by overlap
kp_list_ref = kp_list
des_list_ref = des_list
f.close()
|
py | 1a5442f404b0e40b12bd5191aac6c585defcc442 | import os
import pytest
import fora
import fora.loader
from fora.utils import FatalError
def test_init():
class DefaultArgs:
debug = False
diff = False
fora.args = DefaultArgs()
def test_group_dependency_cycle_complex(request):
os.chdir(request.fspath.dirname)
with pytest.raises(FatalError, match="cycle"):
fora.loader.load_inventory("inventory.py")
os.chdir(request.config.invocation_dir)
|
py | 1a54430e507fbfa4dc762a97cbdbc4cff294cf00 | # -*- coding: utf-8 -*-
"""
cannlytics.traceability..utils.utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains general cannabis analytics utility functions.
"""
from datetime import datetime, timedelta
from re import sub, findall
def camelcase(string):
"""Turn a given string to CamelCase.
Args:
string (str): A given string to turn to CamelCase.
Returns:
(str): A string in CamelCase.
"""
key = ''.join(x for x in string.title() if not x.isspace())
key = key.replace('_', '').replace('-', '')
return key
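# A small illustrative sketch of camelcase() with made-up field names.
def _demo_camelcase():
    assert camelcase('lab results') == 'LabResults'
    assert camelcase('lab_result-id') == 'LabResultId'
    return camelcase('cannabis analytics')  # -> 'CannabisAnalytics'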
def get_timestamp(past=0, future=0, time_zone='local'):
"""Get an ISO formatted timestamp.
Args:
past (int): Number of minutes in the past to get a timestamp.
future (int): Number of minutes into the future to get a timestamp.
time_zone (str): UNIMPLEMENTED Set a given timezone.
Returns:
(str): An ISO formatted date/time string.
"""
now = datetime.now()
now += timedelta(minutes=future)
now -= timedelta(minutes=past)
if time_zone is None:
return now.isoformat()[:19]
else:
return now.isoformat()
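# Illustrative only: get_timestamp() returns an ISO 8601 string for "now", optionally shifted by
# whole minutes, e.g. get_timestamp(past=15) is 15 minutes ago; with time_zone=None the string is
# truncated to whole seconds ('YYYY-MM-DDTHH:MM:SS').
def _demo_get_timestamp():
    return get_timestamp(), get_timestamp(future=60, time_zone=None)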
def snake_case(string):
"""Turn a given string to snake case.
Handles CamelCase, replaces known special characters with
preferred namespaces, replaces spaces with underscores,
and removes all other nuisance characters.
Args:
string (str): The string to turn to snake case.
Returns"
(str): A snake case string.
"""
key = string.replace(' ', '_')
key = key.replace('&', 'and')
key = key.replace('%', 'percent')
key = key.replace('#', 'number')
key = key.replace('$', 'dollars')
key = key.replace('/', '_')
key = key.replace(r'\\', '_')
    key = sub(r'[!@#$%^&*()\[\]{};:,./<>?\\|`~\-=+]', ' ', key)
keys = findall(r'[A-Z]?[a-z]+|[A-Z]{2,}(?=[A-Z][a-z]|\d|\W|$)|\d+', key)
return '_'.join(map(str.lower, keys))
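# A small illustrative sketch of snake_case() with made-up field names.
def _demo_snake_case():
    assert snake_case('Moisture Content') == 'moisture_content'
    assert snake_case('totalCannabinoids') == 'total_cannabinoids'
    return snake_case('CamelCaseString')  # -> 'camel_case_string'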
|
py | 1a5446516c7026e478792e9475c9ed552de00e75 | # Generated by Django 2.2.2 on 2019-06-07 17:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokemon_app', '0005_auto_20190607_1659'),
]
operations = [
migrations.AddField(
model_name='fanart',
name='create_date',
field=models.DateField(default=datetime.datetime.now),
),
]
|
py | 1a5446b04d65eac67c6b8be224b0dcff7aa1de67 | # -*- coding: utf-8 -*-
'''
noxfile
~~~~~~~
Nox configuration script
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import sys
import glob
import json
import pprint
import shutil
import tempfile
if __name__ == '__main__':
sys.stderr.write('Do not execute this file directly. Use nox instead, it will know how to handle this file\n')
sys.stderr.flush()
exit(1)
# Import 3rd-party libs
import nox
from nox.command import CommandFailed
IS_PY3 = sys.version_info > (2,)
# Be verbose when running under a CI context
PIP_INSTALL_SILENT = (os.environ.get('JENKINS_URL') or os.environ.get('CI') or os.environ.get('DRONE')) is None
# Global Path Definitions
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, 'tests', 'support', 'coverage')
IS_WINDOWS = sys.platform.lower().startswith('win')
# Python versions to run against
_PYTHON_VERSIONS = ('2', '2.7', '3', '3.4', '3.5', '3.6', '3.7')
# Nox options
# Reuse existing virtualenvs
nox.options.reuse_existing_virtualenvs = True
# Don't fail on missing interpreters
nox.options.error_on_missing_interpreters = False
def _create_ci_directories():
for dirname in ('logs', 'coverage', 'xml-unittests-output'):
path = os.path.join(REPO_ROOT, 'artifacts', dirname)
if not os.path.exists(path):
os.makedirs(path)
def _get_session_python_version_info(session):
try:
version_info = session._runner._real_python_version_info
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
session_py_version = session.run(
'python', '-c'
'import sys; sys.stdout.write("{}.{}.{}".format(*sys.version_info))',
silent=True,
log=False,
)
version_info = tuple(int(part) for part in session_py_version.split('.') if part.isdigit())
session._runner._real_python_version_info = version_info
finally:
session._runner.global_config.install_only = old_install_only_value
return version_info
def _get_session_python_site_packages_dir(session):
try:
site_packages_dir = session._runner._site_packages_dir
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
site_packages_dir = session.run(
'python', '-c'
'import sys; from distutils.sysconfig import get_python_lib; sys.stdout.write(get_python_lib())',
silent=True,
log=False,
)
session._runner._site_packages_dir = site_packages_dir
finally:
session._runner.global_config.install_only = old_install_only_value
return site_packages_dir
def _get_pydir(session):
version_info = _get_session_python_version_info(session)
if version_info < (2, 7):
session.error('Only Python >= 2.7 is supported')
return 'py{}.{}'.format(*version_info)
def _get_distro_info(session):
try:
distro = session._runner._distro
except AttributeError:
# The distro package doesn't output anything for Windows
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
session.install('--progress-bar=off', 'distro', silent=PIP_INSTALL_SILENT)
output = session.run('distro', '-j', silent=True)
distro = json.loads(output.strip())
session.log('Distro information:\n%s', pprint.pformat(distro))
session._runner._distro = distro
finally:
session._runner.global_config.install_only = old_install_only_value
return distro
def _install_system_packages(session):
'''
Because some python packages are provided by the distribution and cannot
be pip installed, and because we don't want the whole system python packages
on our virtualenvs, we copy the required system python packages into
the virtualenv
'''
system_python_packages = {
'__debian_based_distros__': [
'/usr/lib/python{py_version}/dist-packages/*apt*'
]
}
for key in ('ubuntu-14.04', 'ubuntu-16.04', 'ubuntu-18.04', 'debian-8', 'debian-9'):
system_python_packages[key] = system_python_packages['__debian_based_distros__']
distro = _get_distro_info(session)
distro_keys = [
'{id}'.format(**distro),
'{id}-{version}'.format(**distro),
'{id}-{version_parts[major]}'.format(**distro)
]
version_info = _get_session_python_version_info(session)
py_version_keys = [
'{}'.format(*version_info),
'{}.{}'.format(*version_info)
]
session_site_packages_dir = _get_session_python_site_packages_dir(session)
for distro_key in distro_keys:
if distro_key not in system_python_packages:
continue
patterns = system_python_packages[distro_key]
for pattern in patterns:
for py_version in py_version_keys:
matches = set(glob.glob(pattern.format(py_version=py_version)))
if not matches:
continue
for match in matches:
src = os.path.realpath(match)
dst = os.path.join(session_site_packages_dir, os.path.basename(match))
if os.path.exists(dst):
                        session.log('Not overwriting already existing %s with %s', dst, src)
continue
session.log('Copying %s into %s', src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst)
def _get_distro_pip_constraints(session, transport):
# Install requirements
distro_constraints = []
if transport == 'tcp':
# The TCP requirements are the exact same requirements as the ZeroMQ ones
transport = 'zeromq'
pydir = _get_pydir(session)
if IS_WINDOWS:
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}-windows.txt'.format(transport))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'windows.txt')
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
else:
_install_system_packages(session)
distro = _get_distro_info(session)
distro_keys = [
'linux',
'{id}'.format(**distro),
'{id}-{version}'.format(**distro),
'{id}-{version_parts[major]}'.format(**distro)
]
for distro_key in distro_keys:
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}.txt'.format(distro_key))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
_distro_constraints = os.path.join(REPO_ROOT,
'requirements',
'static',
pydir,
'{}-{}.txt'.format(transport, distro_key))
if os.path.exists(_distro_constraints):
distro_constraints.append(_distro_constraints)
return distro_constraints
def _install_requirements(session, transport, *extra_requirements):
# Install requirements
distro_constraints = _get_distro_pip_constraints(session, transport)
_requirements_files = [
os.path.join(REPO_ROOT, 'requirements', 'base.txt'),
os.path.join(REPO_ROOT, 'requirements', 'zeromq.txt'),
os.path.join(REPO_ROOT, 'requirements', 'pytest.txt')
]
if sys.platform.startswith('linux'):
requirements_files = [
os.path.join(REPO_ROOT, 'requirements', 'static', 'linux.in')
]
elif sys.platform.startswith('win'):
requirements_files = [
os.path.join(REPO_ROOT, 'pkg', 'windows', 'req.txt'),
os.path.join(REPO_ROOT, 'requirements', 'static', 'windows.in')
]
elif sys.platform.startswith('darwin'):
requirements_files = [
os.path.join(REPO_ROOT, 'pkg', 'osx', 'req.txt'),
os.path.join(REPO_ROOT, 'pkg', 'osx', 'req_ext.txt'),
os.path.join(REPO_ROOT, 'requirements', 'static', 'osx.in')
]
while True:
if not requirements_files:
break
requirements_file = requirements_files.pop(0)
if requirements_file not in _requirements_files:
_requirements_files.append(requirements_file)
session.log('Processing {}'.format(requirements_file))
with open(requirements_file) as rfh: # pylint: disable=resource-leakage
for line in rfh:
line = line.strip()
if not line:
continue
if line.startswith('-r'):
reqfile = os.path.join(os.path.dirname(requirements_file), line.strip().split()[-1])
if reqfile in _requirements_files:
continue
_requirements_files.append(reqfile)
continue
for requirements_file in _requirements_files:
install_command = [
'--progress-bar=off', '-r', requirements_file
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
session.install(*install_command, silent=PIP_INSTALL_SILENT)
if extra_requirements:
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command += list(extra_requirements)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
def _run_with_coverage(session, *test_cmd):
session.install('--progress-bar=off', 'coverage==4.5.3', silent=PIP_INSTALL_SILENT)
session.run('coverage', 'erase')
python_path_env_var = os.environ.get('PYTHONPATH') or None
if python_path_env_var is None:
python_path_env_var = SITECUSTOMIZE_DIR
else:
python_path_entries = python_path_env_var.split(os.pathsep)
if SITECUSTOMIZE_DIR in python_path_entries:
python_path_entries.remove(SITECUSTOMIZE_DIR)
python_path_entries.insert(0, SITECUSTOMIZE_DIR)
python_path_env_var = os.pathsep.join(python_path_entries)
try:
session.run(
*test_cmd,
env={
# The updated python path so that sitecustomize is importable
'PYTHONPATH': python_path_env_var,
# The full path to the .coverage data file. Makes sure we always write
# them to the same directory
'COVERAGE_FILE': os.path.abspath(os.path.join(REPO_ROOT, '.coverage')),
# Instruct sub processes to also run under coverage
'COVERAGE_PROCESS_START': os.path.join(REPO_ROOT, '.coveragerc')
}
)
finally:
# Always combine and generate the XML coverage report
try:
session.run('coverage', 'combine')
except CommandFailed:
# Sometimes some of the coverage files are corrupt which would trigger a CommandFailed
# exception
pass
session.run('coverage', 'xml', '-o', os.path.join(REPO_ROOT, 'artifacts', 'coverage', 'coverage.xml'))
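# Editorial sketch (not part of the original noxfile): COVERAGE_PROCESS_START
# only has an effect because a ``sitecustomize.py`` inside SITECUSTOMIZE_DIR
# calls ``coverage.process_startup()`` in every (sub)interpreter.  The helper
# below writes a minimal version of such a file; it is illustrative only and
# is not called by any session.
def _write_minimal_sitecustomize(target_dir):
    sitecustomize_path = os.path.join(target_dir, 'sitecustomize.py')
    with open(sitecustomize_path, 'w') as wfh:
        wfh.write('import coverage\ncoverage.process_startup()\n')
    return sitecustomize_path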
def _runtests(session, coverage, cmd_args):
# Create required artifacts directories
_create_ci_directories()
try:
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', os.path.join('tests', 'runtests.py'), *cmd_args)
else:
session.run('python', os.path.join('tests', 'runtests.py'), *cmd_args)
except CommandFailed:
# Disabling re-running failed tests for the time being
raise
# pylint: disable=unreachable
names_file_path = os.path.join('artifacts', 'failed-tests.txt')
session.log('Re-running failed tests if possible')
session.install('--progress-bar=off', 'xunitparser==1.3.3', silent=PIP_INSTALL_SILENT)
session.run(
'python',
os.path.join('tests', 'support', 'generate-names-file-from-failed-test-reports.py'),
names_file_path
)
if not os.path.exists(names_file_path):
session.log(
'Failed tests file(%s) was not found. Not rerunning failed tests.',
names_file_path
)
# raise the original exception
raise
with open(names_file_path) as rfh:
contents = rfh.read().strip()
if not contents:
session.log(
'The failed tests file(%s) is empty. Not rerunning failed tests.',
names_file_path
)
# raise the original exception
raise
failed_tests_count = len(contents.splitlines())
if failed_tests_count > 500:
# 500 test failures?! Something else must have gone wrong, don't even bother
session.error(
'Total failed tests({}) > 500. No point on re-running the failed tests'.format(
failed_tests_count
)
)
for idx, flag in enumerate(cmd_args[:]):
if '--names-file=' in flag:
cmd_args.pop(idx)
break
elif flag == '--names-file':
cmd_args.pop(idx) # pop --names-file
cmd_args.pop(idx) # pop the actual names file
break
cmd_args.append('--names-file={}'.format(names_file_path))
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'tests.runtests', *cmd_args)
else:
session.run('python', os.path.join('tests', 'runtests.py'), *cmd_args)
# pylint: enable=unreachable
@nox.session(python=_PYTHON_VERSIONS, name='runtests-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodomex'])
def runtests_parametrized(session, coverage, transport, crypto):
# Install requirements
_install_requirements(session, transport, 'unittest-xml-reporting==2.2.1')
if crypto:
if crypto == 'm2crypto':
session.run('pip', 'uninstall', '-y', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
else:
session.run('pip', 'uninstall', '-y', 'm2crypto', silent=True)
distro_constraints = _get_distro_pip_constraints(session, transport)
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--transport={}'.format(transport)
] + session.posargs
_runtests(session, coverage, cmd_args)
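# Editorial note (not part of the original noxfile): a parametrized session is
# selected through its full signature, and anything after ``--`` is handed to
# runtests.py via ``session.posargs``, e.g. (Python version assumed):
#     nox -e "runtests-parametrized-3.7(coverage=False, crypto='m2crypto', transport='tcp')" -- <extra runtests.py arguments>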
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def runtests(session, coverage):
'''
runtests.py session with zeromq transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp')
@nox.parametrize('coverage', [False, True])
def runtests_tcp(session, coverage):
'''
runtests.py session with TCP transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq(session, coverage):
'''
runtests.py session with zeromq transport and default crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_m2crypto(session, coverage):
'''
runtests.py session with zeromq transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_m2crypto(session, coverage):
'''
runtests.py session with TCP transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_m2crypto(session, coverage):
'''
runtests.py session with zeromq transport and m2crypto
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_pycryptodomex(session, coverage):
'''
runtests.py session with zeromq transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_pycryptodomex(session, coverage):
'''
runtests.py session with TCP transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_pycryptodomex(session, coverage):
'''
runtests.py session with zeromq transport and pycryptodomex
'''
session.notify(
'runtests-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-cloud')
@nox.parametrize('coverage', [False, True])
def runtests_cloud(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq', 'unittest-xml-reporting==2.2.1')
pydir = _get_pydir(session)
cloud_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', pydir, 'cloud.txt')
session.install('--progress-bar=off', '-r', cloud_requirements, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--cloud-provider-tests'
] + session.posargs
_runtests(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tornado')
@nox.parametrize('coverage', [False, True])
def runtests_tornado(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq', 'unittest-xml-reporting==2.2.1')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = [
'--tests-logfile={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
] + session.posargs
_runtests(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodomex'])
def pytest_parametrized(session, coverage, transport, crypto):
# Install requirements
_install_requirements(session, transport)
if crypto:
if crypto == 'm2crypto':
session.run('pip', 'uninstall', '-y', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
else:
session.run('pip', 'uninstall', '-y', 'm2crypto', silent=True)
distro_constraints = _get_distro_pip_constraints(session, transport)
install_command = [
'--progress-bar=off',
]
for distro_constraint in distro_constraints:
install_command.extend([
'--constraint', distro_constraint
])
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
'--transport={}'.format(transport)
] + session.posargs
_pytest(session, coverage, cmd_args)
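# Editorial note (not part of the original noxfile): like the runtests sessions
# above, extra arguments after ``--`` reach py.test through ``session.posargs``,
# e.g. ``nox -e "pytest-3.7(coverage=False)" -- tests/unit`` (Python version and
# test path are assumptions).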
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def pytest(session, coverage):
'''
pytest session with zeromq transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp')
@nox.parametrize('coverage', [False, True])
def pytest_tcp(session, coverage):
'''
pytest session with TCP transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq(session, coverage):
'''
pytest session with zeromq transport and default crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=None, transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_m2crypto(session, coverage):
'''
pytest session with zeromq transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_m2crypto(session, coverage):
'''
pytest session with TCP transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_m2crypto(session, coverage):
'''
pytest session with zeromq transport and m2crypto
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'m2crypto\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_pycryptodomex(session, coverage):
'''
pytest session with zeromq transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_pycryptodomex(session, coverage):
'''
pytest session with TCP transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'tcp\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-pycryptodomex')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_pycryptodomex(session, coverage):
'''
pytest session with zeromq transport and pycryptodomex
'''
session.notify(
'pytest-parametrized-{}(coverage={}, crypto=\'pycryptodomex\', transport=\'zeromq\')'.format(
session.python,
coverage
)
)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-cloud')
@nox.parametrize('coverage', [False, True])
def pytest_cloud(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq')
pydir = _get_pydir(session)
cloud_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', pydir, 'cloud.txt')
session.install('--progress-bar=off', '-r', cloud_requirements, silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
os.path.join(REPO_ROOT, 'tests', 'integration', 'cloud', 'providers')
] + session.posargs
_pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tornado')
@nox.parametrize('coverage', [False, True])
def pytest_tornado(session, coverage):
# Install requirements
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = [
'--rootdir', REPO_ROOT,
'--log-file={}'.format(
os.path.join(REPO_ROOT, 'artifacts', 'logs', 'runtests.log')
),
'--no-print-logs',
'-ra',
'-s',
] + session.posargs
_pytest(session, coverage, cmd_args)
def _pytest(session, coverage, cmd_args):
# Create required artifacts directories
_create_ci_directories()
try:
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'py.test', *cmd_args)
else:
session.run('py.test', *cmd_args)
except CommandFailed:
# Re-run failed tests
session.log('Re-running failed tests')
cmd_args.append('--lf')
if coverage is True:
_run_with_coverage(session, 'coverage', 'run', '-m', 'py.test', *cmd_args)
else:
session.run('py.test', *cmd_args)
def _lint(session, rcfile, flags, paths):
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', '-r', 'requirements/static/{}/lint.txt'.format(_get_pydir(session)), silent=PIP_INSTALL_SILENT)
session.run('pylint', '--version')
pylint_report_path = os.environ.get('PYLINT_REPORT')
cmd_args = [
'pylint',
'--rcfile={}'.format(rcfile)
] + list(flags) + list(paths)
stdout = tempfile.TemporaryFile(mode='w+b')
lint_failed = False
try:
session.run(*cmd_args, stdout=stdout)
except CommandFailed:
lint_failed = True
raise
finally:
stdout.seek(0)
contents = stdout.read()
if contents:
if IS_PY3:
contents = contents.decode('utf-8')
else:
contents = contents.encode('utf-8')
sys.stdout.write(contents)
sys.stdout.flush()
if pylint_report_path:
# Write report
with open(pylint_report_path, 'w') as wfh:
wfh.write(contents)
session.log('Report file written to %r', pylint_report_path)
stdout.close()
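# Editorial note (not part of the original noxfile): to keep a copy of the
# pylint output, point PYLINT_REPORT at a writable path before invoking a lint
# session, e.g. (report path assumed):
#     PYLINT_REPORT=artifacts/pylint-salt.txt nox -e lint-salt-2.7 -- salt/modules/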
@nox.session(python='2.7')
def lint(session):
'''
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
'''
session.notify('lint-salt-{}'.format(session.python))
session.notify('lint-tests-{}'.format(session.python))
@nox.session(python='2.7', name='lint-salt')
def lint_salt(session):
'''
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
'''
flags = [
'--disable=I,W1307,C0411,C0413,W8410,str-format-in-logging'
]
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'salt/']
_lint(session, '.testing.pylintrc', flags, paths)
@nox.session(python='2.7', name='lint-tests')
def lint_tests(session):
'''
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
'''
flags = [
'--disable=I,W0232,E1002,W1307,C0411,C0413,W8410,str-format-in-logging'
]
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint(session, '.testing.pylintrc', flags, paths)
@nox.session(python='3')
def docs(session):
'''
Build Salt's Documentation
'''
pydir = _get_pydir(session)
if pydir == 'py3.4':
session.error('Sphinx only runs on Python >= 3.5')
session.install(
'--progress-bar=off',
'-r', 'requirements/static/{}/docs.txt'.format(pydir),
silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
session.run('make', 'clean', external=True)
session.run('make', 'html', 'SPHINXOPTS=-W', external=True)
session.run('tar', '-czvf', 'doc-archive.tar.gz', '_build/html')
os.chdir('..')
|
py | 1a5446e7fb4fb3cd26b68b91b4c9f2ee20c185c6 | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cudf
import cupy as cp
import numpy as np
from pandas import DataFrame as pdDF
from cuml.common import input_to_cuml_array, CumlArray
from cuml.common import input_to_host_array
from cuml.common import has_cupy
from cuml.common.input_utils import convert_dtype
from cuml.common.memory_utils import _check_array_contiguity
from numba import cuda as nbcuda
###############################################################################
# Parameters #
###############################################################################
test_dtypes_all = [
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64
]
test_dtypes_acceptable = [
np.float32, np.float64
]
test_input_types = [
'numpy', 'numba', 'cupy', 'cudf', 'pandas', 'cuml'
]
test_num_rows = [1, 100]
test_num_cols = [1, 100]
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('order', ['C', 'F', 'K'])
def test_input_to_cuml_array(dtype, input_type, num_rows, num_cols, order):
input_data, real_data = get_input(input_type, num_rows, num_cols,
dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
X, n_rows, n_cols, res_dtype = input_to_cuml_array(input_data,
order=order)
np.testing.assert_equal(X.to_output('numpy'), real_data)
assert n_rows == num_rows == X.shape[0] == len(X)
assert n_cols == num_cols == X.shape[1]
assert dtype == res_dtype == X.dtype
del input_data
del real_data
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', ['numba', 'cupy'])
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('order_check', ['C', 'F'])
def test_fail_on_order(dtype, input_type, order, order_check):
    # This is tested only for numba and cupy arrays; cuDF dataframes and numpy
    # arrays are converted to the requested order by their respective libraries.
input_data, real_data = get_input(input_type, 10, 10, dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
if order == order_check:
input_to_cuml_array(input_data, fail_on_order=False, order=order)
else:
with pytest.raises(ValueError):
input_to_cuml_array(input_data, fail_on_order=True,
order=order_check)
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('from_order', ['C', 'F'])
@pytest.mark.parametrize('to_order', ['C', 'F', 'K'])
def test_convert_matrix_order_cuml_array(dtype, input_type, from_order,
to_order):
input_data, real_data = get_input(input_type, 10, 10, dtype,
order=from_order)
# conv_data = np.array(real_data, order=to_order, copy=True)
if from_order == to_order or to_order == 'K':
conv_data, *_ = input_to_cuml_array(input_data, fail_on_order=False,
order=to_order)
else:
        # A warning is raised for numpy, cupy and numba arrays; cuDF and pandas
        # dataframes are converted to the requested order by their respective
        # libraries.
if input_type in ['numpy', 'cupy', 'numba']:
# with pytest.warns(UserWarning):
# warning disabled due to using cuml logger, need to
# adapt tests for that.
conv_data, *_ = input_to_cuml_array(input_data,
fail_on_order=False,
order=to_order)
else:
conv_data, *_ = input_to_cuml_array(input_data,
fail_on_order=False,
order=to_order)
if to_order == 'K':
if input_type in ['cudf']:
assert conv_data.order == 'F'
elif input_type in ['pandas']:
assert conv_data.order == 'C'
else:
assert conv_data.order == from_order
else:
assert conv_data.order == to_order
np.testing.assert_equal(real_data, conv_data.to_output('numpy'))
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('shape', [(1, 10), (10, 1)])
@pytest.mark.parametrize('from_order', ['C', 'F'])
@pytest.mark.parametrize('to_order', ['C', 'F', 'K'])
def test_convert_vector_order_cuml_array(dtype, input_type, shape, from_order,
to_order):
input_data, real_data = get_input(input_type, shape[0], shape[1], dtype,
order=from_order)
# conv_data = np.array(real_data, order=to_order, copy=True)
conv_data, *_ = input_to_cuml_array(input_data, fail_on_order=False,
order=to_order)
np.testing.assert_equal(real_data, conv_data.to_output('numpy'))
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_input_to_host_array(dtype, input_type, num_rows, num_cols, order):
input_data, real_data = get_input(input_type, num_rows, num_cols, dtype,
order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
    X, X_ptr, n_rows, n_cols, res_dtype = input_to_host_array(input_data,
                                                              order=order)
    np.testing.assert_equal(X, real_data)
    assert n_rows == num_rows
    assert n_cols == num_cols
    assert dtype == res_dtype
del input_data
del real_data
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('check_dtype', test_dtypes_all)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_dtype_check(dtype, check_dtype, input_type, order):
if (dtype == np.float16 or check_dtype == np.float16)\
and input_type != 'numpy':
pytest.xfail("float16 not yet supported by numba/cuDF")
if dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
if input_type in ['cudf', 'pandas']:
pytest.xfail("unsigned int types not yet supported")
input_data, real_data = get_input(input_type, 10, 10, dtype, order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
if dtype == check_dtype:
_, _, _, got_dtype = \
input_to_cuml_array(input_data, check_dtype=check_dtype,
order=order)
assert got_dtype == check_dtype
else:
with pytest.raises(TypeError):
_, _, _, got_dtype = \
input_to_cuml_array(input_data, check_dtype=check_dtype,
order=order)
@pytest.mark.parametrize('num_rows', test_num_rows)
@pytest.mark.parametrize('num_cols', test_num_cols)
@pytest.mark.parametrize('to_dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('from_dtype', test_dtypes_all)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_convert_input_dtype(from_dtype, to_dtype, input_type, num_rows,
num_cols, order):
if from_dtype == np.float16 and input_type != 'numpy':
pytest.xfail("float16 not yet supported by numba/cuDF")
if from_dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
if input_type == 'cudf':
pytest.xfail("unsigned int types not yet supported by \
cuDF")
elif not has_cupy():
pytest.xfail("unsigned int types not yet supported by \
cuDF and cuPy is not installed.")
input_data, real_data = get_input(input_type, num_rows, num_cols,
from_dtype, out_dtype=to_dtype,
order=order)
if input_type == 'cupy' and input_data is None:
pytest.skip('cupy not installed')
converted_data = convert_dtype(input_data, to_dtype=to_dtype)
if input_type == 'numpy':
np.testing.assert_equal(converted_data, real_data)
elif input_type == 'cudf':
np.testing.assert_equal(converted_data.as_matrix(), real_data)
elif input_type == 'pandas':
np.testing.assert_equal(converted_data.to_numpy(), real_data)
else:
np.testing.assert_equal(converted_data.copy_to_host(), real_data)
if from_dtype == to_dtype:
check_ptr(converted_data, input_data, input_type)
@pytest.mark.parametrize('dtype', test_dtypes_acceptable)
@pytest.mark.parametrize('input_type', ['numpy', 'cupy'])
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('contiguous', [True, False])
@pytest.mark.parametrize('force_contiguous', [True, False])
def test_non_contiguous_to_contiguous_input(dtype, input_type, order,
contiguous, force_contiguous):
input_data, real_data = get_input(input_type, 10, 8, dtype,
order=order)
if not contiguous:
if order == 'F':
data_view = input_data[:-3]
real_data = real_data[:-3]
else:
data_view = input_data[:, :-3]
real_data = real_data[:, :-3]
else:
data_view = input_data
cumlary, *_ = input_to_cuml_array(data_view,
force_contiguous=force_contiguous)
if force_contiguous:
assert(_check_array_contiguity(cumlary))
np.testing.assert_equal(real_data, cumlary.to_output('numpy'))
###############################################################################
# Utility Functions #
###############################################################################
def check_numpy_order(ary, order):
if order == 'F':
return ary.flags.f_contiguous
else:
return ary.flags.c_contiguous
def check_ptr(a, b, input_type):
if input_type == 'cudf':
for (_, col_a), (_, col_b) in zip(a._data.items(), b._data.items()):
assert col_a.base_data.ptr == col_b.base_data.ptr
else:
def get_ptr(x):
try:
return x.__cuda_array_interface__['data'][0]
except AttributeError:
return x.__array_interface__['data'][0]
if input_type == 'pandas':
a = a.values
b = b.values
assert get_ptr(a) == get_ptr(b)
def get_input(type, nrows, ncols, dtype, order='C', out_dtype=False):
rand_mat = (cp.random.rand(nrows, ncols) * 10)
rand_mat = cp.array(rand_mat, dtype=dtype, order=order)
if type == 'numpy':
result = np.array(cp.asnumpy(rand_mat), order=order)
if type == 'cupy':
result = rand_mat
if type == 'numba':
result = nbcuda.as_cuda_array(rand_mat)
if type == 'cudf':
result = cudf.DataFrame(rand_mat)
if type == 'pandas':
result = pdDF(cp.asnumpy(rand_mat))
if type == 'cuml':
result = CumlArray(data=rand_mat)
if out_dtype:
return result, np.array(cp.asnumpy(rand_mat).astype(out_dtype),
order=order)
else:
return result, np.array(cp.asnumpy(rand_mat), order=order)
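# Editorial usage sketch (not part of the original test module): ``get_input``
# pairs a framework-specific container with its NumPy ground truth, which is
# how the tests above validate conversions, e.g.:
#     cupy_mat, np_mat = get_input('cupy', 5, 3, np.float32, order='F')
#     arr, n_rows, n_cols, dtype = input_to_cuml_array(cupy_mat, order='F')
#     np.testing.assert_equal(arr.to_output('numpy'), np_mat)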
|
py | 1a54477c2bcfbaf61955dd70e45bfa93c4509101 | from typing import Optional
# ListNode is normally provided by the LeetCode judge environment; a minimal
# definition is included here so the snippet is runnable on its own.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def reverse(self, start, end):
r = None
curr = start
while curr!=end:
prev=curr
curr = curr.next
prev.next = r
r = prev
start.next = end
return r
def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
dummy=ListNode(-1, head)
prev=dummy
curr = head
while curr:
start = curr
i = k
while curr and i:
curr=curr.next
i -= 1
if not i:
prev.next = self.reverse(start, curr)
prev = start
return dummy.next
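# Editorial usage sketch (not part of the original snippet): build a small
# list, reverse it in groups of two and print the resulting values.
if __name__ == '__main__':
    head = None
    for val in reversed([1, 2, 3, 4, 5]):
        head = ListNode(val, head)
    node = Solution().reverseKGroup(head, 2)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [2, 1, 4, 3, 5]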
|
py | 1a5448f7b2dc71c151b4c3cf5b8569747d1a3fb5 | # -*- coding: utf-8 -*-
#
# websockets documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 31 20:48:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.abspath('..'), 'src'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append('sphinxcontrib.spelling')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'websockets'
copyright = '2013-{}, Aymeric Augustin'.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0'
# The full version, including alpha/beta/rc tags.
release = '7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'websockets.svg',
'description': 'A library for building WebSocket servers and clients in Python with a focus on correctness and simplicity.',
'github_button': True,
'github_user': 'aaugustin',
'github_repo': 'websockets',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'websocketsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'websockets.tex', 'websockets Documentation',
'Aymeric Augustin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'websockets', 'websockets Documentation',
['Aymeric Augustin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'websockets', 'websockets Documentation',
'Aymeric Augustin', 'websockets', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3/': None}
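# Editorial note (not part of the original configuration): with this file in
# place the HTML documentation is typically built from the docs directory with
#     sphinx-build -b html . _build/html
# and the optional spelling check guarded above is triggered by building with
# the ``spelling`` builder instead.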
|
py | 1a5449028ab640c502342f0983d42533604d12d4 | from django.conf import settings
from django.utils.encoding import force_unicode
import os
import __main__
_map_file_path = '_generated_media_names.py'
_media_dir = '_generated_media'
# __main__ is not guaranteed to have the __file__ attribute
if hasattr(__main__, '__file__'):
_root = os.path.dirname(__main__.__file__)
_map_file_path = os.path.join(_root, _map_file_path)
_media_dir = os.path.join(_root, _media_dir)
GENERATED_MEDIA_DIR = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_DIR', _media_dir))
GENERATED_MEDIA_NAMES_MODULE = getattr(settings, 'GENERATED_MEDIA_NAMES_MODULE',
'_generated_media_names')
GENERATED_MEDIA_NAMES_FILE = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_NAMES_FILE', _map_file_path))
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
DEFAULT_MEDIA_GENERATORS = (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
)
try:
# Only include sprites if PIL is installed
import Image
DEFAULT_MEDIA_GENERATORS += ('mediagenerator.generators.sprites.Sprites',)
except ImportError:
pass
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', DEFAULT_MEDIA_GENERATORS)
_global_media_dirs = getattr(settings, 'GLOBAL_MEDIA_DIRS',
getattr(settings, 'STATICFILES_DIRS', ()))
GLOBAL_MEDIA_DIRS = [os.path.normcase(os.path.normpath(force_unicode(path)))
for path in _global_media_dirs]
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
DEFAULT_FILE_STORAGE = 'mediagenerator.multidomain_media_url.CustomFileSystemStorage'
ENABLE_MULTI_DOMAIN_MEDIA = False
ENABLE_LESS_CSS_CLEAN_CSS = getattr(settings, 'ENABLE_LESS_CSS_CLEAN_CSS', False)
# enable the LESS.js based debugger
ENABLE_LESS_CSS_DEBUG = getattr(settings, 'ENABLE_LESS_CSS_DEBUG', False) |
py | 1a5449041db46ed694c45a059ff59ad488796655 | from sqlalchemy.exc import IntegrityError
from .. import auth
from ..base_view import BaseView
from ..collaborator.models import Collaborator
from ..department.models import Department
from ..dependent.models import Dependent
from ..dependent.schemas import DependentsSchema
from .schemas import CollaboratorSchema
class CollaboratorView(BaseView):
schema = CollaboratorSchema
model = Collaborator
@auth.login_required
def get(self, id=None):
if id:
collaborator = self.model.query.filter_by(id=id).first_or_404(
"Collaborator with id not found"
)
return self.jsonify(collaborator), 200
return self.jsonify(self.model.query.all(), many=True), 200
@auth.login_required
def post(self):
try:
super(CollaboratorView, self).post()
data = self.get_data()
department = data.pop("department")
department = Department.query.filter_by(name=department).first_or_404(
"Department with name not found"
)
data["department"] = department
collaborator = self.model(**data)
collaborator.save()
return self.jsonify(collaborator), 201
except IntegrityError:
self.abort(400, "Collaborator already exists")
@auth.login_required
def delete(self, id=None):
collaborator = (
self.model().query.filter_by(id=id).first_or_404("Collaborator not found")
)
collaborator.delete()
return super(CollaboratorView, self).delete()
@auth.login_required
def put(self, id=None):
try:
super(CollaboratorView, self).put()
collaborator = (
self.model()
.query.filter_by(id=id)
.first_or_404("Collaborator not found")
)
data = self.get_data(partial=True)
department = data.get("department")
if department:
department_instance = Department.query.filter_by(
name=department
).first_or_404("Department not found")
collaborator.department = department_instance
collaborator.full_name = data.get("full_name", collaborator.full_name)
collaborator.save()
return self.jsonify(collaborator), 200
except IntegrityError:
self.abort(400, "Collaborator already exists")
class CollaboratorDependents(BaseView):
model = Dependent
@auth.login_required
def post(self, id=None):
collaborator = Collaborator.query.filter_by(id=id).first_or_404(
"Collaborator with id not found"
)
schema = DependentsSchema()
data = self.get_data(schema)
data["collaborator"] = collaborator
dependent = self.model(**data)
dependent.save()
return schema.jsonify(dependent), 201
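# Editorial sketch (not part of the original module): these method-based views
# are typically registered on the application or a blueprint roughly like the
# following (URL prefixes and endpoint names are assumptions):
#     app.add_url_rule('/collaborators',
#                      view_func=CollaboratorView.as_view('collaborators'))
#     app.add_url_rule('/collaborators/<int:id>',
#                      view_func=CollaboratorView.as_view('collaborator'))
#     app.add_url_rule('/collaborators/<int:id>/dependents',
#                      view_func=CollaboratorDependents.as_view('collaborator_dependents'))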
|
py | 1a54490797f5fbfd645c9f2e116a82f1fec52707 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _saturation
class Filter(BaseFilter):
@filter_method(BaseFilter.DecimalNumber)
def saturation(self, change):
mode, data = self.engine.image_data_as_rgb()
imgdata = _saturation.apply(mode, change, data)
self.engine.set_image_data(imgdata)
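# Editorial note (not part of the original filter module): once this filter is
# enabled in thumbor's FILTERS setting it is applied through the request URL,
# for example (host and image path are placeholders):
#     http://localhost:8888/unsafe/filters:saturation(1.5)/path/to/image.jpg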
|
py | 1a54492474e73ca68cbde65172db4a43cf1790f0 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_voip_profile
short_description: Configure VoIP profiles in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify voip feature and profile category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
voip_profile:
description:
- Configure VoIP profiles.
default: null
type: dict
suboptions:
comment:
description:
- Comment.
type: str
name:
description:
- Profile name.
required: true
type: str
sccp:
description:
- SCCP.
type: dict
suboptions:
block_mcast:
description:
- Enable/disable block multicast RTP connections.
type: str
choices:
- disable
- enable
log_call_summary:
description:
- Enable/disable log summary of SCCP calls.
type: str
choices:
- disable
- enable
log_violations:
description:
- Enable/disable logging of SCCP violations.
type: str
choices:
- disable
- enable
max_calls:
description:
- Maximum calls per minute per SCCP client (max 65535).
type: int
status:
description:
- Enable/disable SCCP.
type: str
choices:
- disable
- enable
verify_header:
description:
- Enable/disable verify SCCP header content.
type: str
choices:
- disable
- enable
sip:
description:
- SIP.
type: dict
suboptions:
ack_rate:
description:
- ACK request rate limit (per second, per policy).
type: int
block_ack:
description:
- Enable/disable block ACK requests.
type: str
choices:
- disable
- enable
block_bye:
description:
- Enable/disable block BYE requests.
type: str
choices:
- disable
- enable
block_cancel:
description:
- Enable/disable block CANCEL requests.
type: str
choices:
- disable
- enable
block_geo_red_options:
description:
- Enable/disable block OPTIONS requests, but OPTIONS requests still notify for redundancy.
type: str
choices:
- disable
- enable
block_info:
description:
- Enable/disable block INFO requests.
type: str
choices:
- disable
- enable
block_invite:
description:
- Enable/disable block INVITE requests.
type: str
choices:
- disable
- enable
block_long_lines:
description:
- Enable/disable block requests with headers exceeding max-line-length.
type: str
choices:
- disable
- enable
block_message:
description:
- Enable/disable block MESSAGE requests.
type: str
choices:
- disable
- enable
block_notify:
description:
- Enable/disable block NOTIFY requests.
type: str
choices:
- disable
- enable
block_options:
description:
- Enable/disable block OPTIONS requests and no OPTIONS as notifying message for redundancy either.
type: str
choices:
- disable
- enable
block_prack:
description:
- Enable/disable block prack requests.
type: str
choices:
- disable
- enable
block_publish:
description:
- Enable/disable block PUBLISH requests.
type: str
choices:
- disable
- enable
block_refer:
description:
- Enable/disable block REFER requests.
type: str
choices:
- disable
- enable
block_register:
description:
- Enable/disable block REGISTER requests.
type: str
choices:
- disable
- enable
block_subscribe:
description:
- Enable/disable block SUBSCRIBE requests.
type: str
choices:
- disable
- enable
block_unknown:
description:
- Block unrecognized SIP requests (enabled by default).
type: str
choices:
- disable
- enable
block_update:
description:
- Enable/disable block UPDATE requests.
type: str
choices:
- disable
- enable
bye_rate:
description:
- BYE request rate limit (per second, per policy).
type: int
call_keepalive:
description:
- Continue tracking calls with no RTP for this many minutes.
type: int
cancel_rate:
description:
- CANCEL request rate limit (per second, per policy).
type: int
contact_fixup:
description:
- "Fixup contact anyway even if contact's IP:port doesn't match session's IP:port."
type: str
choices:
- disable
- enable
hnt_restrict_source_ip:
description:
- Enable/disable restrict RTP source IP to be the same as SIP source IP when HNT is enabled.
type: str
choices:
- disable
- enable
hosted_nat_traversal:
description:
- Hosted NAT Traversal (HNT).
type: str
choices:
- disable
- enable
info_rate:
description:
- INFO request rate limit (per second, per policy).
type: int
invite_rate:
description:
- INVITE request rate limit (per second, per policy).
type: int
ips_rtp:
description:
- Enable/disable allow IPS on RTP.
type: str
choices:
- disable
- enable
log_call_summary:
description:
- Enable/disable logging of SIP call summary.
type: str
choices:
- disable
- enable
log_violations:
description:
- Enable/disable logging of SIP violations.
type: str
choices:
- disable
- enable
malformed_header_allow:
description:
- Action for malformed Allow header.
type: str
choices:
- discard
- pass
- respond
malformed_header_call_id:
description:
- Action for malformed Call-ID header.
type: str
choices:
- discard
- pass
- respond
malformed_header_contact:
description:
- Action for malformed Contact header.
type: str
choices:
- discard
- pass
- respond
malformed_header_content_length:
description:
- Action for malformed Content-Length header.
type: str
choices:
- discard
- pass
- respond
malformed_header_content_type:
description:
- Action for malformed Content-Type header.
type: str
choices:
- discard
- pass
- respond
malformed_header_cseq:
description:
- Action for malformed CSeq header.
type: str
choices:
- discard
- pass
- respond
malformed_header_expires:
description:
- Action for malformed Expires header.
type: str
choices:
- discard
- pass
- respond
malformed_header_from:
description:
- Action for malformed From header.
type: str
choices:
- discard
- pass
- respond
malformed_header_max_forwards:
description:
- Action for malformed Max-Forwards header.
type: str
choices:
- discard
- pass
- respond
malformed_header_p_asserted_identity:
description:
- Action for malformed P-Asserted-Identity header.
type: str
choices:
- discard
- pass
- respond
malformed_header_rack:
description:
- Action for malformed RAck header.
type: str
choices:
- discard
- pass
- respond
malformed_header_record_route:
description:
- Action for malformed Record-Route header.
type: str
choices:
- discard
- pass
- respond
malformed_header_route:
description:
- Action for malformed Route header.
type: str
choices:
- discard
- pass
- respond
malformed_header_rseq:
description:
- Action for malformed RSeq header.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_a:
description:
- Action for malformed SDP a line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_b:
description:
- Action for malformed SDP b line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_c:
description:
- Action for malformed SDP c line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_i:
description:
- Action for malformed SDP i line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_k:
description:
- Action for malformed SDP k line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_m:
description:
- Action for malformed SDP m line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_o:
description:
- Action for malformed SDP o line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_r:
description:
- Action for malformed SDP r line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_s:
description:
- Action for malformed SDP s line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_t:
description:
- Action for malformed SDP t line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_v:
description:
- Action for malformed SDP v line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_z:
description:
- Action for malformed SDP z line.
type: str
choices:
- discard
- pass
- respond
malformed_header_to:
description:
- Action for malformed To header.
type: str
choices:
- discard
- pass
- respond
malformed_header_via:
description:
- Action for malformed VIA header.
type: str
choices:
- discard
- pass
- respond
malformed_request_line:
description:
- Action for malformed request line.
type: str
choices:
- discard
- pass
- respond
max_body_length:
description:
- Maximum SIP message body length (0 meaning no limit).
type: int
max_dialogs:
description:
- Maximum number of concurrent calls/dialogs (per policy).
type: int
max_idle_dialogs:
description:
- Maximum number established but idle dialogs to retain (per policy).
type: int
max_line_length:
description:
- Maximum SIP header line length (78-4096).
type: int
message_rate:
description:
- MESSAGE request rate limit (per second, per policy).
type: int
nat_trace:
description:
- Enable/disable preservation of original IP in SDP i line.
type: str
choices:
- disable
- enable
no_sdp_fixup:
description:
- Enable/disable no SDP fix-up.
type: str
choices:
- disable
- enable
notify_rate:
description:
- NOTIFY request rate limit (per second, per policy).
type: int
open_contact_pinhole:
description:
- Enable/disable open pinhole for non-REGISTER Contact port.
type: str
choices:
- disable
- enable
open_record_route_pinhole:
description:
- Enable/disable open pinhole for Record-Route port.
type: str
choices:
- disable
- enable
open_register_pinhole:
description:
- Enable/disable open pinhole for REGISTER Contact port.
type: str
choices:
- disable
- enable
open_via_pinhole:
description:
- Enable/disable open pinhole for Via port.
type: str
choices:
- disable
- enable
options_rate:
description:
- OPTIONS request rate limit (per second, per policy).
type: int
prack_rate:
description:
- PRACK request rate limit (per second, per policy).
type: int
preserve_override:
description:
- "Override i line to preserve original IPS ."
type: str
choices:
- disable
- enable
provisional_invite_expiry_time:
description:
- Expiry time for provisional INVITE (10 - 3600 sec).
type: int
publish_rate:
description:
- PUBLISH request rate limit (per second, per policy).
type: int
refer_rate:
description:
- REFER request rate limit (per second, per policy).
type: int
register_contact_trace:
description:
- Enable/disable trace original IP/port within the contact header of REGISTER requests.
type: str
choices:
- disable
- enable
register_rate:
description:
- REGISTER request rate limit (per second, per policy).
type: int
rfc2543_branch:
description:
- Enable/disable support via branch compliant with RFC 2543.
type: str
choices:
- disable
- enable
rtp:
description:
- Enable/disable create pinholes for RTP traffic to traverse firewall.
type: str
choices:
- disable
- enable
ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
type: str
choices:
- high
- medium
- low
ssl_auth_client:
description:
- Require a client certificate and authenticate it with the peer/peergrp. Source user.peer.name user.peergrp.name.
type: str
ssl_auth_server:
description:
- Authenticate the server's certificate with the peer/peergrp. Source user.peer.name user.peergrp.name.
type: str
ssl_client_certificate:
description:
- Name of Certificate to offer to server if requested. Source vpn.certificate.local.name.
type: str
ssl_client_renegotiation:
description:
- Allow/block client renegotiation by server.
type: str
choices:
- allow
- deny
- secure
ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- SSL/TLS mode for encryption & decryption of traffic.
type: str
choices:
- off
- full
ssl_pfs:
description:
- SSL Perfect Forward Secrecy.
type: str
choices:
- require
- deny
- allow
ssl_send_empty_frags:
description:
- Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only).
type: str
choices:
- enable
- disable
ssl_server_certificate:
description:
- Name of Certificate return to the client in every SSL connection. Source vpn.certificate.local.name.
type: str
status:
description:
- Enable/disable SIP.
type: str
choices:
- disable
- enable
strict_register:
description:
- Enable/disable only allow the registrar to connect.
type: str
choices:
- disable
- enable
subscribe_rate:
description:
- SUBSCRIBE request rate limit (per second, per policy).
type: int
unknown_header:
description:
- Action for unknown SIP header.
type: str
choices:
- discard
- pass
- respond
update_rate:
description:
- UPDATE request rate limit (per second, per policy).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VoIP profiles.
fortios_voip_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
voip_profile:
comment: "Comment."
name: "default_name_4"
sccp:
block_mcast: "disable"
log_call_summary: "disable"
log_violations: "disable"
max_calls: "9"
status: "disable"
verify_header: "disable"
sip:
ack_rate: "13"
block_ack: "disable"
block_bye: "disable"
block_cancel: "disable"
block_geo_red_options: "disable"
block_info: "disable"
block_invite: "disable"
block_long_lines: "disable"
block_message: "disable"
block_notify: "disable"
block_options: "disable"
block_prack: "disable"
block_publish: "disable"
block_refer: "disable"
block_register: "disable"
block_subscribe: "disable"
block_unknown: "disable"
block_update: "disable"
bye_rate: "31"
call_keepalive: "32"
cancel_rate: "33"
contact_fixup: "disable"
hnt_restrict_source_ip: "disable"
hosted_nat_traversal: "disable"
info_rate: "37"
invite_rate: "38"
ips_rtp: "disable"
log_call_summary: "disable"
log_violations: "disable"
malformed_header_allow: "discard"
malformed_header_call_id: "discard"
malformed_header_contact: "discard"
malformed_header_content_length: "discard"
malformed_header_content_type: "discard"
malformed_header_cseq: "discard"
malformed_header_expires: "discard"
malformed_header_from: "discard"
malformed_header_max_forwards: "discard"
malformed_header_p_asserted_identity: "discard"
malformed_header_rack: "discard"
malformed_header_record_route: "discard"
malformed_header_route: "discard"
malformed_header_rseq: "discard"
malformed_header_sdp_a: "discard"
malformed_header_sdp_b: "discard"
malformed_header_sdp_c: "discard"
malformed_header_sdp_i: "discard"
malformed_header_sdp_k: "discard"
malformed_header_sdp_m: "discard"
malformed_header_sdp_o: "discard"
malformed_header_sdp_r: "discard"
malformed_header_sdp_s: "discard"
malformed_header_sdp_t: "discard"
malformed_header_sdp_v: "discard"
malformed_header_sdp_z: "discard"
malformed_header_to: "discard"
malformed_header_via: "discard"
malformed_request_line: "discard"
max_body_length: "71"
max_dialogs: "72"
max_idle_dialogs: "73"
max_line_length: "74"
message_rate: "75"
nat_trace: "disable"
no_sdp_fixup: "disable"
notify_rate: "78"
open_contact_pinhole: "disable"
open_record_route_pinhole: "disable"
open_register_pinhole: "disable"
open_via_pinhole: "disable"
options_rate: "83"
prack_rate: "84"
preserve_override: "disable"
provisional_invite_expiry_time: "86"
publish_rate: "87"
refer_rate: "88"
register_contact_trace: "disable"
register_rate: "90"
rfc2543_branch: "disable"
rtp: "disable"
ssl_algorithm: "high"
ssl_auth_client: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl_auth_server: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl_client_certificate: "<your_own_value> (source vpn.certificate.local.name)"
ssl_client_renegotiation: "allow"
ssl_max_version: "ssl-3.0"
ssl_min_version: "ssl-3.0"
ssl_mode: "off"
ssl_pfs: "require"
ssl_send_empty_frags: "enable"
ssl_server_certificate: "<your_own_value> (source vpn.certificate.local.name)"
status: "disable"
strict_register: "disable"
subscribe_rate: "106"
unknown_header: "discard"
update_rate: "108"
'''
RETURN = '''
build:
        description: Build number of the FortiGate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_voip_profile_data(json):
option_list = ['comment', 'name', 'sccp',
'sip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Convert each element in place; re-binding the loop variable alone
        # would leave list entries unconverted.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
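# Illustrative note (added for clarity, not part of the generated module): the
# FortiOS API expects hyphenated keys, so a filtered payload such as
# {'name': 'default', 'sip': {'ssl_min_version': 'tls-1.2'}} becomes
# {'name': 'default', 'sip': {'ssl-min-version': 'tls-1.2'}} after
# underscore_to_hyphen() is applied and before it is sent to the device.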
def voip_profile(data, fos):
vdom = data['vdom']
state = data['state']
voip_profile_data = data['voip_profile']
filtered_data = underscore_to_hyphen(filter_voip_profile_data(voip_profile_data))
if state == "present":
return fos.set('voip',
'profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('voip',
'profile',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
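# Note (added for clarity): a DELETE request that returns HTTP 404 is treated as
# success above, because the object already being absent is the desired end
# state of a task with state "absent".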
def fortios_voip(data, fos):
if data['voip_profile']:
resp = voip_profile(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"voip_profile": {
"required": False, "type": "dict", "default": None,
"options": {
"comment": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"sccp": {"required": False, "type": "dict",
"options": {
"block_mcast": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_call_summary": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_violations": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"max_calls": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"verify_header": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}},
"sip": {"required": False, "type": "dict",
"options": {
"ack_rate": {"required": False, "type": "int"},
"block_ack": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_bye": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_cancel": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_geo_red_options": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_info": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_invite": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_long_lines": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_message": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_notify": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_options": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_prack": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_publish": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_refer": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_register": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_subscribe": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_unknown": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"block_update": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"bye_rate": {"required": False, "type": "int"},
"call_keepalive": {"required": False, "type": "int"},
"cancel_rate": {"required": False, "type": "int"},
"contact_fixup": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"hnt_restrict_source_ip": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"hosted_nat_traversal": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"info_rate": {"required": False, "type": "int"},
"invite_rate": {"required": False, "type": "int"},
"ips_rtp": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_call_summary": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_violations": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"malformed_header_allow": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_call_id": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_contact": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_content_length": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_content_type": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_cseq": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_expires": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_from": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_max_forwards": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_p_asserted_identity": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_rack": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_record_route": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_route": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_rseq": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_a": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_b": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_c": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_i": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_k": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_m": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_o": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_r": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_s": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_t": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_v": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_sdp_z": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_to": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_header_via": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"malformed_request_line": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"max_body_length": {"required": False, "type": "int"},
"max_dialogs": {"required": False, "type": "int"},
"max_idle_dialogs": {"required": False, "type": "int"},
"max_line_length": {"required": False, "type": "int"},
"message_rate": {"required": False, "type": "int"},
"nat_trace": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"no_sdp_fixup": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"notify_rate": {"required": False, "type": "int"},
"open_contact_pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open_record_route_pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open_register_pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"open_via_pinhole": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"options_rate": {"required": False, "type": "int"},
"prack_rate": {"required": False, "type": "int"},
"preserve_override": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"provisional_invite_expiry_time": {"required": False, "type": "int"},
"publish_rate": {"required": False, "type": "int"},
"refer_rate": {"required": False, "type": "int"},
"register_contact_trace": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"register_rate": {"required": False, "type": "int"},
"rfc2543_branch": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"rtp": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl_algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low"]},
"ssl_auth_client": {"required": False, "type": "str"},
"ssl_auth_server": {"required": False, "type": "str"},
"ssl_client_certificate": {"required": False, "type": "str"},
"ssl_client_renegotiation": {"required": False, "type": "str",
"choices": ["allow", "deny", "secure"]},
"ssl_max_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl_min_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl_mode": {"required": False, "type": "str",
"choices": ["off", "full"]},
"ssl_pfs": {"required": False, "type": "str",
"choices": ["require", "deny", "allow"]},
"ssl_send_empty_frags": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl_server_certificate": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"strict_register": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"subscribe_rate": {"required": False, "type": "int"},
"unknown_header": {"required": False, "type": "str",
"choices": ["discard", "pass", "respond"]},
"update_rate": {"required": False, "type": "int"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_voip(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_voip(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
py | 1a5449b0de14968d186c799d8659f7f47c803556 | from .core import ( # noqa
AssetID,
AssetIDPlusDay,
EPOCH,
ExplodingObject,
FakeDataPortal,
FetcherDataPortal,
MockDailyBarReader,
OpenPrice,
RecordBatchBlotter,
add_security_data,
all_pairs_matching_predicate,
all_subindices,
assert_single_position,
assert_timestamp_equal,
check_allclose,
check_arrays,
chrange,
create_daily_df_for_asset,
create_data_portal,
create_data_portal_from_trade_history,
create_empty_splits_mergers_frame,
create_minute_bar_data,
create_minute_df_for_asset,
drain_zipline,
empty_asset_finder,
empty_assets_db,
empty_trading_env,
make_alternating_boolean_array,
make_cascading_boolean_array,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
patch_os_environment,
patch_read_csv,
permute_rows,
powerset,
product_upper_triangle,
read_compressed,
seconds_to_timestamp,
security_list_copy,
str_to_seconds,
subtest,
temp_pipeline_engine,
test_resource_path,
tmp_asset_finder,
tmp_assets_db,
tmp_bcolz_equity_minute_bar_reader,
tmp_dir,
tmp_trading_env,
to_series,
to_utc,
trades_by_sid_to_dfs,
write_bcolz_minute_data,
write_compressed,
)
from .fixtures import ZiplineTestCase # noqa
|
py | 1a544b9322d9ee0ccbb05350ca2ce9a386064acb | # -*- coding: utf-8 -*-
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from libbifrost import _bf, _check, _get, BifrostObject
import os
import time
import ctypes
import numpy as np
try:
import simplejson as json
except ImportError:
print "WARNING: Install simplejson for better performance"
import json
class ProcLog(BifrostObject):
def __init__(self, name):
BifrostObject.__init__(
self, _bf.bfProcLogCreate, _bf.bfProcLogDestroy, name)
def update(self, contents):
"""Updates (replaces) the contents of the log
contents: string or dict containing data to write to the log
"""
if contents is None:
raise ValueError("Contents cannot be None")
if isinstance(contents, dict):
contents = '\n'.join(['%s : %s' % item
for item in contents.items()])
_check(_bf.bfProcLogUpdate(self.obj, contents))
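# Minimal usage sketch (illustrative only; the log name and keys below are
# made-up placeholders):
#   log = ProcLog("my_block/perf")
#   log.update({'acquire_time': 0.01, 'process_time': 0.02})
# which writes "acquire_time : 0.01"-style lines to the underlying log file.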
def _multi_convert(value):
"""
Function try and convert numerical values to numerical types.
"""
try:
value = int(value, 10)
except ValueError:
try:
value = float(value)
except ValueError:
pass
return value
def load_by_filename(filename):
"""
Function to read in a ProcLog file and return the contents as a
dictionary.
"""
contents = {}
with open(filename, 'r') as fh:
## Read the file all at once to avoid problems but only after it has a size
        for attempt in range(5):
if os.path.getsize(filename) != 0:
break
time.sleep(0.001)
lines = fh.read()
## Loop through lines
for line in lines.split('\n'):
### Parse the key : value pairs
try:
key, value = line.split(':', 1)
except ValueError:
continue
### Trim off excess whitespace
key = key.strip().rstrip()
value = value.strip().rstrip()
### Convert and save
contents[key] = _multi_convert(value)
# Done
return contents
def load_by_pid(pid, include_rings=False):
"""
Function to read in and parse all ProcLog files associated with a given
process ID. The contents of these files are returned as a collection of
dictionaries ordered by:
block name
ProcLog name
entry name
"""
# Make sure we have a directory to load from
baseDir = os.path.join('/dev/shm/bifrost/', str(pid))
if not os.path.isdir(baseDir):
raise RuntimeError("Cannot find log directory associated with PID %s" % pid)
# Load
contents = {}
for parent,subnames,filenames in os.walk(baseDir):
for filename in filenames:
filename = os.path.join(parent, filename)
## Extract the block and logfile names
logName = os.path.basename(filename)
blockName = os.path.basename( os.path.dirname(filename) )
if blockName == 'rings' and not include_rings:
continue
## Load the file's contents
try:
subContents = load_by_filename(filename)
except IOError:
continue
## Save
try:
contents[blockName][logName] = subContents
except KeyError:
contents[blockName] = {logName:subContents}
# Done
return contents
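# Minimal usage sketch (illustrative only; assumes a bifrost pipeline with the
# given PID is running and writing ProcLog files under /dev/shm/bifrost/<pid>):
#   contents = load_by_pid(12345)
#   for block_name, logs in contents.items():
#       print(block_name, sorted(logs.keys()))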
|
py | 1a544c0d5d198861bee46d9b745cc66011de750b | import pyrogram
import asyncio
import os
from pyrogram import Client, filters
from pyrogram.types import Message, User, InlineKeyboardMarkup, InlineKeyboardButton
from donlee_robot.donlee_robot import DonLee_Robot
from config import FORCE_CHANNEL, SAVE_USER, DEV_USERNAME, WELCOME_BUTTON_NAME, CUSTOM_WELCOME_TEXT, CUSTOM_WELCOME
# f"👋Hy {mention} Welcome To {groupname}"
Url = f"t.me/{FORCE_CHANNEL}"
WELCOME_BUTTONS = [[ InlineKeyboardButton(WELCOME_BUTTON_NAME, url=Url)]]
@DonLee_Robot.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Rᴇᴘʟɪᴇᴅ Usᴇʀ ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Yᴏᴜʀ ID : `{user_id}`\nTʜɪs Gʀᴏᴜᴘ ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@DonLee_Robot.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>👨💼Nᴀᴍᴇ</b> : {name}\n\n"
f"<b>📃Usᴇʀ ID</b> : <code>{id}</code>\n\n"
f"<b>👤Usᴇʀɴᴀᴍᴇ</b> : {user_name}\n\n"
f"<b>🔐Pᴇʀᴍᴀɴᴀɴᴛ USER ʟɪɴᴋ</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>📑DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@DonLee_Robot.on_message(filters.group & filters.forwarded)
async def forward(bot, message):
await message.delete()
@DonLee_Robot.on_message(filters.group & filters.via_bot)
async def inline(bot, message):
await message.delete()
@DonLee_Robot.on_message(filters.new_chat_members)
async def auto_welcome(bot: DonLee_Robot, msg: Message):
# from PR0FESS0R-99 import Auto-Welcome-Bot
# from PR0FESS0R-99 import ID-Bot
# first = msg.from_user.first_name
# last = msg.from_user.last_name
# mention = msg.from_user.mention
# username = msg.from_user.username
# id = msg.from_user.id
# group_name = msg.chat.title
# group_username = msg.chat.username
# button_name = os.environ.get("WELCOME_BUTTON_NAME", name_button)
# button_link = os.environ.get("WELCOME_BUTTON_LINK", link_button)
# welcome_text = f"Hey {mention}\nWelcome To {group_name}"
# WELCOME_TEXT = os.environ.get("WELCOME_TEXT", welcome_text)
print("Welcome Message Activate")
# YES = "True"
# NO = "False"
# HOOOO = CUSTOM_WELCOME
# BUTTON = bool(os.environ.get("CUSTOM_WELCOME"))
if CUSTOM_WELCOME == "yes":
Auto_Delete=await msg.reply_text(text=CUSTOM_WELCOME_TEXT.format(
mention = msg.from_user.mention,
groupname = msg.chat.title
),
reply_markup=InlineKeyboardMarkup(WELCOME_BUTTONS)
)
await asyncio.sleep(60) # in seconds
await Auto_Delete.delete()
else:
await msg.delete()
@DonLee_Robot.on_message((filters.command(["report"]) | filters.regex("@admins") | filters.regex("@admin")) & filters.group)
async def report(bot, message):
if message.reply_to_message:
chat_id = message.chat.id
reporter = str(message.from_user.id)
mention = message.from_user.mention
admins = await bot.get_chat_members(chat_id=chat_id, filter="administrators")
success = False
report = f"Reporter : {mention} ({reporter})" + "\n"
report += f"Message : {message.reply_to_message.link}"
for admin in admins:
try:
reported_post = await message.reply_to_message.forward(admin.user.id)
await reported_post.reply_text(
text=report,
chat_id=admin.user.id,
disable_web_page_preview=True
)
success = True
except:
pass
if success:
await message.reply_text("**Reported to Admins!**")
@DonLee_Robot.on_message(filters.command(["ban"]))
async def ban(bot, message):
chatid = message.chat.id
if message.reply_to_message:
admins_list = await bot.get_chat_members(
chat_id=chatid, filter="administrators"
)
admins = []
for admin in admins_list:
id = admin.user.id
admins.append(id)
userid = message.from_user.id
if userid in admins:
user_to_ban = message.reply_to_message.from_user.id
if user_to_ban in admins:
await message.reply(text="Tʜɪɴᴋ ʜᴇ ɪs Aᴅᴍɪɴ, Cᴀɴ'ᴛ Bᴀɴ Aᴅᴍɪɴs")
else:
try:
await bot.kick_chat_member(chat_id=chatid, user_id=user_to_ban)
await message.reply_text(
f"Bye {message.reply_to_message.from_user.mention}"
)
except Exception as error:
await message.reply_text(f"{error}")
else:
await message.reply_text("Nɪᴄᴇ ᴛʀʏ, Bᴜᴛ ᴡʀᴏɴɢ ᴍᴏᴠᴇ..")
return
else:
return
@DonLee_Robot.on_message(filters.command(["unban"]))
async def unban(bot, message):
chatid = message.chat.id
if message.reply_to_message:
admins_list = await bot.get_chat_members(
chat_id=chatid,
filter="administrators"
)
admins = []
for admin in admins_list:
id = admin.user.id
admins.append(id)
userid = message.from_user.id
if userid in admins:
            user_to_unban = message.reply_to_message.from_user.id
if user_to_unban in admins:
await message.reply(text="Tʜɪɴᴋ ʜᴇ ɪs Aᴅᴍɪɴ, Cᴀɴ'ᴛ Bᴀɴ Aᴅᴍɪɴs")
else:
try:
await bot.unban_chat_member(chat_id=chatid, user_id=user_to_unban)
await message.reply_text(
f"welcome {message.reply_to_message.from_user.mention}"
)
except Exception as error:
await message.reply_text(f"{error}")
else:
await message.reply_text("Nɪᴄᴇ ᴛʀʏ, Bᴜᴛ ᴡʀᴏɴɢ ᴍᴏᴠᴇ..")
return
else:
return
@DonLee_Robot.on_message(filters.channel & filters.text | filters.media )
async def tag(client, message):
await message.copy(message.chat.id)
|
py | 1a544c54c098283409c8ddbbd4041c989b72e666 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dependency(Model):
"""Deployment dependency information.
:param depends_on: Gets the list of dependencies.
:type depends_on: list of :class:`BasicDependency
<Default.models.BasicDependency>`
:param id: Gets or sets the ID of the dependency.
:type id: str
:param resource_type: Gets or sets the dependency resource type.
:type resource_type: str
:param resource_name: Gets or sets the dependency resource name.
:type resource_name: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
'id': {'key': 'id', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
}
def __init__(self, depends_on=None, id=None, resource_type=None, resource_name=None):
self.depends_on = depends_on
self.id = id
self.resource_type = resource_type
self.resource_name = resource_name
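# Illustrative construction (added for clarity; all values below are made-up
# placeholders, not real Azure resources):
#   Dependency(id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/vnet1',
#              resource_type='Microsoft.Network/virtualNetworks',
#              resource_name='vnet1')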
|
py | 1a544ccd3d94390582ca4490ea880daba49eab6c | print("dic for person and user will ask about information")
person = {"name":"Ajinkya","gender":"Male","address":"Toronto","phone": 5488881681 }
key = input("Please enter what information would like to know about: ").lower()
result = person.get(key,"Invalid Input")
print(result) |
py | 1a544e0e4f64e74d16a28a62084a4d40be8bc99c | #!/usr/bin/python
# Copyright 2010 Chris Read <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
py | 1a544ee02b6cd26b706b56a2749c7360f9926aeb | # Given a binary search tree and a node in it, find the in-order successor of that node in the BST.
#
# The successor of a node p is the node with the smallest key greater than p.val.
#
# Input: root = [2, 1, 3], p = 1
# Output: 2
# Explanation: 1's in-order successor node is 2. Note that both p and the return value are of TreeNode type.
#
# Input: root = [5,3,6,2,4,null,null,1], p = 6
# Output: null
# Explanation: There is no in-order successor of the current node, so the answer is null.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def inorderSuccessor(self, root, p):
if p.right:
curr = p.right
while curr.left:
curr = curr.left
return curr
stack, inorder = [], float("inf")
while stack or root:
while root:
stack.append(root)
root = root.left
root = stack.pop()
if inorder == p.val:
return root
inorder = root.val
root = root.right
return None
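# Minimal usage sketch (added for illustration, not part of the original
# solution): build the tree [2, 1, 3] from the first example and look up the
# in-order successor of the node with value 1.
if __name__ == "__main__":
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    successor = Solution().inorderSuccessor(root, root.left)
    print(successor.val if successor else None)  # expected output: 2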
|
py | 1a544f3232dc15b322657bfae97aaeb1883c876d | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import sys
from ly_test_tools.o3de.editor_test import EditorTestSuite
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from Tools.LyTestTools.ly_test_tools.environment import process_utils
from Tools.LyTestTools.ly_test_tools.launchers import launcher_helper
from Tools.LyTestTools.ly_test_tools.log.log_monitor import LogMonitor
import Tools.LyTestTools.ly_test_tools.environment.waiter as waiter
@pytest.mark.SUITE_sandbox
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestAutomation(EditorTestSuite):
def test_Multiplayer_SimpleGameServerLauncher_ConnectsSuccessfully(self, workspace, launcher_platform):
unexpected_lines = []
expected_lines = ["New outgoing connection to remote address:"]
halt_on_unexpected = False
timeout = 180
# Start the AutomatedTesting.ServerLauncher.exe in hosting mode, no rendering mode, and wait for it to exist
server_launcher = launcher_helper.create_server_launcher(workspace)
server_launcher.args.extend(['+host', '-rhi=Null'])
server_launcher.start()
waiter.wait_for(lambda: process_utils.process_exists(f"AutomatedTesting.ServerLauncher.exe", ignore_extensions=True))
# Start the AutomatedTesting.GameLauncher.exe in client mode, no rendering mode, and wait for it to exist
game_launcher = launcher_helper.create_game_launcher(workspace)
game_launcher.args.extend(['+connect', '-rhi=Null'])
game_launcher.start()
waiter.wait_for(lambda: process_utils.process_exists(f"AutomatedTesting.GameLauncher.exe", ignore_extensions=True))
# Verify that the GameLauncher.exe was able to connect to the ServerLauncher.exe by checking the logs
game_launcher_log_file = os.path.join(game_launcher.workspace.paths.project_log(), 'Game.log')
game_launcher_log_monitor = LogMonitor(game_launcher, game_launcher_log_file)
game_launcher_log_monitor.monitor_log_for_lines(expected_lines, unexpected_lines, halt_on_unexpected, timeout)
|
py | 1a544f9d0bd67a185b75a0f08ee3d32154f78164 | from sympy.core.expr import unchanged
from sympy.sets import (ConditionSet, Intersection, FiniteSet,
EmptySet, Union, Contains, ImageSet)
from sympy.core.function import (Function, Lambda)
from sympy.core.mod import Mod
from sympy.core.numbers import (oo, pi)
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.trigonometric import (asin, sin)
from sympy.logic.boolalg import And
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.sets.sets import Interval
from sympy.testing.pytest import raises, warns_deprecated_sympy
w = Symbol('w')
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f = Function('f')
def test_CondSet():
sin_sols_principal = ConditionSet(x, Eq(sin(x), 0),
Interval(0, 2*pi, False, True))
assert pi in sin_sols_principal
assert pi/2 not in sin_sols_principal
assert 3*pi not in sin_sols_principal
assert oo not in sin_sols_principal
assert 5 in ConditionSet(x, x**2 > 4, S.Reals)
assert 1 not in ConditionSet(x, x**2 > 4, S.Reals)
# in this case, 0 is not part of the base set so
# it can't be in any subset selected by the condition
assert 0 not in ConditionSet(x, y > 5, Interval(1, 7))
# since 'in' requires a true/false, the following raises
# an error because the given value provides no information
# for the condition to evaluate (since the condition does
# not depend on the dummy symbol): the result is `y > 5`.
# In this case, ConditionSet is just acting like
# Piecewise((Interval(1, 7), y > 5), (S.EmptySet, True)).
raises(TypeError, lambda: 6 in ConditionSet(x, y > 5,
Interval(1, 7)))
X = MatrixSymbol('X', 2, 2)
matrix_set = ConditionSet(X, Eq(X*Matrix([[1, 1], [1, 1]]), X))
Y = Matrix([[0, 0], [0, 0]])
assert matrix_set.contains(Y).doit() is S.true
Z = Matrix([[1, 2], [3, 4]])
assert matrix_set.contains(Z).doit() is S.false
assert isinstance(ConditionSet(x, x < 1, {x, y}).base_set,
FiniteSet)
raises(TypeError, lambda: ConditionSet(x, x + 1, {x, y}))
raises(TypeError, lambda: ConditionSet(x, x, 1))
I = S.Integers
U = S.UniversalSet
C = ConditionSet
assert C(x, False, I) is S.EmptySet
assert C(x, True, I) is I
assert C(x, x < 1, C(x, x < 2, I)
) == C(x, (x < 1) & (x < 2), I)
assert C(y, y < 1, C(x, y < 2, I)
) == C(x, (x < 1) & (y < 2), I), C(y, y < 1, C(x, y < 2, I))
assert C(y, y < 1, C(x, x < 2, I)
) == C(y, (y < 1) & (y < 2), I)
assert C(y, y < 1, C(x, y < x, I)
) == C(x, (x < 1) & (y < x), I)
assert unchanged(C, y, x < 1, C(x, y < x, I))
assert ConditionSet(x, x < 1).base_set is U
# arg checking is not done at instantiation but this
# will raise an error when containment is tested
assert ConditionSet((x,), x < 1).base_set is U
c = ConditionSet((x, y), x < y, I**2)
assert (1, 2) in c
assert (1, pi) not in c
raises(TypeError, lambda: C(x, x > 1, C((x, y), x > 1, I**2)))
# signature mismatch since only 3 args are accepted
raises(TypeError, lambda: C((x, y), x + y < 2, U, U))
def test_CondSet_intersect():
input_conditionset = ConditionSet(x, x**2 > 4, Interval(1, 4, False,
False))
other_domain = Interval(0, 3, False, False)
output_conditionset = ConditionSet(x, x**2 > 4, Interval(
1, 3, False, False))
assert Intersection(input_conditionset, other_domain
) == output_conditionset
def test_issue_9849():
assert ConditionSet(x, Eq(x, x), S.Naturals
) is S.Naturals
assert ConditionSet(x, Eq(Abs(sin(x)), -1), S.Naturals
) == S.EmptySet
def test_simplified_FiniteSet_in_CondSet():
assert ConditionSet(x, And(x < 1, x > -3), FiniteSet(0, 1, 2)
) == FiniteSet(0)
assert ConditionSet(x, x < 0, FiniteSet(0, 1, 2)) == EmptySet
assert ConditionSet(x, And(x < -3), EmptySet) == EmptySet
y = Symbol('y')
assert (ConditionSet(x, And(x > 0), FiniteSet(-1, 0, 1, y)) ==
Union(FiniteSet(1), ConditionSet(x, And(x > 0), FiniteSet(y))))
assert (ConditionSet(x, Eq(Mod(x, 3), 1), FiniteSet(1, 4, 2, y)) ==
Union(FiniteSet(1, 4), ConditionSet(x, Eq(Mod(x, 3), 1),
FiniteSet(y))))
def test_free_symbols():
assert ConditionSet(x, Eq(y, 0), FiniteSet(z)
).free_symbols == {y, z}
assert ConditionSet(x, Eq(x, 0), FiniteSet(z)
).free_symbols == {z}
assert ConditionSet(x, Eq(x, 0), FiniteSet(x, z)
).free_symbols == {x, z}
assert ConditionSet(x, Eq(x, 0), ImageSet(Lambda(y, y**2),
S.Integers)).free_symbols == set()
def test_bound_symbols():
assert ConditionSet(x, Eq(y, 0), FiniteSet(z)
).bound_symbols == [x]
assert ConditionSet(x, Eq(x, 0), FiniteSet(x, y)
).bound_symbols == [x]
assert ConditionSet(x, x < 10, ImageSet(Lambda(y, y**2), S.Integers)
).bound_symbols == [x]
assert ConditionSet(x, x < 10, ConditionSet(y, y > 1, S.Integers)
).bound_symbols == [x]
def test_as_dummy():
_0, _1 = symbols('_0 _1')
assert ConditionSet(x, x < 1, Interval(y, oo)
).as_dummy() == ConditionSet(_0, _0 < 1, Interval(y, oo))
assert ConditionSet(x, x < 1, Interval(x, oo)
).as_dummy() == ConditionSet(_0, _0 < 1, Interval(x, oo))
assert ConditionSet(x, x < 1, ImageSet(Lambda(y, y**2), S.Integers)
).as_dummy() == ConditionSet(
_0, _0 < 1, ImageSet(Lambda(_0, _0**2), S.Integers))
e = ConditionSet((x, y), x <= y, S.Reals**2)
assert e.bound_symbols == [x, y]
assert e.as_dummy() == ConditionSet((_0, _1), _0 <= _1, S.Reals**2)
assert e.as_dummy() == ConditionSet((y, x), y <= x, S.Reals**2
).as_dummy()
def test_subs_CondSet():
s = FiniteSet(z, y)
c = ConditionSet(x, x < 2, s)
assert c.subs(x, y) == c
assert c.subs(z, y) == ConditionSet(x, x < 2, FiniteSet(y))
assert c.xreplace({x: y}) == ConditionSet(y, y < 2, s)
assert ConditionSet(x, x < y, s
).subs(y, w) == ConditionSet(x, x < w, s.subs(y, w))
# if the user uses assumptions that cause the condition
# to evaluate, that can't be helped from SymPy's end
n = Symbol('n', negative=True)
assert ConditionSet(n, 0 < n, S.Integers) is S.EmptySet
p = Symbol('p', positive=True)
assert ConditionSet(n, n < y, S.Integers
).subs(n, x) == ConditionSet(n, n < y, S.Integers)
raises(ValueError, lambda: ConditionSet(
x + 1, x < 1, S.Integers))
assert ConditionSet(
p, n < x, Interval(-5, 5)).subs(x, p) == Interval(-5, 5), ConditionSet(
p, n < x, Interval(-5, 5)).subs(x, p)
assert ConditionSet(
n, n < x, Interval(-oo, 0)).subs(x, p
) == Interval(-oo, 0)
assert ConditionSet(f(x), f(x) < 1, {w, z}
).subs(f(x), y) == ConditionSet(f(x), f(x) < 1, {w, z})
# issue 17341
k = Symbol('k')
img1 = ImageSet(Lambda(k, 2*k*pi + asin(y)), S.Integers)
img2 = ImageSet(Lambda(k, 2*k*pi + asin(S.One/3)), S.Integers)
assert ConditionSet(x, Contains(
y, Interval(-1,1)), img1).subs(y, S.One/3).dummy_eq(img2)
assert (0, 1) in ConditionSet((x, y), x + y < 3, S.Integers**2)
raises(TypeError, lambda: ConditionSet(n, n < -10, Interval(0, 10)))
def test_subs_CondSet_tebr():
with warns_deprecated_sympy():
assert ConditionSet((x, y), {x + 1, x + y}, S.Reals**2) == \
ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
def test_dummy_eq():
C = ConditionSet
I = S.Integers
c = C(x, x < 1, I)
assert c.dummy_eq(C(y, y < 1, I))
assert c.dummy_eq(1) == False
assert c.dummy_eq(C(x, x < 1, S.Reals)) == False
c1 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
c2 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Reals**2)
c3 = ConditionSet((x, y), Eq(x + 1, 0) & Eq(x + y, 0), S.Complexes**2)
assert c1.dummy_eq(c2)
assert c1.dummy_eq(c3) is False
assert c.dummy_eq(c1) is False
assert c1.dummy_eq(c) is False
# issue 19496
m = Symbol('m')
n = Symbol('n')
a = Symbol('a')
d1 = ImageSet(Lambda(m, m*pi), S.Integers)
d2 = ImageSet(Lambda(n, n*pi), S.Integers)
c1 = ConditionSet(x, Ne(a, 0), d1)
c2 = ConditionSet(x, Ne(a, 0), d2)
assert c1.dummy_eq(c2)
def test_contains():
assert 6 in ConditionSet(x, x > 5, Interval(1, 7))
assert (8 in ConditionSet(x, y > 5, Interval(1, 7))) is False
# `in` should give True or False; in this case there is not
# enough information for that result
raises(TypeError,
lambda: 6 in ConditionSet(x, y > 5, Interval(1, 7)))
# here, there is enough information but the comparison is
# not defined
raises(TypeError, lambda: 0 in ConditionSet(x, 1/x >= 0, S.Reals))
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(6) == (y > 5)
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(8) is S.false
assert ConditionSet(x, y > 5, Interval(1, 7)
).contains(w) == And(Contains(w, Interval(1, 7)), y > 5)
# This returns an unevaluated Contains object
# because 1/0 should not be defined for 1 and 0 in the context of
# reals.
assert ConditionSet(x, 1/x >= 0, S.Reals).contains(0) == \
Contains(0, ConditionSet(x, 1/x >= 0, S.Reals), evaluate=False)
c = ConditionSet((x, y), x + y > 1, S.Integers**2)
assert not c.contains(1)
assert c.contains((2, 1))
assert not c.contains((0, 1))
c = ConditionSet((w, (x, y)), w + x + y > 1, S.Integers*S.Integers**2)
assert not c.contains(1)
assert not c.contains((1, 2))
assert not c.contains(((1, 2), 3))
assert not c.contains(((1, 2), (3, 4)))
assert c.contains((1, (3, 4)))
def test_as_relational():
assert ConditionSet((x, y), x > 1, S.Integers**2).as_relational((x, y)
) == (x > 1) & Contains((x, y), S.Integers**2)
assert ConditionSet(x, x > 1, S.Integers).as_relational(x
) == Contains(x, S.Integers) & (x > 1)
def test_flatten():
"""Tests whether there is basic denesting functionality"""
inner = ConditionSet(x, sin(x) + x > 0)
outer = ConditionSet(x, Contains(x, inner), S.Reals)
assert outer == ConditionSet(x, sin(x) + x > 0, S.Reals)
inner = ConditionSet(y, sin(y) + y > 0)
outer = ConditionSet(x, Contains(y, inner), S.Reals)
assert outer != ConditionSet(x, sin(x) + x > 0, S.Reals)
inner = ConditionSet(x, sin(x) + x > 0).intersect(Interval(-1, 1))
outer = ConditionSet(x, Contains(x, inner), S.Reals)
assert outer == ConditionSet(x, sin(x) + x > 0, Interval(-1, 1))
def test_duplicate():
from sympy.core.function import BadSignatureError
# test coverage for line 95 in conditionset.py, check for duplicates in symbols
dup = symbols('a,a')
raises(BadSignatureError, lambda: ConditionSet(dup, x < 0))
|
py | 1a544ff76023b83dbafe24e2986bb82c86f2ff77 | class Piece:
def __init__(self, piece_type, piece_colour, piece_name, xy = None):
assert piece_colour.lower() in ['black', 'white'], 'Invalid colour'
assert piece_type.lower() in ['pawn', 'bishop', 'rook', 'knight', 'king', 'queen'], 'Invalid piece_type'
self.type = piece_type
self.colour = piece_colour
self.name = piece_name
if xy is None:
print('Warning : xy initialised as None')
else:
xy = parse_xy(xy)
assert xy[0] in range(8) and xy[1] in range(8), 'Piece location out of range'
self.xy = xy
self.open_to_passant = False
self.peace_moves = None
self.kill_moves = None
def set_xy(self, xy):
xy = parse_xy(xy)
        assert xy[0] in range(8) and xy[1] in range(8), 'Piece location out of range'
if self.type == 'pawn':
if self.colour == 'white' and self.xy[0] == 1 and xy[0] == 3:
self.open_to_passant = True
elif self.colour == 'black' and self.xy[0] == 6 and xy[0] == 4:
self.open_to_passant = True
else:
self.open_to_passant = False
self.xy = xy
def update_Moves(self, Board):
x, y = self.xy
peace_moves, kill_moves = [], []
move_functions_dict = get_move_functions(self.type)
if self.type == 'pawn':
if self.colour == 'white':
peace_moves.append((x + 1, y))
kill_moves += [(x + 1, y + 1), (x + 1, y - 1)]
if x == 1:
peace_moves.append((x + 2, y))
else:
peace_moves.append((x - 1, y))
kill_moves += [(x - 1, y + 1), (x - 1, y - 1)]
if x == 6:
peace_moves.append((x - 2, y))
peace_moves = [xy for xy in peace_moves if Board.is_peace_Space(xy)]
kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy, is_pawn = True) for new_xy in kill_moves]
kill_moves = [val for val in kill_moves if val[0]]
elif self.type == 'knight':
peace_moves = [xy for xy in list(zip( [x+2, x+2, x+1 , x+1, x-1, x-1, x-2, x-2],
[y+1, y-1, y+2, y-2, y+2, y-2, y+1, y-1]))
if Board.is_peace_Space(xy)]
kill_moves = list(zip( [x+2, x+2, x+1 , x+1, x-1, x-1, x-2, x-2],
[y+1, y-1, y+2, y-2, y+2, y-2, y+1, y-1]))
kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy, is_pawn = True) for new_xy in kill_moves]
kill_moves = [val for val in kill_moves if val[0]]
elif self.type == 'king':
peace_moves = [xy for xy in list(zip( [x , x , x+1, x+1, x+1, x-1, x-1, x-1],
[y+1, y-1, y , y+1, y-1, y , y+1, y-1]))
if Board.is_peace_Space(xy)]
kill_moves = list(zip( [x , x , x+1, x+1, x+1, x-1, x-1, x-1],
[y+1, y-1, y , y+1, y-1, y , y+1, y-1]))
kill_moves = [Board.is_kill_Move(new_xy, current_xy = self.xy, is_pawn = True) for new_xy in kill_moves]
kill_moves = [val for val in kill_moves if val[0]]
elif self.type in ['bishop', 'queen', 'rook']:
for func in move_functions_dict[self.type]:
i = 1
new_xy = func((x, y, i))
while Board.is_peace_Space(new_xy) or Board.is_kill_Move(new_xy, current_xy = self.xy)[0]:
vals = Board.is_kill_Move(new_xy, current_xy = self.xy)
if vals[0]:
kill_moves.append(vals)
break
                    peace_moves += [new_xy]
                    i += 1
                    new_xy = func((x, y, i))
self.peace_moves = peace_moves
self.kill_moves = kill_moves
return peace_moves, kill_moves
def get_xy(self):
return self.xy
def get_peace_Moves(self, board_coords = True):
if board_coords:
return xy_to_board(self.peace_moves)
else:
return self.peace_moves
def get_kill_Moves(self, board_coords = True):
if board_coords:
return xy_to_board(self.kill_moves)
else:
return self.kill_moves
def __str__(self):
rep = 'Piece(' + str(self.name) + ') at ' + xy_to_board(self.xy)
return rep
def __repr__(self):
return self.__str__()
class Board:
def __init__(self, to_setup = False):
colours = ('black', 'white')
self.board = [[Space((i, j), colours[(i+j)%2]) for j in range(8)] for i in range(8)]
self.live_Pieces = {}
self.dead_Pieces = {}
if to_setup:
self.setup_Game()
def setup_Game(self):
colours = ('white', 'black')
pieces = ('rook', 'knight', 'bishop', 'queen', 'king', 'bishop', 'knight', 'rook')
for colour_no in range(len(colours)):
colour = colours[colour_no]
for y in range(8):
x = colour_no * 5 + 1
self.add_Piece((x, y), 'pawn', colour)
for y in range(len(pieces)):
x = colour_no*7
self.add_Piece((x, y), pieces[y], colour)
self.check_live_Pieces(correct = True)
def get_Space(self, xy):
x, y = parse_xy(xy, True)
return self.board[x][y]
def add_Piece(self, xy, piece_type, piece_colour):
xy = parse_xy(xy)
assert xy is not None, 'Invalid xy'
assert self.xy_is_empty(xy), 'Space not empty'
i = 0
piece_name = piece_colour + '_' + piece_type + str(i)
self.check_live_Pieces(correct = True)
while piece_name in self.live_Pieces.keys() or piece_name in self.dead_Pieces.keys():
i += 1
piece_name = piece_name[:-1] + str(i)
new_Piece = Piece(piece_type, piece_colour, piece_name, xy)
self.get_Space(xy).occupy(new_Piece)
self.live_Pieces[piece_name] = xy
def clear_Space(self, xy, dead = True):
xy = parse_xy(xy)
piece = self.get_Space(xy).get_Piece()
self.get_Space(xy).vacate()
if dead:
self.dead_Pieces[piece.name] = xy
del self.live_Pieces[piece.name]
def get_live_Pieces(self, update = False):
all_Pieces = {}
for row in self.board:
for Space in row:
Piece = Space.get_Piece()
if Piece is not None:
#print(type(Piece), Space)
i = 0
piece_name = Piece.name
all_Pieces[piece_name] = Space.xy
return all_Pieces
def check_live_Pieces(self, correct = False):
correct_live_Pieces = self.get_live_Pieces()
if self.live_Pieces == correct_live_Pieces:
return True
else:
#print("live_Pieces don't match")
if correct:
self.live_Pieces = correct_live_Pieces
print('corrected live_Pieces')
return False
def xy_on_board(self, xy):
return xy[0] in range(8) and xy[1] in range(8)
def xy_is_empty(self, xy):
return xy not in self.live_Pieces.values()
def is_peace_Space(self, xy):
xy = parse_xy(xy)
if xy is None:
#print('Destination xy is not on board')
return False
return self.xy_is_empty(xy)
def is_kill_Move(self, xy, current_xy, is_pawn = False):
xy = parse_xy(xy)
current_xy = parse_xy(current_xy)
if xy is None:
#print('Destination xy is not on board')
return False, None
if current_xy is None:
print('Invalid current_xy. There may be an error.')
return False, None
current_Piece = self.get_Space(current_xy).get_Piece()
if current_Piece is None:
return False, None
if not is_pawn:
opp_Piece = self.get_Space(xy).get_Piece()
if opp_Piece is None:
#print('No Piece at ' + str(xy))
return False, None
else:
if opp_Piece.colour == current_Piece.colour:
return False, None
else:
return xy, xy
else: # if pawn
opp_Piece = self.get_Space(xy).get_Piece()
"""assert ( xy[0] == current_xy[0] + 1 and
current_Piece.colour == 'white') or (
xy[0] == current_xy[0] - 1 and
current_Piece.colour == 'black')"""
x, y = xy
if opp_Piece is None:
if current_Piece.colour == 'white' and current_xy[0] == 4:
opp_Piece2 = self.board[x-1][y].get_Piece()
                    if opp_Piece2 is not None and opp_Piece2.type == 'pawn' and opp_Piece2.open_to_passant and opp_Piece2.colour == 'black':
return xy, (x-1, y)
elif current_Piece.colour == 'black' and current_xy[0] == 3:
opp_Piece2 = self.board[x+1][y].get_Piece()
                    if opp_Piece2 is not None and opp_Piece2.type == 'pawn' and opp_Piece2.open_to_passant and opp_Piece2.colour == 'white':
return xy, (x+1, y)
else:
return False, None
else:
if opp_Piece.colour == current_Piece.colour:
return False, None
else:
return xy, xy
def update_all_Moves(self):
self.check_live_Pieces(correct = True)
for piece_name, xy in self.live_Pieces.items():
print('checking moves for', piece_name)
self.get_Space(xy).get_Piece().update_Moves(self)
def get_Space(self, xy):
x, y = parse_xy(xy)
return self.board[x][y]
def move_Piece(self, xy_1, xy_2):
p = self.get_Space(xy_1).get_Piece()
self.get_Space(xy_1).vacate()
self.get_Space(xy_2).occupy(p)
def clear_Board(self):
self.__init__()
def __str__(self):
rep = '\t ' + '_'*79+ '\n'
breaker = ['\t|'+''.join([' |*********|' for i in range(4)]) + '\n' +
'\t|'+''.join(['_________|_________|' for i in range(4)]) + '\n',
'\t|'+''.join(['*********| |' for i in range(4)]) + '\n' +
'\t|'+''.join(['_________|_________|' for i in range(4)]) + '\n']
for i in range(len(self.board), 0, -1):
row = self.board[i-1]
rep_row = str(i) + '\t'
for j in range(len(row)):
Space = row[j]
if Space.held_by is not None:
rep_row += '| '+str(Space.held_by.colour[0] + ' ' + Space.held_by.type).ljust(8)
else:
rep_row += '| '+' '.ljust(8)
rep_row += '|\n'
rep += rep_row + breaker[i%2]
rep += ' \t '
rep += ' '.join([l.ljust(9) for l in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']])
return rep
def __repr__(self):
return self.__str__()
class Space:
def __init__(self, xy, colour):
assert colour in ['black', 'white'], 'Invalid colour for Space object'
self.colour = colour
x, y = xy
self.x, self.y = x, y
self.xy = (x, y)
self.x_name, self.y_name = xy_to_board(xy)
self.held_by = None
def occupy(self, Piece):
self.held_by = Piece
def vacate(self):
self.held_by = None
def get_Piece(self):
return self.held_by
def __str__(self):
return 'Space '+ str(self.x_name) + str(self.y_name) + ' ('+self.colour+')'
def __repr__(self):
return self.__str__()
class Game:
def __init__(self):
self.gameboard = Board()
self.gameboard.setup_Game()
self.all_piece_types = ('pawn', 'bishop', 'rook', 'knight', 'king', 'queen')
self.all_colours = ('black', 'white')
def __str__(self):
return self.gameboard.__str__()
def __repr__(self):
return self.gameboard.__repr__()
def get_move_functions(piece_type):
bishfuncs = [ lambda xyi : (xyi[0] + xyi[2], xyi[1] + xyi[2]),
lambda xyi : (xyi[0] + xyi[2], xyi[1] - xyi[2]),
lambda xyi : (xyi[0] - xyi[2], xyi[1] + xyi[2]),
lambda xyi : (xyi[0] - xyi[2], xyi[1] - xyi[2])]
rookfuncs = [ lambda xyi : (xyi[0] + xyi[2], xyi[1]),
lambda xyi : (xyi[0] - xyi[2], xyi[1]),
lambda xyi : (xyi[0] , xyi[1] + xyi[2]),
lambda xyi : (xyi[0] , xyi[1] - xyi[2])]
queenfuncs = bishfuncs + rookfuncs
funcs = dict(zip(['bishop', 'rook', 'queen'], [bishfuncs, rookfuncs, queenfuncs]))
return funcs
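# Note (added for clarity): each lambda above maps (x, y, i) to the square i
# steps away from (x, y) along one ray, e.g. bishfuncs[0]((0, 0, 3)) == (3, 3)
# and rookfuncs[2]((0, 0, 3)) == (0, 3); Piece.update_Moves walks i = 1, 2, ...
# along each ray until it is blocked.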
def xy_to_board(xy):
if type(xy) == tuple and len(xy) == 2:
if xy < (8, 8) and (xy) >= (0, 0):
x, y = xy
return ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'][y] + str(x+1)
elif type(xy) == list:
return [xy_to_board(xy0) for xy0 in xy]
return None
def parse_xy(xy, report_error = False):
if type(xy) == tuple and len(xy) == 2:
if xy < (8, 8) and xy >= (0, 0):
return xy
elif type(xy) == str and len(xy) == 2:
y, x = xy[0], int(xy[1])
if y in 'abcdefgh' and x in range(1, 9):
y, x = dict(zip('abcdefgh', range(8)))[y], x-1
return (x, y)
if report_error:
print('invalid xy:', xy)
return None
"""
import checkmate as cm
g = cm.Game()
print(g.gameboard)
b = g.gameboard
b.update_all_Moves()
b.move_Piece('b2', 'b4')
b.move_Piece('b4', 'b5')
b.move_Piece('c7', 'c5')
b.update_all_Moves()
p = b.get_Space('b2').get_Piece()
b.move_Piece('b2', 'b4')
b.get_Space('b1').vacate()
b.get_Space('a3').occupy(p)
"""
|
py | 1a545054a6ac5c2565a129300e821819ba959203 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_print_voucher
import account_receipt_voucher
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | 1a5450630cfdc1f50fe4551cfdb0a5b88a58e869 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import Tensor
__all__ = ['assert_same_float_dtype', 'is_numeric_tensor', 'assert_scalar_int']
NUMERIC_TYPES = frozenset([dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.qint8, dtypes.qint32,
dtypes.quint8, dtypes.complex64])
def is_numeric_tensor(tensor):
return isinstance(tensor, Tensor) and tensor.dtype in NUMERIC_TYPES
def _assert_same_base_type(items, expected_type=None):
"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
def assert_scalar_int(tensor):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: Tensor to test.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
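# Minimal usage sketch (illustrative only; assumes the TF1-era graph context in
# which this contrib module was written):
#
#   import tensorflow as tf
#   x = tf.placeholder(tf.float32, shape=[None, 3])
#   y = tf.placeholder(tf.float32, shape=[None, 3])
#   dtype = assert_same_float_dtype([x, y])                # -> tf.float32
#   n = assert_scalar_int(tf.constant(7, dtype=tf.int32))  # passes, returns n
#   print(is_numeric_tensor(x))                            # -> True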
|
py | 1a5450ceb5f188b2078d6bb675757a5a983cfb0a | # Generated by Django 3.0.4 on 2020-11-13 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_title', models.CharField(max_length=264, verbose_name='Category')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
]
|
py | 1a5451750a48eae0c18493a421632fb10a8396b6 | import unittest
from slack_sdk.http_retry import RateLimitErrorRetryHandler
from slack_sdk.scim import SCIMClient
from tests.slack_sdk.scim.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from ..my_retry_handler import MyRetryHandler
class TestSCIMClient(unittest.TestCase):
def setUp(self):
setup_mock_web_api_server(self)
def tearDown(self):
cleanup_mock_web_api_server(self)
def test_retries(self):
retry_handler = MyRetryHandler(max_retry_count=2)
client = SCIMClient(
base_url="http://localhost:8888/",
token="xoxp-remote_disconnected",
retry_handlers=[retry_handler],
)
try:
client.search_users(start_index=0, count=1)
self.fail("An exception is expected")
except Exception as _:
pass
self.assertEqual(2, retry_handler.call_count)
def test_ratelimited(self):
client = SCIMClient(
base_url="http://localhost:8888/",
token="xoxp-ratelimited",
)
client.retry_handlers.append(RateLimitErrorRetryHandler())
response = client.search_users(start_index=0, count=1)
# Just running retries; no assertions for call count so far
self.assertEqual(429, response.status_code)
|
py | 1a54518e97e51ad93aa136cc99020b1931e45f50 | # -*- coding: utf-8 -*-
"""Console script for puzzlesolver."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for puzzlesolver."""
click.echo("Replace this message by putting your code into "
"puzzlesolver.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
py | 1a545197f54f0a561cea7ce2d811edce4b6001ef | import os
import pyttsx3
import pyaudio
import speech_recognition as sr
assistente = pyttsx3.init()
recon = sr.Recognizer()
inpvoz = ""
def retorno(frase):
assistente.say(frase)
assistente.setProperty("voice", b"brasil")
assistente.setProperty("rate", 210)
assistente.setProperty("volume", 1)
assistente.runAndWait()
def ouvir(source):
recon.adjust_for_ambient_noise(source)
audio = recon.listen(source)
inpvoz = recon.recognize_google(audio, language="pt-BR")
return inpvoz
def continuar(source):
retorno(
"Posso ajudar com algo mais? Responda sim para continuar e não para finalizar!"
)
continuar = ouvir(source)
print(f"Você disse {continuar}")
return continuar
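# Minimal driver sketch (assumed usage, not part of the original functions):
# open a microphone source and loop until the user answers "não" ("no") to the
# continue prompt spoken by continuar(). The prompts and replies are in
# Brazilian Portuguese ("Você disse" = "You said", "Até logo" = "See you later").
if __name__ == "__main__":
    with sr.Microphone() as source:
        while True:
            comando = ouvir(source)
            print(f"Você disse {comando}")
            if continuar(source).strip().lower() != "sim":
                retorno("Até logo!")
                break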
|
py | 1a54522e9e851ca7585133e8a8e48930733551dc | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subtype assigner for lib2to3 trees.
This module assigns extra type information to the lib2to3 trees. This
information is more specific than whether something is an operator or an
identifier. For instance, it can specify if a node in the tree is part of a
subscript.
AssignSubtypes(): the main function exported by this module.
Annotations:
subtype: The subtype of a pytree token. See 'format_token' module for a list
of subtypes.
"""
from lib2to3 import pytree
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
def AssignSubtypes(tree):
"""Run the subtype assigner visitor over the tree, modifying it in place.
Arguments:
tree: the top-level pytree node to annotate with subtypes.
"""
subtype_assigner = _SubtypeAssigner()
subtype_assigner.Visit(tree)
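# Usage sketch (assumed driver code, not part of this module): parse a snippet
# with yapf's pytree helpers and annotate it in place.
#
#   from yapf.yapflib import pytree_utils
#   tree = pytree_utils.ParseCodeToTree('f(a=1)\n')
#   AssignSubtypes(tree)
#   # leaves of the tree now carry subtype annotations; e.g. the '=' inside
#   # the call is tagged format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN.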
# Map tokens in argument lists to their respective subtype.
_ARGLIST_TOKEN_TO_SUBTYPE = {
'=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
':': format_token.Subtype.TYPED_NAME,
'*': format_token.Subtype.VARARGS_STAR,
'**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
"""_SubtypeAssigner - see file-level docstring for detailed description.
The subtype is added as an annotation to the pytree token.
"""
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [',']))
for child in node.children:
self.Visit(child)
comp_for = False
dict_maker = False
for child in node.children:
if pytree_utils.NodeName(child) == 'comp_for':
comp_for = True
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICT_SET_GENERATOR)
elif pytree_utils.NodeName(child) in ('COLON', 'DOUBLESTAR'):
dict_maker = True
if not comp_for and dict_maker:
last_was_colon = False
unpacking = False
for child in node.children:
if pytree_utils.NodeName(child) == 'DOUBLESTAR':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.KWARGS_STAR_STAR)
if last_was_colon:
if style.Get('INDENT_DICTIONARY_VALUE'):
_InsertPseudoParentheses(child)
else:
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_VALUE)
elif (isinstance(child, pytree.Node) or
(not child.value.startswith('#') and child.value not in '{:,')):
# Mark the first leaf of a key entry as a DICTIONARY_KEY. We
# normally want to split before them if the dictionary cannot exist
# on a single line.
if not unpacking or pytree_utils.FirstLeafNode(child).value == '**':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
_AppendSubtypeRec(child, format_token.Subtype.DICTIONARY_KEY_PART)
last_was_colon = pytree_utils.NodeName(child) == 'COLON'
if pytree_utils.NodeName(child) == 'DOUBLESTAR':
unpacking = True
elif last_was_colon:
unpacking = False
def Visit_expr_stmt(self, node): # pylint: disable=invalid-name
# expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist)
# | ('=' (yield_expr|testlist_star_expr))*)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
def Visit_or_test(self, node): # pylint: disable=invalid-name
# or_test ::= and_test ('or' and_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'or':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_test(self, node): # pylint: disable=invalid-name
# and_test ::= not_test ('and' not_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'and':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'not':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
# comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not'
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
elif pytree_utils.NodeName(child) == 'comp_op':
for grandchild in child.children:
_AppendTokenSubtype(grandchild, format_token.Subtype.BINARY_OPERATOR)
def Visit_star_expr(self, node): # pylint: disable=invalid-name
# star_expr ::= '*' expr
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '*':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_STAR)
def Visit_expr(self, node): # pylint: disable=invalid-name
# expr ::= xor_expr ('|' xor_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '|':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_xor_expr(self, node): # pylint: disable=invalid-name
# xor_expr ::= and_expr ('^' and_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '^':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_expr(self, node): # pylint: disable=invalid-name
# and_expr ::= shift_expr ('&' shift_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '&':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_shift_expr(self, node): # pylint: disable=invalid-name
# shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
for child in node.children:
self.Visit(child)
if _IsAExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.A_EXPR_OPERATOR)
if _IsSimpleExpression(node):
for child in node.children:
if _IsAExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.SIMPLE_EXPRESSION)
def Visit_term(self, node): # pylint: disable=invalid-name
# term ::= factor (('*'|'/'|'%'|'//'|'@') factor)*
for child in node.children:
self.Visit(child)
if _IsMExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
_AppendTokenSubtype(child, format_token.Subtype.M_EXPR_OPERATOR)
if _IsSimpleExpression(node):
for child in node.children:
if _IsMExprOperator(child):
_AppendTokenSubtype(child, format_token.Subtype.SIMPLE_EXPRESSION)
def Visit_factor(self, node): # pylint: disable=invalid-name
# factor ::= ('+'|'-'|'~') factor | power
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-~':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_power(self, node): # pylint: disable=invalid-name
# power ::= atom trailer* ['**' factor]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '**':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_trailer(self, node): # pylint: disable=invalid-name
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '[]':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_BRACKET)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_sliceop(self, node): # pylint: disable=invalid-name
# sliceop ::= ':' [test]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::=
# test [comp_for] | test '=' test
self._ProcessArgLists(node)
def Visit_arglist(self, node): # pylint: disable=invalid-name
# arglist ::=
# (argument ',')* (argument [',']
# | '*' test (',' argument)* [',' '**' test]
# | '**' test)
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def Visit_tname(self, node): # pylint: disable=invalid-name
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def Visit_decorator(self, node): # pylint: disable=invalid-name
# decorator ::=
# '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
for child in node.children:
if isinstance(child, pytree.Leaf) and child.value == '@':
_AppendTokenSubtype(child, subtype=format_token.Subtype.DECORATOR)
self.Visit(child)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::=
# 'def' NAME parameters ['->' test] ':' suite
for child in node.children:
if pytree_utils.NodeName(child) == 'NAME' and child.value != 'def':
_AppendTokenSubtype(child, format_token.Subtype.FUNC_DEF)
break
for child in node.children:
self.Visit(child)
def Visit_parameters(self, node): # pylint: disable=invalid-name
# parameters ::= '(' [typedargslist] ')'
self._ProcessArgLists(node)
if len(node.children) > 2:
_AppendFirstLeafTokenSubtype(node.children[1],
format_token.Subtype.PARAMETER_START)
_AppendLastLeafTokenSubtype(node.children[-2],
format_token.Subtype.PARAMETER_STOP)
def Visit_typedargslist(self, node): # pylint: disable=invalid-name
# typedargslist ::=
# ((tfpdef ['=' test] ',')*
# ('*' [tname] (',' tname ['=' test])* [',' '**' tname]
# | '**' tname)
# | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
self._ProcessArgLists(node)
_SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
tname = False
if not node.children:
return
_AppendFirstLeafTokenSubtype(node.children[0],
format_token.Subtype.PARAMETER_START)
_AppendLastLeafTokenSubtype(node.children[-1],
format_token.Subtype.PARAMETER_STOP)
tname = pytree_utils.NodeName(node.children[0]) == 'tname'
for i in range(1, len(node.children)):
prev_child = node.children[i - 1]
child = node.children[i]
if pytree_utils.NodeName(prev_child) == 'COMMA':
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.PARAMETER_START)
elif pytree_utils.NodeName(child) == 'COMMA':
_AppendLastLeafTokenSubtype(prev_child,
format_token.Subtype.PARAMETER_STOP)
if pytree_utils.NodeName(child) == 'tname':
tname = True
_SetArgListSubtype(child, format_token.Subtype.TYPED_NAME,
format_token.Subtype.TYPED_NAME_ARG_LIST)
elif pytree_utils.NodeName(child) == 'COMMA':
tname = False
elif pytree_utils.NodeName(child) == 'EQUAL' and tname:
_AppendTokenSubtype(child, subtype=format_token.Subtype.TYPED_NAME)
tname = False
def Visit_varargslist(self, node): # pylint: disable=invalid-name
# varargslist ::=
# ((vfpdef ['=' test] ',')*
# ('*' [vname] (',' vname ['=' test])* [',' '**' vname]
# | '**' vname)
# | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
self._ProcessArgLists(node)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_LIST)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_FOR)
# Mark the previous node as COMP_EXPR unless this is a nested comprehension
# as these will have the outer comprehension as their previous node.
attr = pytree_utils.GetNodeAnnotation(node.parent,
pytree_utils.Annotation.SUBTYPE)
if not attr or format_token.Subtype.COMP_FOR not in attr:
_AppendSubtypeRec(node.parent.children[0], format_token.Subtype.COMP_EXPR)
self.DefaultNodeVisit(node)
def Visit_old_comp_for(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_for(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_IF)
self.DefaultNodeVisit(node)
def Visit_old_comp_if(self, node): # pylint: disable=invalid-name
# Python 3.7
self.Visit_comp_if(node)
def _ProcessArgLists(self, node):
"""Common method for processing argument lists."""
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf):
_AppendTokenSubtype(
child,
subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value,
format_token.Subtype.NONE))
def _SetArgListSubtype(node, node_subtype, list_subtype):
"""Set named assign subtype on elements in a arg list."""
def HasSubtype(node):
"""Return True if the arg list has a named assign subtype."""
if isinstance(node, pytree.Leaf):
return node_subtype in pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SUBTYPE, set())
for child in node.children:
node_name = pytree_utils.NodeName(child)
if node_name not in {'atom', 'arglist', 'power'}:
if HasSubtype(child):
return True
return False
if not HasSubtype(node):
return
for child in node.children:
node_name = pytree_utils.NodeName(child)
if node_name not in {'atom', 'COMMA'}:
_AppendFirstLeafTokenSubtype(child, list_subtype)
def _AppendTokenSubtype(node, subtype):
"""Append the token's subtype only if it's not already set."""
pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
subtype)
def _AppendFirstLeafTokenSubtype(node, subtype):
"""Append the first leaf token's subtypes."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendFirstLeafTokenSubtype(node.children[0], subtype)
def _AppendLastLeafTokenSubtype(node, subtype):
"""Append the last leaf token's subtypes."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendLastLeafTokenSubtype(node.children[-1], subtype)
def _AppendSubtypeRec(node, subtype, force=True):
"""Append the leafs in the node to the given subtype."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
for child in node.children:
_AppendSubtypeRec(child, subtype, force=force)
def _InsertPseudoParentheses(node):
"""Insert pseudo parentheses so that dicts can be formatted correctly."""
comment_node = None
if isinstance(node, pytree.Node):
if node.children[-1].type == token.COMMENT:
comment_node = node.children[-1].clone()
node.children[-1].remove()
first = pytree_utils.FirstLeafNode(node)
last = pytree_utils.LastLeafNode(node)
if first == last and first.type == token.COMMENT:
# A comment was inserted before the value, which is a pytree.Leaf.
# Encompass the dictionary's value into an ATOM node.
last = first.next_sibling
last_clone = last.clone()
new_node = pytree.Node(syms.atom, [first.clone(), last_clone])
for orig_leaf, clone_leaf in zip(last.leaves(), last_clone.leaves()):
pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf)
if hasattr(orig_leaf, 'is_pseudo'):
clone_leaf.is_pseudo = orig_leaf.is_pseudo
node.replace(new_node)
node = new_node
last.remove()
first = pytree_utils.FirstLeafNode(node)
last = pytree_utils.LastLeafNode(node)
lparen = pytree.Leaf(
token.LPAR, u'(', context=('', (first.get_lineno(), first.column - 1)))
last_lineno = last.get_lineno()
if last.type == token.STRING and '\n' in last.value:
last_lineno += last.value.count('\n')
if last.type == token.STRING and '\n' in last.value:
last_column = len(last.value.split('\n')[-1]) + 1
else:
last_column = last.column + len(last.value) + 1
rparen = pytree.Leaf(
token.RPAR, u')', context=('', (last_lineno, last_column)))
lparen.is_pseudo = True
rparen.is_pseudo = True
if isinstance(node, pytree.Node):
node.insert_child(0, lparen)
node.append_child(rparen)
if comment_node:
node.append_child(comment_node)
_AppendFirstLeafTokenSubtype(node, format_token.Subtype.DICTIONARY_VALUE)
else:
clone = node.clone()
for orig_leaf, clone_leaf in zip(node.leaves(), clone.leaves()):
pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf)
new_node = pytree.Node(syms.atom, [lparen, clone, rparen])
node.replace(new_node)
_AppendFirstLeafTokenSubtype(clone, format_token.Subtype.DICTIONARY_VALUE)
def _IsAExprOperator(node):
return isinstance(node, pytree.Leaf) and node.value in {'+', '-'}
def _IsMExprOperator(node):
return isinstance(node,
pytree.Leaf) and node.value in {'*', '/', '%', '//', '@'}
def _IsSimpleExpression(node):
"""A node with only leafs as children."""
return all(isinstance(child, pytree.Leaf) for child in node.children)
|
py | 1a54526372929ba3aee6a3387c64c2ac8bc5f06b | from django.urls import reverse
from parameterized import parameterized, parameterized_class
from rest_framework import status
from api.applications.libraries.case_status_helpers import get_case_statuses
from api.audit_trail.enums import AuditType
from api.audit_trail.models import Audit
from api.cases.enums import CaseTypeEnum
from api.goods.enums import PvGrading
from lite_content.lite_api import strings
from api.parties.enums import PartyType, SubType
from api.staticdata.f680_clearance_types.enums import F680ClearanceTypeEnum
from api.staticdata.statuses.enums import CaseStatusEnum
from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status
from test_helpers.clients import DataTestClient
class EditStandardApplicationTests(DataTestClient):
def setUp(self):
super().setUp()
self.data = {"name": "new app name!"}
def test_edit_unsubmitted_application_name_success(self):
""" Test edit the application name of an unsubmitted application. An unsubmitted application
has the 'draft' status.
"""
application = self.create_draft_standard_application(self.organisation)
url = reverse("applications:application", kwargs={"pk": application.id})
updated_at = application.updated_at
response = self.client.put(url, self.data, **self.exporter_headers)
application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(application.name, self.data["name"])
self.assertGreater(application.updated_at, updated_at)
# Unsubmitted (draft) applications should not create audit entries when edited
self.assertEqual(Audit.objects.count(), 0)
@parameterized.expand(get_case_statuses(read_only=False))
def test_edit_application_name_in_editable_status_success(self, editable_status):
old_name = "Old Name"
application = self.create_draft_standard_application(self.organisation, reference_name=old_name)
self.submit_application(application)
application.status = get_case_status_by_status(editable_status)
application.save()
url = reverse("applications:application", kwargs={"pk": application.id})
updated_at = application.updated_at
response = self.client.put(url, self.data, **self.exporter_headers)
application.refresh_from_db()
audit_qs = Audit.objects.all()
audit_object = audit_qs.first()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(application.name, self.data["name"])
self.assertNotEqual(application.updated_at, updated_at)
self.assertEqual(audit_qs.count(), 2)
self.assertEqual(audit_object.verb, AuditType.UPDATED_APPLICATION_NAME)
self.assertEqual(audit_object.payload, {"new_name": self.data["name"], "old_name": old_name})
@parameterized.expand(get_case_statuses(read_only=True))
def test_edit_application_name_in_read_only_status_failure(self, read_only_status):
application = self.create_draft_standard_application(self.organisation)
self.submit_application(application)
application.status = get_case_status_by_status(read_only_status)
application.save()
url = reverse("applications:application", kwargs={"pk": application.id})
response = self.client.put(url, self.data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_edit_submitted_application_reference_number(self):
""" Test successful editing of an application's reference number when the application's status
is non read-only.
"""
application = self.create_draft_standard_application(self.organisation)
self.submit_application(application)
application.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
application.save()
url = reverse("applications:application", kwargs={"pk": application.id})
updated_at = application.updated_at
audit_qs = Audit.objects.all()
new_ref = "35236246"
update_ref = "13124124"
# Add ref
data = {"reference_number_on_information_form": new_ref, "have_you_been_informed": "yes"}
response = self.client.put(url, data, **self.exporter_headers)
application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
application.reference_number_on_information_form, data["reference_number_on_information_form"],
)
self.assertNotEqual(application.updated_at, updated_at)
# Check add audit
self.assertEqual(audit_qs.count(), 2)
self.assertEqual(AuditType(audit_qs.first().verb), AuditType.UPDATE_APPLICATION_LETTER_REFERENCE)
self.assertEqual(audit_qs.first().payload, {"old_ref_number": "no reference", "new_ref_number": new_ref})
# Update ref
data = {"reference_number_on_information_form": update_ref, "have_you_been_informed": "yes"}
response = self.client.put(url, data, **self.exporter_headers)
# Check update audit
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(audit_qs.count(), 3)
self.assertEqual(AuditType(audit_qs.first().verb), AuditType.UPDATE_APPLICATION_LETTER_REFERENCE)
self.assertEqual(audit_qs.first().payload, {"old_ref_number": new_ref, "new_ref_number": update_ref})
# Update ref with no reference
data = {"reference_number_on_information_form": "", "have_you_been_informed": "yes"}
response = self.client.put(url, data, **self.exporter_headers)
# Check update
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(audit_qs.count(), 4)
self.assertEqual(AuditType(audit_qs.first().verb), AuditType.UPDATE_APPLICATION_LETTER_REFERENCE)
self.assertEqual(audit_qs.first().payload, {"old_ref_number": update_ref, "new_ref_number": "no reference"})
# Remove ref
data = {"reference_number_on_information_form": "", "have_you_been_informed": "no"}
response = self.client.put(url, data, **self.exporter_headers)
# Check update
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(audit_qs.count(), 5)
self.assertEqual(AuditType(audit_qs.first().verb), AuditType.REMOVED_APPLICATION_LETTER_REFERENCE)
self.assertEqual(audit_qs.first().payload, {"old_ref_number": "no reference"})
@parameterized_class(
"case_type", [(CaseTypeEnum.EXHIBITION,), (CaseTypeEnum.GIFTING,), (CaseTypeEnum.F680,),],
)
class EditMODClearanceApplicationsTests(DataTestClient):
def setUp(self):
super().setUp()
self.application = self.create_mod_clearance_application(self.organisation, case_type=self.case_type)
self.url = reverse("applications:application", kwargs={"pk": self.application.id})
self.data = {"name": "abc"}
def test_edit_unsubmitted_application_name_success(self):
updated_at = self.application.updated_at
response = self.client.put(self.url, self.data, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.application.name, self.data["name"])
self.assertNotEqual(self.application.updated_at, updated_at)
# Unsubmitted (draft) applications should not create audit entries when edited
self.assertEqual(Audit.objects.count(), 0)
@parameterized.expand(get_case_statuses(read_only=False))
def test_edit_application_name_in_editable_status_success(self, editable_status):
old_name = self.application.name
self.submit_application(self.application)
self.application.status = get_case_status_by_status(editable_status)
self.application.save()
updated_at = self.application.updated_at
response = self.client.put(self.url, self.data, **self.exporter_headers)
self.application.refresh_from_db()
audit_qs = Audit.objects.all()
audit_object = audit_qs.first()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.application.name, self.data["name"])
self.assertNotEqual(self.application.updated_at, updated_at)
self.assertEqual(audit_qs.count(), 2)
self.assertEqual(audit_object.payload, {"new_name": self.data["name"], "old_name": old_name})
@parameterized.expand(get_case_statuses(read_only=True))
def test_edit_application_name_in_read_only_status_failure(self, read_only_status):
self.submit_application(self.application)
self.application.status = get_case_status_by_status(read_only_status)
self.application.save()
response = self.client.put(self.url, self.data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class EditF680ApplicationsTests(DataTestClient):
def setUp(self):
super().setUp()
self.application = self.create_mod_clearance_application(self.organisation, case_type=CaseTypeEnum.F680)
self.url = reverse("applications:application", kwargs={"pk": self.application.id})
@parameterized.expand(["", "1", "2", "clearance"])
def test_add_clearance_level_invalid_inputs(self, level):
data = {"clearance_level": level}
response = self.client.put(self.url, data=data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@parameterized.expand([p[0] for p in PvGrading.choices])
def test_add_clearance_level_success(self, level):
data = {"clearance_level": level}
response = self.client.put(self.url, data=data, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.application.clearance_level, level)
def test_edit_submitted_application_clearance_level_minor_fail(self):
""" Test successful editing of an application's reference number when the application's status
is non read-only.
"""
application = self.create_mod_clearance_application(self.organisation, CaseTypeEnum.F680)
url = reverse("applications:application", kwargs={"pk": application.id})
self.submit_application(application)
data = {"clearance_level": PvGrading.NATO_CONFIDENTIAL}
response = self.client.put(url, data=data, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(
response.json()["errors"], {"clearance_level": [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_edit_submitted_application_clearance_level_major_success(self):
""" Test successful editing of an application's reference number when the application's status
is non read-only.
"""
application = self.create_mod_clearance_application(self.organisation, CaseTypeEnum.F680)
url = reverse("applications:application", kwargs={"pk": application.id})
self.submit_application(application)
application.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
application.save()
data = {"clearance_level": PvGrading.NATO_CONFIDENTIAL}
response = self.client.put(url, data=data, **self.exporter_headers)
application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(application.clearance_level, data["clearance_level"])
def test_edit_submitted_application_clearance_type_minor_fail(self):
application = self.create_mod_clearance_application(self.organisation, CaseTypeEnum.F680)
url = reverse("applications:application", kwargs={"pk": application.id})
self.submit_application(application)
data = {"types": [F680ClearanceTypeEnum.MARKET_SURVEY]}
response = self.client.put(url, data=data, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(
response.json()["errors"], {"types": [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_edit_submitted_application_clearance_type_major_success(self):
application = self.create_mod_clearance_application(self.organisation, CaseTypeEnum.F680)
url = reverse("applications:application", kwargs={"pk": application.id})
self.submit_application(application)
application.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
application.save()
data = {"types": [F680ClearanceTypeEnum.DEMONSTRATION_IN_THE_UK_TO_OVERSEAS_CUSTOMERS]}
response = self.client.put(url, data=data, **self.exporter_headers)
application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
application.types.get().name, F680ClearanceTypeEnum.DEMONSTRATION_IN_THE_UK_TO_OVERSEAS_CUSTOMERS
)
# Check add audit
self.assertEqual(Audit.objects.all().count(), 2)
audit = Audit.objects.all().first()
self.assertEqual(AuditType(audit.verb), AuditType.UPDATE_APPLICATION_F680_CLEARANCE_TYPES)
self.assertEqual(
audit.payload,
{
"old_types": [F680ClearanceTypeEnum.get_text(F680ClearanceTypeEnum.MARKET_SURVEY)],
"new_types": [F680ClearanceTypeEnum.get_text(type) for type in data["types"]],
},
)
def test_edit_submitted_application_clearance_type_no_data_failure(self):
application = self.create_mod_clearance_application(self.organisation, CaseTypeEnum.F680)
url = reverse("applications:application", kwargs={"pk": application.id})
self.submit_application(application)
application.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
application.save()
data = {"types": []}
response = self.client.put(url, data=data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json()["errors"], {"types": [strings.Applications.F680.NO_CLEARANCE_TYPE]},
)
def test_add_party_to_f680_success(self):
party = {
"type": PartyType.THIRD_PARTY,
"name": "Government of Paraguay",
"address": "Asuncion",
"country": "PY",
"sub_type": SubType.GOVERNMENT,
"website": "https://www.gov.py",
"role": "agent",
"clearance_level": PvGrading.UK_OFFICIAL,
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_add_party_no_clearance_to_f680_failure(self):
party = {
"type": PartyType.THIRD_PARTY,
"name": "Government of Paraguay",
"address": "Asuncion",
"country": "PY",
"sub_type": "government",
"website": "https://www.gov.py",
"role": "agent",
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json()["errors"], {"clearance_level": ["This field is required."]})
class EditExhibitionApplicationsTests(DataTestClient):
def setUp(self):
super().setUp()
self.application = self.create_mod_clearance_application(self.organisation, case_type=CaseTypeEnum.EXHIBITION)
self.exhibition_url = reverse("applications:exhibition", kwargs={"pk": self.application.id})
def test_edit_exhibition_title_in_draft_success(self):
data = {
"title": "new_title",
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["application"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["title"], data["title"])
def test_edit_exhibition_title_in_draft_failure_blank(self):
data = {
"title": "",
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response_data["title"][0], strings.Applications.Exhibition.Error.NO_EXHIBITION_NAME)
def test_edit_exhibition_title_in_draft_failure_none(self):
data = {
"title": None,
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response_data["title"][0], strings.Applications.Exhibition.Error.NO_EXHIBITION_NAME)
def test_edit_exhibition_title_in_draft_failure_not_given(self):
data = {
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response_data["title"][0], strings.Applications.Exhibition.Error.NO_EXHIBITION_NAME)
def test_edit_exhibition_required_by_date_draft_success(self):
data = {
"title": self.application.title,
"required_by_date": "2022-05-15",
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["application"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["required_by_date"], data["required_by_date"])
def test_edit_exhibition_required_by_date_later_than_first_exhibition_date_draft_failure(self):
data = {
"title": self.application.title,
"required_by_date": "2220-05-15",
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data["first_exhibition_date"][0],
strings.Applications.Exhibition.Error.REQUIRED_BY_BEFORE_FIRST_EXHIBITION_DATE,
)
def test_edit_exhibition_required_by_date_draft_failure_blank(self):
data = {
"title": self.application.title,
"required_by_date": "",
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data["required_by_date"][0], strings.Applications.Exhibition.Error.BLANK_REQUIRED_BY_DATE,
)
def test_edit_exhibition_required_by_date_draft_failure_not_given(self):
data = {
"title": self.application.title,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data["required_by_date"][0], strings.Applications.Exhibition.Error.NO_REQUIRED_BY_DATE,
)
def test_edit_exhibition_required_by_date_draft_failure_none(self):
data = {
"title": self.application.title,
"first_exhibition_date": self.application.first_exhibition_date,
"required_by_date": None,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data["required_by_date"][0], strings.Applications.Exhibition.Error.NO_REQUIRED_BY_DATE,
)
def test_edit_exhibition_first_exhibition_date_draft_success(self):
data = {
"title": self.application.title,
"required_by_date": self.application.required_by_date,
"first_exhibition_date": "2030-08-03",
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["application"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["first_exhibition_date"], data["first_exhibition_date"])
def test_edit_exhibition_first_exhibition_date_draft_failure_before_today(self):
data = {
"title": self.application.title,
"required_by_date": self.application.required_by_date,
"first_exhibition_date": "2018-05-03",
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data["first_exhibition_date"][0],
strings.Applications.Exhibition.Error.FIRST_EXHIBITION_DATE_FUTURE,
)
def test_can_not_edit_exhibition_details_in_minor_edit(self):
self.submit_application(self.application)
# same data as success
data = {
"title": "new_title",
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["errors"]["non_field_errors"]
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response_data, [strings.Applications.Generic.INVALID_OPERATION_FOR_NON_DRAFT_OR_MAJOR_EDIT_CASE_ERROR],
)
def test_can_edit_exhibition_details_in_major_edit(self):
self.submit_application(self.application)
self.application.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
self.application.save()
# same data as success
data = {
"title": "new_title",
"required_by_date": self.application.required_by_date,
"first_exhibition_date": self.application.first_exhibition_date,
}
response = self.client.post(self.exhibition_url, data=data, **self.exporter_headers)
response_data = response.json()["application"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["title"], data["title"])
def test_add_third_party_exhibition_clearance_failure(self):
party = {
"type": PartyType.THIRD_PARTY,
"name": "Government of Paraguay",
"address": "Asuncion",
"country": "PY",
"sub_type": "government",
"website": "https://www.gov.py",
"role": "agent",
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json()["errors"], {"bad_request": strings.PartyErrors.BAD_CASE_TYPE})
def test_add_consignee_exhibition_clearance_failure(self):
party = {
"type": PartyType.CONSIGNEE,
"name": "Government of Paraguay",
"address": "Asuncion",
"country": "PY",
"sub_type": "government",
"website": "https://www.gov.py",
"role": "agent",
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json()["errors"], {"bad_request": strings.PartyErrors.BAD_CASE_TYPE})
def test_add_end_user_exhibition_clearance_failure(self):
party = {
"type": PartyType.END_USER,
"name": "Government of Paraguay",
"address": "Asuncion",
"signatory_name_euu": "Government of Paraguay",
"country": "PY",
"sub_type": "government",
"website": "https://www.gov.py",
"role": "agent",
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json()["errors"], {"bad_request": strings.PartyErrors.BAD_CASE_TYPE})
def test_add_ultimate_end_user_exhibition_clearance_failure(self):
party = {
"type": PartyType.ULTIMATE_END_USER,
"name": "Government of Paraguay",
"address": "Asuncion",
"country": "PY",
"sub_type": "government",
"website": "https://www.gov.py",
"role": "agent",
}
url = reverse("applications:parties", kwargs={"pk": self.application.id})
response = self.client.post(url, data=party, **self.exporter_headers)
self.application.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json()["errors"], {"bad_request": strings.PartyErrors.BAD_CASE_TYPE})
|
py | 1a5452df15850a9ad19477c9c5db05f609901ace | #!/usr/bin/env python
from setuptools import setup
setup(name='python-bittrex',
version='0.2.2',
url="https://github.com/ericsomdahl/python-bittrex",
packages=['bittrex'],
modules=['bittrex'],
install_requires=['requests'],
description='Python bindings for bittrex API.',
author='Eric Somdahl',
author_email='[email protected]',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Topic :: Office/Business :: Financial',
])
|
py | 1a5452f3f68d2de928b9c7a07431e2cf76c0b433 | import os
class Config(object):
SECRET_KEY = os.environ.get("SECRET_KEY")
migration_directory = "migrations"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = f'postgresql+psycopg2://{os.environ.get("POSTGRES_USER")}:{os.environ.get("POSTGRES_PASSWORD")}@db/{os.environ.get("POSTGRES_DB")}'
SERVER_PROTOCOL = os.environ.get("SERVER_PROTOCOL")
SERVER_HOSTNAME = os.environ.get("SERVER_HOSTNAME")
SERVER_HOME = f"{SERVER_PROTOCOL}://{SERVER_HOSTNAME}/"
TWITCH_CLIENT_ID = os.environ.get("TWITCH_CLIENT_ID")
TWITCH_CLIENT_SECRET = os.environ.get("TWITCH_CLIENT_SECRET")
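# Usage sketch (assumed application factory, not part of this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)
#   # app.config["SQLALCHEMY_DATABASE_URI"] etc. are then available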
|
py | 1a54532b77dcdfd161f16b9f6c7012cdfd36fb09 | """
snp2counts.py - count number SNPs in geneset
============================================
:Tags: Python
Purpose
-------
read a list of genomic point locations (SNPs) and count the number
of SNPs falling in pre-defined windows.
The windows are given in gtf format.
.. note::
The script will be able to count snps in disjoint segments
using the gene_id field in gtf format. It will not check
if these segments are non-overlapping.
In case of a gene set, make sure to first flatten the gene set by combining
all transcript/exons per gene.
Usage
-----
Type::
python <script_name>.py --help
for command line help.
Command line options
--------------------
.. note::
The script currently uses ``variant`` in two meanings:
1. a variable site (SNP/INDEL)
2. a transcript variant (a transcript sequence that differs from the wild type)
I have started calling the latter ``allele``, though it is not
consistent across the whole script. However, the output is consistent and calls
the former ``variant_site`` and the latter ``allele``.
"""
import sys
import collections
import numpy
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import pysam
import CGAT.IndexedFasta as IndexedFasta
import CGAT.IndexedGenome as IndexedGenome
import CGAT.Genomics as Genomics
import CGAT.GTF as GTF
import alignlib_lite
CdsResult = collections.namedtuple('CdsResult',
'''strand, start, end,
exon_id, exon_start, exon_end,
cds_start, cds_end, cds_phase,
intron_id, intron_start, intron_end,
prev_exon_end, next_exon_start,
cds_seq, cds_seq_start, cds_seq_end,
nc_seq, nc_start, nc_end, exon_skipping
''')
Variant = collections.namedtuple('Variant',
'code,sequence,start,end')
SpliceEffect = collections.namedtuple('SpliceEffect',
'''exon_id, orig_seq, variant_seq''' )
SpliceChange = collections.namedtuple('SpliceChange',
'''exon_id,
is_frameshift,
orig_name, orig_seq5, orig_seq3,
variant_name, variant_seq5, variant_seq3''')
CdsEffect = collections.namedtuple('CdsEffect',
'''exon_id, orig_seq, variant_seq,
codon_orig_seq, codon_variant_seq''' )
CdsVariant = collections.namedtuple('CdsVariant',
'''transcript_id,
cds_start, cds_end,
code,
reference_seq,variant_seq,
is_homozygous''' )
SpliceVariant = collections.namedtuple('SpliceVariant',
'''transcript_id,
intron_id,
nc_start, nc_end,
code,
reference_seq,variant_seq,
is_homozygous''' )
TranscriptVariant = collections.namedtuple('TranscriptVariant',
'''cds_variants,splice_variants''')
TranslationEffect = collections.namedtuple( 'TranslationEffect', '''
ncodons,
ninserted_bases,
ninserted_codons,
ndeleted_bases,
ndeleted_codons,
nincomplete_codons,
noframe_codons,
nwrong_frames,
ncorrected_frames,
first_stop,
nstops,
nunaffected_codons,
nsynonymous_codons,
nnonsynonymous_codons,
nstop_codons''' )
SplicingEffect = collections.namedtuple('SplicingEffect',
'''nintrons,
ncanonical,
nframeshifts,
nnoncanonical,
nunchanged_frames,
ncorrected_frames,
nuncorrected_frames,
nunchanged,
nsynonymous,
nnonsynonymous,
ndisrupted,
nnovel,
nnunknown,
ninserted_codons,
codes,
last_exon''' )
def iterateOverFrames(variant_seq):
    '''yield (is_in_frame, start, end) tuples for segments of *variant_seq*.
    In-frame segments start and end on codon boundaries (multiples of 3);
    consecutive out-of-frame positions are yielded together as a single
    segment.
    '''
frame_at_start = len("".join(variant_seq[:3])) % 3
frame_at_end = 0
start = 0
for x in range(3, len(variant_seq), 3):
var_codon = "".join(variant_seq[x:x + 3]).upper()
lvar = len(var_codon)
frame_at_end = (frame_at_start + lvar) % 3
# print x, frame_at_start, frame_at_end, start
# check for frame change
if frame_at_end != frame_at_start:
if frame_at_start == 0:
# exclude current codon
yield((True, start, x))
start = x
elif frame_at_start != 0 and frame_at_end == 0:
# include current codon
yield((False, start, x + 3))
start = x + 3
else:
# nothing to be done if frame changes
# between out-of-frame frames
pass
frame_at_start = frame_at_end
if start != len(variant_seq):
yield((frame_at_end == 0, start, len(variant_seq)))
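# Worked example (illustrative): a two-base deletion knocks the rest of the
# sequence out of frame, so the leading in-frame block is yielded first and
# the remaining out-of-frame stretch is yielded as one segment:
#
#   var = list("ATGAAA") + ["", "", "G"] + list("TTTCCC")
#   list(iterateOverFrames(var))   # -> [(True, 0, 6), (False, 6, 15)]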
def countEffectsOnTranscript(var_seq, ref_seq,
is_seleno=False):
'''count effects on transcript.
    *var_seq* is a list of strings, one entry per base of the known cds
    given by *ref_seq*, which determines the reference frame.
    Insertions contain more than one character at a position, deletions
    are empty strings.
    The function returns a namedtuple of type TranslationEffect. Counts are
    in terms of bases/codons in the reference sequence.
Counting will continue after a stop-codon is encountered.
Note that codons inserted within a codon do not count as a frame shift. Instead
these will be recorded as an inserted codon.
ncodons
number of codons in transcript
ninserted_bases
number of inserted bases
ninserted_codons
number of fully inserted codons
ndeleted_bases
number of deleted bases
nincomplete_codons
number of incomplete codons at the end
ndeleted_codons
number of fully deleted codons
noframe_codons
number of codons that are out-of-frame. This will include all codons where
at least one base is out-of-frame. In case of an in+del, the codon will
still be out-of-frame.
nwrong_frames
number of times the sequence gets out of frame
ncorrected_frames
number of times the frame is recovered
nstops
number of stop codons in translation
nsynonymous_codons:
number of reference codons that have synonymous mutations
nnonsynonymous_codons:
       number of reference codons that have non-synonymous mutations
nstop_codons
number of reference codons that now encode for a stop
nunaffected_codons
number of reference codons that are still the same
first_stop
codon position of first stop codon in variant sequence
'''
assert len(var_seq) == len(ref_seq)
# values to fill
ncodons = 0
ninserted_bases, ndeleted_bases = 0, 0
ninserted_codons, ndeleted_codons = 0, 0
nincomplete_codons = 0
noframe_bases, noframe_codons = 0, 0
nwrong_frames, ncorrected_frames = 0, 0
nsynonymous_codons, nnonsynonymous_codons, nunaffected_codons = 0, 0, 0
nstop_codons = 0
last_exon_start = 0
# build sequences
var_seq_na = "".join(var_seq).upper()
ref_seq_na = "".join(ref_seq).upper()
lrefseq = len(ref_seq)
lvarseq = len(var_seq_na)
ncodons = lrefseq // 3
# truncate incomplete base at end of reference sequence
if lvarseq % 3 != 0:
var_seq_na = var_seq_na[:-(lvarseq % 3)]
var_seq_aa = Genomics.translate(var_seq_na,
is_seleno=is_seleno)
# check protein coding sequence for the first stop
nstops = 0
first_stop = len(var_seq_aa)
ntruncated_codons_stop = 0
for pos, c in enumerate(var_seq_aa):
if c == "X":
nstops += 1
first_stop = min(pos, first_stop)
# start position for out-of-frame region
var_pos = 0
map_ref2var = alignlib_lite.py_makeAlignmentVector()
alignator = alignlib_lite.py_makeAlignatorDPFull(alignlib_lite.py_ALIGNMENT_GLOBAL,
-10.0,
-2.0)
was_in_frame = True
for in_frame, start, end in iterateOverFrames(var_seq):
varseq = "".join(var_seq[start:end]).upper()
refseq = "".join(ref_seq[start:end]).upper()
# print in_frame, start, end
if in_frame:
for x in range(start, end, 3):
# ignore incomplete codons at the end:
if x + 3 > lrefseq:
break
var_codon = "".join(var_seq[x:x + 3]).upper()
assert len(var_codon) % 3 == 0
ref_codon = "".join(ref_seq[x:x + 3]).upper()
assert len(ref_codon) == 3
d = len(var_codon) - 3
for y in var_seq[x:x + 3]:
if y == "":
ndeleted_bases += 1
if len(y) > 1:
ninserted_bases += len(y) - 1
if var_codon == "":
ndeleted_codons -= d // 3
elif len(var_codon) > len(ref_codon):
# deal with in-frame inserted codons
ninserted_codons += d // 3
nunaffected_codons += 1
elif var_codon == ref_codon:
nunaffected_codons += 1
else:
var_aa = Genomics.translate(var_codon)
ref_aa = Genomics.translate(ref_codon)
if var_aa == "X":
nstop_codons += 1
elif ref_aa == var_aa:
nsynonymous_codons += 1
else:
nnonsynonymous_codons += 1
var_pos += len(var_codon)
else:
# count deletions/insertions in the variant
for x in range(start, end, 3):
var_codon = "".join(var_seq[x:x + 3]).upper()
# count insertion and deletion separately to avoid them
# compensating
for y in var_seq[x:x + 3]:
if y == "":
ndeleted_bases += 1
if len(y) > 1:
ninserted_bases += len(y) - 1
ninserted_codons += (len(y) - 1) // 3
# completely deleted codons
if var_codon == "":
ndeleted_codons += 1
else:
noframe_codons += 1
# count effect on protein coding sequence
var_frag_aa = Genomics.translate(varseq)
ref_frag_aa = Genomics.translate(refseq)
# align the reference and variant peptide fragments to classify codon changes
var_s = alignlib_lite.py_makeSequence(var_frag_aa)
ref_s = alignlib_lite.py_makeSequence(ref_frag_aa)
diff_length = abs(len(ref_frag_aa) - len(var_frag_aa))
# very heuristic - might lead to strange effects
alignment_band = max(10, diff_length * 2)
iterator = alignlib_lite.py_makeIterator2DBanded(
-alignment_band, +alignment_band)
alignlib_lite.py_setDefaultIterator2D(iterator)
E.debug("alignment: reference(%i) with variant(%i) (diff=%i) within diagonals %i and %i" %
(len(ref_frag_aa), len(var_frag_aa), diff_length, -alignment_band, alignment_band))
alignator.align(map_ref2var, ref_s, var_s)
# print alignlib_lite.py_AlignmentFormatExplicit( map_ref2var,
# ref_s, var_s )
for x, ref_aa in enumerate(ref_frag_aa):
p = map_ref2var.mapRowToCol(x)
if p < 0:
continue
var_aa = var_frag_aa[p]
if var_aa == ref_aa:
nsynonymous_codons += 1
else:
nnonsynonymous_codons += 1
nwrong_frames += 1
ncorrected_frames += 1
was_in_frame = in_frame
# if var_pos > first_stop * 3: break
if lvarseq % 3 != 0:
nincomplete_codons += 1
# reduce corrected frames by one if we do not end on frame
if not was_in_frame and lvarseq % 3 != 0:
ncorrected_frames -= 1
return TranslationEffect._make((ncodons,
ninserted_bases,
ninserted_codons,
ndeleted_bases,
ndeleted_codons,
nincomplete_codons,
noframe_codons,
nwrong_frames,
ncorrected_frames,
first_stop,
nstops,
nunaffected_codons,
nsynonymous_codons,
nnonsynonymous_codons,
nstop_codons))
def getCDSPosition(exons, start, end, fasta=None, lcontig=None):
'''return cds information for a (positive strand) genomic position.
exons is a list of exons in GTF format.
start, end: are the coordinates of the variant in forward strand coordinates.
if the first exon is not in frame, cds_start and cds_end will not be
multiples of 3, but that is correct, as cds_start and cds_end
start counting from 0.
If a region spans a whole intron, the region will be treated as
a single coding sequence variant. Such deletions usually concern
short frame-shifting introns.
'''
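# Illustrative call sketch (hypothetical coordinates, not part of the original
# code); ``exons`` is the list of GTF records of a single transcript, e.g. one
# item yielded by GTF.transcript_iterator() as used further below:
#
#   r = getCDSPosition(exons, start=12345, end=12346, fasta=fasta)
#   if r is not None and r.cds_start is not None:
#       # the variant overlaps coding sequence; r.cds_seq holds the covering codons
#       print(r.cds_start, r.cds_end, r.cds_phase)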
strand = exons[0].strand
contig = exons[0].contig
is_positive_strand = Genomics.IsPositiveStrand(strand)
if is_positive_strand:
coordinates = [(e.start, e.end, int(e.frame)) for e in exons]
else:
if not fasta and not lcontig:
raise ValueError(
"no fasta or lcontig option given for a negative strand transcript")
if fasta:
# convert all to negative strand coordinates
lcontig = fasta.getLength(contig)
start, end = lcontig - end, lcontig - start
coordinates = [(lcontig - e.end, lcontig - e.start, int(e.frame))
for e in exons]
coordinates.sort()
# phase is the complement of frame (i.e. the position within the codon, not
# the number of bases to the next codon)
cds_start, cds_end, cds_phase = None, None, None
# coordinates for previous/next exons for snps spanning splice sites
prev_exon_end, next_exon_start = None, None
# intron positions
intron_start, intron_end = None, None
# start, end of feature within coding sequence
# sequence is a sequence of all codons that cover the feature
cds_seq_start, cds_seq_end, cds_seq = None, None, None
# flag to denote exon skipping
exon_skipping = False
# start, end of feature within non-coding sequence
nc_seq_start, nc_seq_end, nc_seq = None, None, None
exon_id = None
# empty result in case of no overlap
if end <= coordinates[0][0] or start >= coordinates[-1][1]:
return None
intron_id, exon_id = None, 0
nexons = len(coordinates)
last_exon = nexons - 1
start_phase = (3 - coordinates[0][2]) % 3
# start within frame
cds_start = start_phase
# find exon overlapping the region or exon immediately after it
while exon_id < len(coordinates):
exon_start, exon_end, exon_frame = coordinates[exon_id]
if start < exon_end:
break
cds_start += exon_end - exon_start
exon_id += 1
if end <= exon_start:
# overlap with intron only
cds_start, cds_end = None, None
if exon_id > 0:
intron_start, intron_end = coordinates[exon_id - 1][1], exon_start
nc_seq_start, nc_seq_end = start, end
intron_id = exon_id - 1
else:
# overlap with exon
#
# position of the first complete codon in this exon:
first_full_codon_start = exon_start + exon_frame
# correction of frame at end of exon due to frame-shifting codons
frame_correction = False
# special treatment if region spans the complete intron
if exon_id < last_exon and end > coordinates[exon_id + 1][0]:
if end > coordinates[exon_id + 1][1]:
raise ValueError(
"can not deal with variants spanning multiple exons")
# simply increase the current exon
exon_end = coordinates[exon_id + 1][1]
# in order to adjust the frame, add the intron towards the exon
frame_correction = True
cds_x, cds_y = max(start, exon_start), min(end, exon_end)
cds_start += cds_x - exon_start
cds_end = cds_start + cds_y - cds_x
cds_phase = (3 - exon_frame + cds_x - exon_start) % 3
# print "exon_id=", exon_id, "start=", start, "end=", end, \
# "codon_start", cds_x, "codon_end", cds_y, \
# "cds_phase=", cds_phase, "cds_start", cds_start, "cds_end", cds_end, \
# "exon_start=",exon_start, "exon_end=", exon_end, "exon_frame=", exon_frame
# incomplete bases in a codon at 3' end of this feature:
# = frame of next feature
# (3 - ( exon_end - exon_start - exon_frame) % 3 ) % 3
if exon_id < last_exon:
last_full_codon_end = exon_end - \
(3 - coordinates[exon_id + 1][2]) % 3
if not frame_correction:
assert (3 - ( exon_end - exon_start - exon_frame + frame_correction ) % 3 ) % 3 == coordinates[exon_id + 1][2], \
"frame mismatch between exons %i and %i" % (
exon_id, exon_id + 1)
else:
last_full_codon_end = 0
# link to previous/next exons in the case of split codons
is_split_start = exon_start <= cds_x < first_full_codon_start and exon_id > 0
is_split_end = last_full_codon_end < cds_y <= exon_end and exon_id < last_exon
if is_split_start:
prev_exon_end = coordinates[exon_id - 1][1]
if is_split_end:
next_exon_start = coordinates[exon_id + 1][0]
next_frame = coordinates[exon_id + 1][2]
# sort out the sequence
cds_seq = []
# position of variant in cds_sequence
cds_seq_start, cds_seq_end = cds_phase, cds_y - cds_x + cds_phase
if fasta:
# link to previous/next exons in the case of split codons
if is_split_start:
codon_start = prev_exon_end - (3 - exon_frame) % 3
codon_end = prev_exon_end
cds_seq.insert(0, fasta.getSequence(contig,
strand,
codon_start,
codon_end))
codon_start = cds_x - cds_phase
codon_end = cds_y + (3 - (cds_end % 3)) % 3
# deal with incomplete codon at start
if codon_start < exon_start and exon_id == 0:
assert exon_frame != 0
cds_seq.extend(list("X" * (exon_start - codon_start)))
# print "exon_id=", exon_id, "start=", start, "end=", end, "codon_start", codon_start, "codon_end", codon_end, "cdsx", cds_x, "cdsy", cds_y, cds_phase, "cds_start", cds_start, "cds_end", cds_end, \
# "exon_start=",exon_start, "exon_end=", exon_end, "start_phase=", start_phase, "first_start", first_full_codon_start, "last_end", last_full_codon_end, \
# "split_start", is_split_start, "split_end", is_split_end
cds_seq.extend(list(fasta.getSequence(contig,
strand,
max(exon_start,
codon_start),
min(exon_end, codon_end))))
# fill up, if last codon is incomplete
if codon_end > exon_end and exon_id == last_exon:
cds_seq.extend(list("X" * (codon_end - exon_end)))
if is_split_end:
cds_seq.append(fasta.getSequence(contig,
strand,
next_exon_start,
next_exon_start + next_frame
))
cds_seq = "".join(cds_seq)
lnoncoding = (end - start) - (cds_y - cds_x)
if start <= exon_start and end >= exon_end:
# special treatment if region spans the complete exon
if exon_id < nexons - 1:
nc_seq_start, nc_seq_end = start, end
intron_start = prev_exon_end
intron_end = next_exon_start
intron_id = exon_id - 1
else:
# unless it is the last exon - truncate, but only
# if it extends into the intron
if start < exon_start:
intron_start, intron_end = prev_exon_end, exon_start
nc_seq_start, nc_seq_end = start, exon_start
intron_id = exon_id - 1
exon_skipping = True
elif start < exon_start and exon_id > 0:
# disrupted intronic sequence
intron_start, intron_end = coordinates[exon_id - 1][1], exon_start
nc_seq_start, nc_seq_end = exon_start - lnoncoding, exon_start
intron_id = exon_id - 1
elif end > exon_end and exon_id < nexons - 1:
# disrupted intronic sequence
intron_start, intron_end = exon_end, coordinates[exon_id + 1][0]
nc_seq_start, nc_seq_end = exon_end, exon_end + lnoncoding
intron_id = exon_id
if fasta and nc_seq_start is not None:
nc_seq = fasta.getSequence(contig, strand, nc_seq_start, nc_seq_end)
# subtract starting frame
if cds_start is not None:
cds_start -= start_phase
cds_end -= start_phase
return CdsResult._make((strand, start, end,
exon_id, exon_start, exon_end,
cds_start, cds_end, cds_phase,
intron_id, intron_start, intron_end,
prev_exon_end, next_exon_start,
cds_seq, cds_seq_start, cds_seq_end,
nc_seq, nc_seq_start, nc_seq_end,
exon_skipping))
class Counter(object):
'''annotator for single bases in the genome.'''
mHeader = ()
def __init__(self, fasta=None, pattern="%s", *args, **kwargs):
self.mFasta = fasta
self.mFilenamePattern = pattern
def __str__(self):
return ""
def getHeader(self):
'''return header'''
return "\t".join(self.mHeader)
class CounterGenes(Counter):
'''count SNPs per gene that it overlaps with.'''
mHeader = ["exons_%s" % x for x in ("ntranscripts", "nused", "pos")]
def __init__(self, filename_exons, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
exons = IndexedGenome.IndexedGenome()
nexons = 0
inf = IOTools.openFile(filename_exons, "r")
for g in GTF.iterator(inf):
exons.add(g.contig, g.start, g.end, g)
nexons += 1
inf.close()
self.mExons = exons
E.info("indexed %i exons on %i contigs" % (nexons, len(exons)))
# create counter
self.mCounts = collections.defaultdict(int)
def update(self, snp):
'''update with snp.'''
exons = list(self.mExons.get(snp.chromosome, snp.pos, snp.pos + 1))
if exons:
for start, end, gtf in exons:
self.mCounts[gtf.gene_id] += 1
def writeTable(self, outfile):
outfile.write("gene_id\tnsnps\n")
for key in sorted(self.mCounts.keys()):
outfile.write("\t".join((key, str(self.mCounts[key]))) + "\n")
class CounterTranscripts(Counter):
'''count SNPs per transcript that they overlap with.
Variants are not phased, so it is not always clear which of the two alleles of a transcript
is affected. Thus, the following heuristic is adopted:
1 Only homozygous variants: locus flagged as homozygous. Both alleles are assumed to be the same and
different from the wild type.
2 Only heterozygous variants: locus flagged as heterozygous. One allele is assumed to be the wild type,
the other one is a variant.
3 Mixture of homo- and heterozygous variants: locus flagged as mixture. A mixed allele is constructed with
all variants.
Columns
transcript_id
the transcript_id
cds_len
length of cds in bases
ncodons
number of codons in wild type
last_exon_start
start (cds coordinates) of last exon (useful for detecting nonsense-mediated decay)
max_variants
maximum number of variants per site
nvariant_sites
number of variable sites within the transcript
genotype
the genotype
nalleles
number of variants (1 = either homozygote variant or heterozygote variant/wild type)
stop_min
number of codons truncated either due to disrupted splice signal and/or stop codon.
This is the minimum between two transcripts. If the wildtype is still present,
this value will be 0.
stop_max
number of codons truncated either due to disrupted splice signal or stop codon.
This is the maximum between two transcripts.
Columns are prefixed with ``cds_`` and ``splice_`` for cds and splice variants,
respectively. Without a prefix, the values refer to the effects of cds and splice variants
combined.
'''
# outfile.write(
mHeader = ["transcript_id",
"cds_len",
"ncodons",
"last_exon_start",
"cds_max_variants", "cds_nvariant_sites", "cds_genotype", "cds_nalleles",
"cds_stop_min", "cds_stop_max",
"splice_max_variants", "splice_nvariant_sites", "splice_genotype", "splice_nalleles",
"splice_stop_min", "splice_stop_max",
"max_vars", "nvariant_sites", "genotype", "nalleles",
"stop_min", "stop_max"]
# add this area to check for overlap with splice signals
# This should be larger than the longest deletion.
mSize = 500
# introns smaller than this size are considered to be frameshifts
mMinIntronSize = 5
def __init__(self, filename_exons, seleno, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
transcripts = IndexedGenome.IndexedGenome()
self.mExons = {}
nexons = 0
ntranscripts = 0
inf = IOTools.openFile(filename_exons, "r")
for gtfs in GTF.transcript_iterator(GTF.iterator(inf)):
start, end = min([x.start for x in gtfs]), max(
[x.end for x in gtfs])
transcripts.add(gtfs[0].contig, start, end, gtfs)
nexons += len(gtfs)
ntranscripts += 1
self.mExons[gtfs[0].transcript_id] = gtfs
inf.close()
self.mTranscripts = transcripts
self.mSeleno = seleno
E.info("indexed %i transcripts and %i exons on %i contigs" %
(ntranscripts, nexons, len(transcripts)))
E.info("received %i selenoprotein transcripts" % (len(self.mSeleno)))
# create counter
self.mCounts = collections.defaultdict(int)
self.mOutfileIntron = IOTools.openFile(
self.mFilenamePattern % "intron", "w")
self.mOutfileIntron.write(
"transcript_id\tcontig\tsnp_position\tvariant_type\tvariant_code\tvariant_seq\texon_id\tnexon\tcode\torig_name\torig_seq5\torig_seq3\tvariant_name\tvariant_seq5\tvariant_seq3\tintron_start\tintron_end\tstrand\tnc_start\tnc_end\n")
self.mOutfileCds = IOTools.openFile(self.mFilenamePattern % "cds", "w")
self.mOutfileCds.write("\t".join((
"transcript_id",
"contig",
"snp_position",
"reference",
"variant_type",
"variant_code",
"variant_bases",
"exon_id",
"nexons",
"code",
"orig_seq",
"orig_na",
"orig_codons",
"variant_seq",
"variant_na",
"variant_codons",
"cds_phase",
"cds_start",
"cds_end",
"cds_len")) + "\n")
self.mOutfileTranscripts = IOTools.openFile(
self.mFilenamePattern % "translation", "w")
self.mOutfileTranscripts.write(
"transcript_id\tvariant_id\tlast_exon_start\t%s\tseq_na\tseq_aa\n" % "\t".join(TranslationEffect._fields))
self.mOutfileSplicing = IOTools.openFile(
self.mFilenamePattern % "splicing", "w")
self.mOutfileSplicing.write(
"transcript_id\tvariant_id\t%s\n" % "\t".join(SplicingEffect._fields))
self.mTranscriptVariants = {}
def getVariantRange(self, snp):
'''return effective range of a variant.
The effective range is a single base in case of a SNP.
For an insertion it is two bases, i.e. the base before and the base
after the insertion point.
For a deletion it covers the deleted bases.
'''
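# Illustrative sketch of the resulting ranges (hypothetical position 100):
#   substitution at pos 100      -> (100, 101)  the base itself
#   insertion  "+AG" at pos 100  -> (100, 102)  base before and after the insertion
#   deletion   "-AG" at pos 100  -> (101, 103)  the deleted bases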
contig = snp.chromosome
lcontig = self.mFasta.getLength(contig)
reference_base = snp.reference_base
start, end = lcontig, 0
# process according to variant type
# indels need to be treated differently from SNPs as
# they have larger effects
if reference_base == "*":
variants = snp.genotype.split("/")
for variant in variants:
if variant[0] == "*":
continue
elif variant[0] == "+":
start = min(start, snp.pos)
end = max(end, snp.pos + 2)
elif variant[0] == "-":
# deletions are after the base denoted by snp.pos
start = min(start, snp.pos + 1)
# pos + 1 + len(var) - 1 = pos + len(var)
end = max(end, snp.pos + len(variant))
else:
raise ValueError("unknown variant sign '%s'" % variant[0])
else:
# a single base SNP
start = min(start, snp.pos)
end = max(end, snp.pos + 1)
start, end = max(0, start), min(end, lcontig)
if start == end:
return None, None
else:
return start, end
def getSequence(self, snp, r, variant):
'''return sequence of snp taking into account strandedness of transcript.'''
contig = snp.chromosome
# collect sequences (resolving strandedness)
reference_base = snp.reference_base
if reference_base != "*":
variant_bases = Genomics.resolveAmbiguousNA(variant.sequence)
assert len(variant_bases) == 1
else:
variant_bases = []
variant_seq = variant.sequence
if not Genomics.IsPositiveStrand(r.strand):
variant_seq = Genomics.complement(variant_seq)
variant_bases = [
Genomics.complement(base) for base in variant_bases]
reference_base = Genomics.complement(reference_base)
return reference_base, variant_seq, variant_bases
def collectSplicingEffects(self, snp, r, variant, reference_base, variant_seq, variant_bases):
'''compute effects of a variant on a transcript.
The effects are independent of any other variants.
return a list of splicing effects.
'''
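# Illustrative sketch (not part of the original code): for a substitution that
# hits the first base of a canonical splice donor, this method would typically
# append SpliceEffect(intron_id, <reference base>, <variant base>) and, after
# re-typing the mutated intron with Genomics.GetIntronType(), a SpliceChange
# entry recording the transition from the canonical splice name to "unknown",
# i.e. a disrupted splice site.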
intron_effects, intron_changes = [], []
# collect splicing effects only
if r.nc_start is None:
return intron_effects, intron_changes
contig = snp.chromosome
lvariant = len(variant_seq)
intron_seq = self.mFasta.getSequence(
contig, r.strand, r.intron_start, r.intron_end).upper()
is_frameshift = len(intron_seq) < self.mMinIntronSize
intron_name, intron_seq5, intron_seq3 = Genomics.GetIntronType(
intron_seq)
variant_introns = []
if (r.nc_start - r.intron_start) >= len(intron_seq5) and (r.intron_end - r.nc_end) >= len(intron_seq3):
# intronic variant - ignore if not actually overlapping with splice
# site
pass
else:
E.debug("cds=%s, variant=%s" % (str(r), str(snp)))
variant_intron_seq = list(intron_seq)
x, y = r.nc_start - r.intron_start, r.nc_end - r.intron_start
if variant.code == "=":
# add SNP
assert y - x == 1, "expect only single base substitutions"
if intron_seq[x:y] != reference_base:
raise ValueError("expected=%s, got=%s:%s:%s, snp=%s, cds=%s" %
(reference_base,
intron_seq[x - 3:x],
intron_seq[x:y],
intron_seq[y:y + 3],
str(snp), str(r)))
# record multiple substitutions
for base in variant_bases:
if base != reference_base:
variant_intron_seq[x:y] = base
variant_introns.append("".join(variant_intron_seq))
intron_effects.append(SpliceEffect._make((r.intron_id,
reference_base,
base)))
elif variant.code == "+":
# add insertion
# If the insertion is at an intron/exon boundary,
# y - x == 1. In this case attribute it to a
# coding sequence change and ignore it here.
if y - x == 2:
# python inserts before the index
variant_intron_seq[y:y] = list(variant_seq)
variant_introns.append("".join(variant_intron_seq))
intron_effects.append(SpliceEffect._make((r.intron_id,
"",
variant_seq)))
else:
if y - x != 1:
raise ValueError(
"expected an insert of length 1 or 2, got %i for %s" % (y - x, str(snp)))
elif variant.code == "-":
# add deletion
if x == 0 and y == r.intron_end - r.intron_start:
# deletion covers full length of intron
if r.intron_id < r.exon_id:
# truncate from start if intron preceding exon
xx, yy = 0, y - x
else:
# truncate from end if intron succeeding exon
xx, yy = lvariant - (y - x), lvariant
elif x == 0:
# deletion at 3' end of intron: truncate from the end
xx, yy = lvariant - (y - x), lvariant
else:
xx, yy = 0, y - x
if intron_seq[x:y] != variant_seq[xx:yy]:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, %i:%i, snp=%s, cds=%s" %
(variant_seq[xx:yy],
intron_seq[x - 3:x],
intron_seq[x:y],
intron_seq[y:y + 3],
x, y,
xx, yy,
str(snp), str(r)))
intron_effects.append(SpliceEffect._make((r.intron_id,
variant_intron_seq[
x:y],
"")))
del variant_intron_seq[x:y]
variant_introns.append("".join(variant_intron_seq))
for variant_intron_seq in variant_introns:
variant_intron_name, variant_intron_seq5, variant_intron_seq3 = Genomics.GetIntronType(
variant_intron_seq)
# if intron is a frameshift, the full intron seq is returned
#if is_frameshift: reference_seq, variant_seq = intron_seq, variant_inseq
intron_changes.append(SpliceChange._make((r.exon_id - 1,
is_frameshift,
intron_name, intron_seq5, intron_seq3,
variant_intron_name, variant_intron_seq5, variant_intron_seq3)))
return intron_effects, intron_changes
def collectCodingEffects(self, snp, r, variant, reference_base, variant_seq, variant_bases):
'''compute effects of a variant on a transcript.
The effects are independent of any other variants.
return a list of cds effects
'''
coding_effects = []
# process coding effects, return empty if none
if r.cds_start is None:
return coding_effects
contig = snp.chromosome
lvariant = len(variant_seq)
cds_seq = r.cds_seq.upper()
variant_cds_seq = list(cds_seq)
x, y = r.cds_seq_start, r.cds_seq_end
if len(cds_seq) % 3 != 0:
raise ValueError("expected codon sequence, got=%s (%i), %s:%s:%s, %i:%i, snp=%s, cds=%s" %
(cds_seq,
len(cds_seq),
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
str(snp), str(r)))
if variant.code == "=":
# process substitution
assert y - x == 1, "expect only single base substitutions"
if cds_seq[x:y] != reference_base:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, snp=%s, cds=%s" %
(reference_base,
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
str(snp), str(r)))
# record multiple substitutions
for base in variant_bases:
if base != reference_base:
variant_cds_seq[x] = base
coding_effects.append(CdsEffect._make((r.exon_id,
reference_base,
base,
cds_seq,
"".join(
variant_cds_seq),
)))
elif variant.code == "+":
# add insertion - python inserts before index
variant_cds_seq[y:y] = variant_seq
coding_effects.append(CdsEffect._make((r.exon_id,
"",
variant_seq,
cds_seq,
"".join(variant_cds_seq))))
elif variant.code == "-":
# add deletion
if r.exon_skipping:
xx, yy = r.exon_start - r.nc_start, lvariant - \
(r.nc_end - r.exon_end)
elif r.nc_start is not None:
# deletion at exon boundary
if r.intron_id < r.exon_id:
# deletion at 5' end of exon, take only 3' bases of variant
xx, yy = lvariant - (y - x), lvariant
else:
# deletion at 3' end of exon, take only 5' bases of variant
xx, yy = 0, y - x
# removed the following condition: "and r.nc_start != r.intron_start:"
# deletion at 3' end of intron boundary - delete last bases
# xx, yy = lvariant - (y-x), lvariant
elif r.cds_start == 0:
# deletion at first codon - take only 3' bases of variant
xx, yy = lvariant - (y - x), lvariant
else:
# deletion after - delete last bases
xx, yy = 0, y - x
if cds_seq[x:y] != variant_seq[xx:yy]:
raise ValueError("expected=%s, got=%s:%s:%s, %i:%i, %i:%i, snp=%s, cds=%s" %
(variant_seq[xx:yy],
cds_seq[:x],
cds_seq[x:y],
cds_seq[y:],
x, y,
xx, yy,
str(snp), str(r)))
del variant_cds_seq[x:y]
coding_effects.append(CdsEffect._make((r.exon_id,
cds_seq[x:y],
"",
cds_seq,
"".join(variant_cds_seq))))
return coding_effects
def update(self, snp):
'''update with snp.'''
# get effective range of snp
snp_start, snp_end = self.getVariantRange(snp)
# ignore snps that are out-of-range
if snp_start is None:
return
contig = snp.chromosome
transcripts = list(
self.mTranscripts.get(snp.chromosome, snp_start, snp_end))
if not transcripts:
return
reference_base = snp.reference_base
# collect all variants at this position
# indels and deletions might affect more than this
# position
variants_to_test = []
variant_types = []
is_homozygous = True
if reference_base == "*":
variants = snp.genotype.split("/")
codes = [x[0] for x in variants]
# variant is heterozygous if the wildtype is present or the codes/sequences
# of the variants are not identical.
if ("*" in codes) or (variants[0] != variants[1]):
is_homozygous = False
# note that I found an inconsistency between the genotype field and the second-allele field
# genotype='-GGG/-GGG', first_allele='-GGG', second_allele='-GGGG'
# In other cases it is correct, even with longer deletions.
for variant in set(variants):
if variant[0] == "*":
variant_types.append("W")
elif variant[0] == "+":
variant_types.append("I")
# insertions affect the base before and after the insertion
variants_to_test.append(
Variant._make((variant[0], variant[1:], snp.pos, snp.pos + 1)))
elif variant[0] == "-":
variant_types.append("D")
# deletions are after the base denoted by snp.pos
start = snp.pos + 1
# pos + 1 + len(var) - 1 = pos + len(var)
end = snp.pos + len(variant)
variants_to_test.append(
Variant._make((variant[0], variant[1:], start, end)))
else:
if snp.genotype in 'ACGTacgt':
# homozygous substitution
variant_types.append("O")
else:
# heterozygous substitution
variant_types.append("E")
is_homozygous = False
for base in Genomics.resolveAmbiguousNA(snp.genotype).upper():
if base == snp.reference_base:
continue
variants_to_test.append(
Variant._make(("=", base, snp.pos, snp.pos + 1)))
self.mVariantTypes = variant_types
E.debug("snp: %s:%i variants_to_test=%i, transcripts=%i, is_homozygous=%s" %
(snp.chromosome, snp.pos,
len(variants_to_test), len(transcripts), str(is_homozygous)))
counts = E.Counter()
# intersect all transcripts in the gene with the possible substitutions
for transcript_start, transcript_end, exons in transcripts:
transcript_id = exons[0].transcript_id
all_splice_changes, all_splice_effects, all_cds_effects = [
], [], []
for variant in variants_to_test:
E.debug("snp: %s:%i variant=%i:%i:%s:%s, transcript=%s" % (snp.chromosome, snp.pos,
variant.start,
variant.end,
variant.code,
variant.sequence,
transcript_id))
r = getCDSPosition(exons,
variant.start, variant.end,
self.mFasta)
if not r:
continue
reference_base, variant_seq, variant_bases = self.getSequence(
snp, r, variant)
# assert variant_seq.lower() in r.cds_seq.lower(), \
# "variant sequence %s not in cds seq %s: %s" % (variant_seq, r.cds_seq, str(r))
cds_effects = self.collectCodingEffects(snp, r, variant,
reference_base, variant_seq, variant_bases)
splice_effects, splice_changes = self.collectSplicingEffects(snp, r, variant,
reference_base, variant_seq, variant_bases)
if len(splice_effects) + len(cds_effects) == 0:
counts.no_effect += 1
continue
all_splice_effects.extend(splice_effects)
all_cds_effects.extend(cds_effects)
all_splice_changes.extend(splice_changes)
if all_splice_changes:
self.outputSpliceEffects(
snp, exons, variant, all_splice_changes, r)
if all_cds_effects:
self.outputCDSEffects(snp, exons, variant, all_cds_effects, r)
if len(all_splice_effects) + len(all_cds_effects) == 0:
continue
self.updateVariantTranscripts(transcript_id, snp,
exons, variant,
all_splice_effects,
all_cds_effects,
r, is_homozygous)
def updateVariantTranscripts(self, transcript_id, snp, exons, variant, splice_effects, cds_effects, r, is_homozygous):
'''collect variation for each transcript.
'''
if transcript_id not in self.mTranscriptVariants:
self.mTranscriptVariants[
transcript_id] = TranscriptVariant._make(([], []))
v = self.mTranscriptVariants[transcript_id]
for e in cds_effects:
# splice variants cause all residues after a modified splice site
# to be deleted
v.cds_variants.append(
CdsVariant._make((transcript_id,
r.cds_start, r.cds_end,
variant.code,
e.orig_seq, e.variant_seq,
is_homozygous)))
for e in splice_effects:
# for splice effects save the full snps to sort out the intron sequence later.
# due to deletions, etc, the resolving might be difficult.
v.splice_variants.append(
SpliceVariant._make((transcript_id,
e.exon_id,
r.nc_start - r.intron_start, r.nc_end -
r.intron_start,
variant.code,
e.orig_seq, e.variant_seq,
is_homozygous)))
def getSpliceCode(self, splice_name, new_splice_name):
'''assign one-letter code to a splice-signal change.'''
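# Illustrative mapping (names other than "unknown" depend on
# Genomics.GetIntronType(); name_a and name_b are hypothetical labels):
#   (name_a, name_a)       -> "S"  synonymous change
#   (name_a, name_b)       -> "N"  non-synonymous change
#   (name_a, "unknown")    -> "D"  disrupted splice site
#   ("unknown", name_a)    -> "C"  newly created splice site
#   ("unknown", "unknown") -> "U"  unknown splice signal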
if splice_name == "unknown" and new_splice_name == "unknown":
# unknown splice signal
code = "U"
elif new_splice_name == "unknown":
# disrupted splice site
code = "D"
elif splice_name == "unknown":
# newly created splice site
code = "C"
elif splice_name == new_splice_name:
# synonymous change
code = "S"
elif splice_name != new_splice_name:
# non-synonymous change
code = "N"
return code
def outputSpliceEffects(self, snp, exons, variant, splice_effects, r):
'''output effects of variants affecting splice sites.'''
for e in splice_effects:
self.mOutfileIntron.write("%s\n" % "\t".join(
(exons[0].transcript_id,
snp.chromosome,
"%i" % snp.pos,
",".join(self.mVariantTypes),
variant.code,
variant.sequence,
"%i" % e.exon_id,
"%i" % len(exons),
self.getSpliceCode(e.orig_name, e.variant_name),
str(e.orig_name),
e.orig_seq5,
e.orig_seq3,
str(e.variant_name),
e.variant_seq5,
e.variant_seq3,
"%i" % r.intron_start,
"%i" % r.intron_end,
r.strand,
"%i" % r.nc_start,
"%i" % r.nc_end,
)))
def getSubstitutionCode(self, original_codons, variant_codons):
'''assign a one-letter code to a codon change.
'''
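# Illustrative examples (amino-acid strings as returned by Genomics.translate()):
#   ("K", "K")  -> "S"  synonymous substitution
#   ("K", "R")  -> "N"  non-synonymous substitution
#   ("K", "X")  -> "X"  premature stop codon introduced
#   ("K", "!")  -> "F"  frameshifting indel
#   ("K", "KQ") -> "I"  in-frame insertion keeping the original codon
#   ("KQ", "K") -> "D"  in-frame deletion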
if variant_codons == "!":
# variant creates a frameshift
code = "F"
elif original_codons == variant_codons:
# a synonymous substitution
code = "S"
elif "X" in variant_codons:
# variant creates a stop codon
code = "X"
elif "U" in variant_codons:
# variant creates a stop codon - that might be a selenocysteine
code = "U"
elif original_codons in variant_codons:
# a synonymous insertion
code = "I"
elif len(variant_codons) == 0 or variant_codons in original_codons:
# a synonymous deletion
code = "D"
else:
# a non-synonymous variant (substitution or indel)
# removing the original codon and replacing it with others
code = "N"
return code
def outputCDSEffects(self, snp, exons, variant, cds_effects, r):
cds_len = sum([x.end - x.start for x in exons])
is_seleno = exons[0].transcript_id in self.mSeleno
for e in cds_effects:
assert len(e.codon_orig_seq) % 3 == 0
assert e.codon_orig_seq != e.codon_variant_seq
orig_codons = Genomics.translate(e.codon_orig_seq,
is_seleno=is_seleno)
if len(e.codon_variant_seq) % 3 == 0:
variant_codons = Genomics.translate(e.codon_variant_seq,
is_seleno=is_seleno)
else:
variant_codons = "!"
self.mOutfileCds.write("%s\n" % "\t".join(
(exons[0].transcript_id,
snp.chromosome,
"%i" % snp.pos,
snp.reference_base,
",".join(self.mVariantTypes),
variant.code,
variant.sequence,
"%i" % e.exon_id,
"%i" % len(exons),
self.getSubstitutionCode(orig_codons, variant_codons),
str(e.orig_seq),
str(e.codon_orig_seq),
orig_codons,
str(e.variant_seq),
str(e.codon_variant_seq),
variant_codons,
"%i" % r.cds_phase,
"%i" % r.cds_start,
"%i" % r.cds_end,
"%i" % cds_len)))
def buildCDSVariantsPerPosition(self, transcript_id, cds_variants, cds_len):
'''count the number of variants at each position of the coding sequence.
'''
variants_per_position = numpy.zeros(cds_len)
ncds_variants = len(cds_variants)
for v in cds_variants:
assert v.cds_end <= cds_len
variants_per_position[v.cds_start:v.cds_end] += 1
return variants_per_position
def buildIntronsVariantsPerPosition(self, transcript_id, variants, intron_seqs):
'''count the number of variants at positions within introns
(variants have already been filtered to only include those that
affect splicing).
'''
s = self.mSize
lengths = [len(x) for x in intron_seqs]
# only count 2 * s positions within intron
variants_per_position = numpy.zeros(2 * s * len(lengths))
nvar = len(variants)
for v in variants:
offset = v.intron_id * 2 * s
l = lengths[v.intron_id]
start, end = v.nc_start, v.nc_end
if start < s:
assert end < s, "variant (%i) larger than mSize (%i)" % (
end, s)
elif l - end < s:
assert l - \
start < s, "variant (%i) larger than mSize (%i)" % (
l - start, s)
offset += s
start, end = l - end, l - start
else:
raise ValueError("count out of range")
variants_per_position[offset + start:offset + end] += 1
return variants_per_position
def getGenotype(self, variants, variants_per_position, counts):
'''compute the genotype and number of variants.
*variants_per_position* is a vector of variants affecting a position.
returns a genotype and the number of variants.
'''
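# Illustrative sketch (not part of the original code): two homozygous variants
# at different positions give variants_per_position == [1, 1, ...], nhomo == nvar
# and max_variants_per_position == 1, hence genotype "O" with a single allele to
# build; a single heterozygous variant on a wildtype background yields "W".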
max_variants_per_position = max(variants_per_position)
nvar = len(variants)
homo = [x.is_homozygous for x in variants]
nhomo = len([x for x in homo if x])
nhetero = len(homo) - nhomo
if nhomo == nvar and max_variants_per_position == 1:
# all are homozygous, one variant only
genotype = "O"
counts.is_homozygous += 1
counts.is_resolvable += 1
nvariants = 1
elif nhomo == 0 and nvar == 1 and max_variants_per_position == 1:
# one heterozygous position, rest is wild type
genotype = "W"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 1
elif nhomo == nvar - 1 and max_variants_per_position == 1:
# one heterozygous allowed if the rest are homozygous
genotype = "E"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 2
elif nvar == 1 and max_variants_per_position == 2:
# if there is only one heterozygous variant, which does not include
# the wild type
genotype = "E"
counts.is_heterozygous += 1
counts.is_resolvable += 1
nvariants = 2
elif nhetero == nvar and max_variants_per_position == 1:
# if all are heterozygous and one allele is always the wild type
# resolve towards one allele, though it might be ambiguous
genotype = "V"
counts.is_heterozygous += 1
counts.is_ambiguous += 1
nvariants = 1
elif max_variants_per_position == 1:
# if there is only one variant at each position but more than two
# heterozygous variants in total
# resolve towards two alleles
genotype = "v"
counts.is_heterozygous += 1
counts.is_ambiguous += 1
nvariants = 2
else:
genotype = "M"
counts.is_mixture += 1
counts.is_unresolvable += 1
nvariants = 2
return genotype, nvariants, max_variants_per_position
def buildCDSVariants(self,
transcript_id,
cds_variants,
reference_seq_na,
offset,
nvariants):
'''build variants for the coding sequence.
offset: offset to correct for starting frame != 0
'''
variant_cds_seqs = []
# the following code works with two variants at most
assert 0 < nvariants <= 2, "expected 1 or 2 variants, got %i" % nvariants
for x in range(nvariants):
variant_cds_seqs.append(list(reference_seq_na))
n = 0
for v in cds_variants:
# ignore variants at incomplete codons
if v.cds_start + offset < 0:
E.warn("skipping variant in %s in first out-frame codon: %s." %
(transcript_id, str(v)))
continue
if v.is_homozygous:
toupdate = list(range(nvariants))
else:
toupdate = (0,)
if v.code == "=":
assert len(v.variant_seq) == 1
assert reference_seq_na[v.cds_start + offset] == v.reference_seq.lower(), "transcript %s: base mismatch: %s != %s at %i, %s" %\
(transcript_id, reference_seq_na[
v.cds_start + offset], v.reference_seq.lower(), v.cds_start, str(v))
for x in toupdate:
variant_cds_seqs[x][v.cds_start + offset] = v.variant_seq
elif v.code == "+":
# indels are done without disrupting the frame
# prepend.
for x in toupdate:
variant_cds_seqs[x][
v.cds_start + offset] = v.variant_seq + variant_cds_seqs[x][v.cds_start + offset]
elif v.code == "-":
# indels are done without disrupting the frame
for x in toupdate:
for y in range(v.cds_start, v.cds_end):
variant_cds_seqs[x][y + offset] = ""
n += 1
if E.global_options.loglevel >= 10:
for x in range(nvariants):
Genomics.printPrettyAlignment(reference_seq_na,
variant_cds_seqs[x])
return variant_cds_seqs
def buildIntronVariants(self, transcript_id, splice_variants,
reference_seqs_na, nvariants):
'''build all intron variants.
Returns a list of variants. Each variant is a list of introns. Introns that are unchanged
are None.
The first entry in the list is the wildtype.
'''
variant_intron_seqs = []
# the following code works with one or two variants
assert 0 < nvariants <= 2, "expected 1 or 2 variants, got %i" % nvariants
nintrons = len(reference_seqs_na)
for x in range(nvariants):
variant_intron_seqs.append([None for y in reference_seqs_na])
n = 0
for v in splice_variants:
E.debug("transcript_id=%s: splice=%s" % (transcript_id, str(v)))
if v.is_homozygous:
toupdate = list(range(nvariants))
else:
toupdate = (0,)
intron_id = v.intron_id
assert 0 <= intron_id < len(
reference_seqs_na), "intron id `%i` out of range" % intron_id
# instantiate intron sequence
for x in toupdate:
if variant_intron_seqs[x][intron_id] is None:
variant_intron_seqs[x][intron_id] = list(
reference_seqs_na[intron_id])
if v.code == "=":
assert len(v.variant_seq) == 1
assert reference_seqs_na[intron_id][v.nc_start] == v.reference_seq.lower(), \
"transcript %s: base mismatch: %s != %s at %i:%i" %\
(transcript_id, reference_seqs_na[intron_id][v.nc_start], v.reference_seq.lower(),
v.intron_id, v.nc_start)
for x in toupdate:
variant_intron_seqs[x][intron_id][
v.nc_start] = v.variant_seq
elif v.code == "+":
# indels are done without disrupting the frame
# prepend to second residue
assert (v.nc_end - v.nc_start) == 2
for x in toupdate:
variant_intron_seqs[x][intron_id][v.nc_end] = v.variant_seq + \
variant_intron_seqs[x][intron_id][v.nc_end]
elif v.code == "-":
# indels are done without disrupting the frame
for x in toupdate:
for y in range(v.nc_start, v.nc_end):
variant_intron_seqs[x][intron_id][y] = ""
n += 1
return variant_intron_seqs
def countEffectsOnSplicing(self, variant_intron_seqs, reference_intron_seqs, min_intron_size=5):
'''collect the effects of variants on each intron.
Returns aggregate counts and a one-letter code per intron.
'''
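# Illustrative sketch: for a transcript with three introns where only the second
# intron carries a splice-disrupting variant, the per-intron code string would be
# ".D." and last_exon would point at that intron, so that downstream exons can be
# treated as lost by the caller.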
nintrons = len(reference_intron_seqs)
nsynonymous, nnonsynonymous, ncanonical = 0, 0, 0
ndisrupted, nunknown, nunchanged, nnovel = 0, 0, 0, 0
ncorrected_frames, nuncorrected_frames = 0, 0
nframeshifts, ninserted_codons, nunchanged_frames = 0, 0, 0
nnoncanonical = 0
codes = []
last_exon = nintrons + 1
for intron_id, reference_seq in enumerate(reference_intron_seqs):
reference_name, reference_seq5, reference_seq3 = Genomics.GetIntronType(
reference_seq)
e = 0
variant_seq = variant_intron_seqs[intron_id]
# process frameshift introns
if len(reference_seq) < min_intron_size:
nframeshifts += 1
if variant_seq is None:
variant_name, variant_seq5, variant_seq3 = reference_name, reference_seq5, reference_seq3
nunchanged_frames += 1
codes.append(".")
continue
variant_seq = "".join(variant_seq)
# the variant intron length may or may not be a multiple of 3
fullseq = "".join(variant_seq)
if len(variant_seq) % 3 == 0:
# a fixed frame shift
ncorrected_frames += 1
# note that the inserted codon sequence might contain stops
# needs to be tested with the other exons as it might not be
# in frame.
code = "F"
ninserted_codons += len(variant_seq) // 3
else:
code = "P"
nuncorrected_frames += 1
# process real introns
else:
if reference_name != "unknown":
ncanonical += 1
else:
nnoncanonical += 1
if variant_seq is None:
variant_name, variant_seq5, variant_seq3 = reference_name, reference_seq5, reference_seq3
nunchanged += 1
codes.append(".")
continue
variant_seq = "".join(variant_seq)
variant_name, variant_seq5, variant_seq3 = Genomics.GetIntronType(
variant_seq)
code = self.getSpliceCode(reference_name, variant_name)
if code == "D":
last_exon = min(last_exon, intron_id)
ndisrupted += 1
elif code == "C":
nnovel += 1
elif code == "N":
nnonsynonymous += 1
elif code == "S":
nsynonymous += 1
elif code == "U":
nunknown += 1
codes.append(code)
return SplicingEffect._make((nintrons,
ncanonical,
nframeshifts,
nnoncanonical,
nunchanged_frames,
ncorrected_frames,
nuncorrected_frames,
nunchanged,
nsynonymous,
nnonsynonymous,
ndisrupted,
nnovel,
nunknown,
ninserted_codons,
"".join(codes),
last_exon))
def getTruncatedCodons(self, is_homozygous, stops, ncodons):
'''return codons that are truncated due to stop codons.
Note that if two variants are present and there is
a homozygous variant causing a stop codon, both variants
will have the same stop codon registered automatically.
Returns the minimum and maximum number of codons truncated across the two alleles.
'''
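# Illustrative sketch with ncodons == 100 (hypothetical values):
#   stops == [40], homozygous    -> (60, 60)  both alleles truncated
#   stops == [40], heterozygous  -> (0, 60)   the wildtype allele is kept intact
#   stops == [40, 70]            -> (30, 60)  one stop per allele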
if len(stops) == 0:
return 0, 0
# one stop - one variant
if len(stops) == 1:
# if homozygous: both allelles have the stop
if is_homozygous:
stop_min = stop_max = ncodons - stops[0]
else: # wildtype still present
stop_min, stop_max = 0, ncodons - stops[0]
else:
stop_min, stop_max = ncodons - max(stops), ncodons - min(stops)
return max(0, stop_min), max(0, stop_max)
def fixCDSTermini(self, variant_cds_seqs, contig, strand, start, end):
'''if the first codon in a sequence has been deleted, add
sequence from the UTR.
Not implemented yet - needs to take into account indels in
the UTR as well.
'''
return variant_cds_seqs
for variant_cds_seq in variant_cds_seqs:
x = 0
# find first base that is not deleted
while x < len(variant_cds_seq) and variant_cds_seq[x] == "":
x += 1
# note: to be correct, this should take into account indels as
# well.
extra_seq = self.mFasta.getSequence(
contig, strand, start - x, start)
for xx in range(0, xx):
pass
def writeTable(self, outfile):
'''output summary for each transcript.
Output three tables:
1. mOutfileTranscripts: translation information
2. mOutfileSplicing: splicing
3. mOutfile: counts
'''
cds_counts = E.Counter()
splice_counts = E.Counter()
all_counts = E.Counter()
# TODO: the current code is not consistent when it comes
# to counting premature stop codons as it also includes
# the wild type as variant 0.
for transcript_id, exons in self.mExons.items():
###################################################
###################################################
###################################################
# sort out exons and get some chromosomal coordinates
exons = self.mExons[transcript_id]
exons.sort(key=lambda x: x.start)
cds_len = sum([x.end - x.start for x in exons])
ncodons = cds_len // 3
contig = exons[0].contig
lcontig = self.mFasta.getLength(contig)
strand = exons[0].strand
is_positive_strand = Genomics.IsPositiveStrand(strand)
# obtain cds sequences
reference_seq_na = GTF.toSequence(exons, self.mFasta).lower()
# obtain intron sequences
intron_intervals = GTF.toIntronIntervals(exons)
if not is_positive_strand:
intron_intervals = [(lcontig - end, lcontig - start)
for start, end in intron_intervals]
intron_intervals.reverse()
intron_sequences = [self.mFasta.getSequence(
contig, strand, x[0], x[1]).lower() for x in intron_intervals]
nintrons = len(intron_intervals)
is_seleno = transcript_id in self.mSeleno
# result variables - set to wildtype
all_genotype, all_nalleles, all_max_variants = "", 0, 0
cds_genotype, cds_nalleles, cds_max_variants = "", 0, 0
splice_genotype, splice_nalleles, splice_max_variants = "", 0, 0
cds_nvariant_positions, splice_nvariant_positions = 0, 0
variant_intron_seqs, splice_variants_per_position = [], []
variant_cds_seqs, cds_variants_per_position = [], []
exon2cds = []
if is_positive_strand:
frame = int(exons[0].frame)
cds_pos = frame
for x in exons:
exon2cds.append(cds_pos)
cds_pos += x.end - x.start
else:
frame = int(exons[-1].frame)
cds_pos = frame
for x in exons[::-1]:
exon2cds.append(cds_pos)
cds_pos += x.end - x.start
last_exon_start = exon2cds[-1]
exon2cds.append(cds_len)
if transcript_id in self.mTranscriptVariants:
variants = self.mTranscriptVariants[transcript_id]
E.debug("processing %s with %i cds effects and %i splice effects started" %
(transcript_id, len(variants.cds_variants), len(variants.splice_variants)))
# we should have some variants
assert len(variants.cds_variants) + \
len(variants.splice_variants) > 0
# correct for frame at start - truncate the reference_seq_na
if frame != 0:
E.debug("transcript_id %s - correcting frame %i" %
(transcript_id, frame))
reference_seq_na = reference_seq_na[frame:]
# all coordinates need to modified by this amount
offset = -frame
else:
offset = 0
reference_seq_aa = Genomics.translate(reference_seq_na,
is_seleno=is_seleno)
cds_nvariant_positions = len(variants.cds_variants)
splice_nvariant_positions = len(variants.splice_variants)
###################################################
###################################################
###################################################
# build coding sequence variants
if len(variants.cds_variants) > 0:
###################################################
###################################################
###################################################
# decide what variants to build
# 1. homozygous: 1 variant per position, all also flagged as homozygous
# 2. heterozygous + wildtype: only 1 variant per position, all flagged as heterozygous
# 3. heterozygous: 2 variants per position, but only if there is only one position modified
# 4: mixture: rest
cds_variants_per_position = self.buildCDSVariantsPerPosition(
transcript_id,
variants.cds_variants,
cds_len)
cds_genotype, cds_nalleles, cds_max_variants = self.getGenotype(
variants.cds_variants,
cds_variants_per_position,
cds_counts)
variant_cds_seqs = self.buildCDSVariants(transcript_id,
variants.cds_variants,
reference_seq_na,
offset,
cds_nalleles)
###################################################
###################################################
###################################################
# build intron variants
###################################################
###################################################
###################################################
# collect all intron sequences
if len(variants.splice_variants) > 0:
###################################################
###################################################
###################################################
# collect genotype and variants to build
splice_variants_per_position = self.buildIntronsVariantsPerPosition(
transcript_id,
variants.splice_variants,
intron_sequences)
splice_genotype, splice_nalleles, splice_max_variants = self.getGenotype(
variants.splice_variants,
splice_variants_per_position,
splice_counts)
variant_intron_seqs = self.buildIntronVariants(
transcript_id,
variants.splice_variants,
intron_sequences,
splice_nalleles)
###################################################
###################################################
###################################################
# collect overall genotype
all_genotype, all_nalleles, all_max_variants = self.getGenotype(
variants.cds_variants + variants.splice_variants,
numpy.concatenate(
(cds_variants_per_position, splice_variants_per_position)),
all_counts)
###################################################
###################################################
###################################################
# add the wild type at top of both cds and intron variants
#
# This is necessary so that stop codons originally present
# in the sequence will be taken into account.
#
# Note that this invalidates the cds_stop_min below.
#
# A better way would be to merge variants and only
# add the wild type if there is only variant allele.
#
# Then, treat the wildtype separately to get numbers
# for the wildtype.
if len(variant_cds_seqs) == 0:
variant_cds_seqs = [list(reference_seq_na),
list(reference_seq_na)]
elif len(variant_cds_seqs) == 1:
if cds_genotype == "O":
# is homozygous - duplicate allele
variant_cds_seqs.append(variant_cds_seqs[0])
else:
# add wildtype
variant_cds_seqs[0:0] = [list(reference_seq_na), ]
if len(variant_intron_seqs) == 0:
variant_intron_seqs = [[None for x in range(nintrons)],
[None for x in range(nintrons)]]
elif len(variant_intron_seqs) == 1:
if splice_genotype == "O":
# is homozygous - duplicate allele
variant_intron_seqs.append(variant_intron_seqs[0])
else:
# add wildtype
variant_intron_seqs[0:0] = [
[None for x in range(nintrons)], ]
assert len(variant_cds_seqs) == 2
assert len(variant_intron_seqs) == 2
###################################################
###################################################
###################################################
# output information on splice/cds variants per transcript
# output also the wild type (variant_id = 0)
###################################################
cds_stops, splice_stops = [], []
for variant_id, variant_seq in enumerate(variant_intron_seqs):
variant_result = self.countEffectsOnSplicing(variant_seq,
intron_sequences)
self.mOutfileSplicing.write("%s\t%i\t%s\n" %
(transcript_id,
variant_id,
"\t".join(map(str, variant_result))))
splice_stops.append(exon2cds[variant_result.last_exon] // 3)
# estimate effect on protein coding sequence for each variant and
# output
for variant_id, variant_seq in enumerate(variant_cds_seqs):
variant_result = countEffectsOnTranscript(variant_seq,
reference_seq_na,
is_seleno=is_seleno)
s = "".join(variant_seq)
self.mOutfileTranscripts.write(
"%s\t%i\t%i\t%s\t%s\t%s\n" %
(transcript_id,
variant_id,
last_exon_start,
"\t".join(map(str, variant_result)),
"".join(s),
Genomics.translate(s, is_seleno=is_seleno),
))
cds_stops.append(variant_result.first_stop)
###################################################
###################################################
###################################################
# compute the shortest transcript variants
# due to splicing and cds changes separately and
# combined.
###################################################
if splice_nalleles > 0:
splice_stop_min, splice_stop_max = \
self.getTruncatedCodons(
splice_genotype == "O", splice_stops, ncodons)
else:
splice_stop_min, splice_stop_max = 0, 0
if cds_nalleles > 0:
cds_stop_min, cds_stop_max = \
self.getTruncatedCodons(
cds_genotype == "O", cds_stops, ncodons)
else:
cds_stop_min, cds_stop_max = 0, 0
# combine stops between cds and splice variants
# the two variants will have the overall maxima
all_stop_min, all_stop_max = (max(splice_stop_min, cds_stop_min),
max(splice_stop_max, cds_stop_max))
###################################################
###################################################
###################################################
# output stats per transcript
###################################################
outfile.write("%s\n" % "\t".join((
transcript_id,
"%i" % cds_len,
"%i" % ncodons,
"%i" % last_exon_start,
"%i" % cds_max_variants,
"%i" % cds_nvariant_positions,
"%s" % cds_genotype,
"%i" % cds_nalleles,
"%i" % cds_stop_min,
"%i" % cds_stop_max,
"%i" % splice_max_variants,
"%i" % splice_nvariant_positions,
"%s" % splice_genotype,
"%i" % splice_nalleles,
"%i" % splice_stop_min,
"%i" % splice_stop_max,
"%i" % all_max_variants,
"%i" % (cds_nvariant_positions + splice_nvariant_positions),
"%s" % all_genotype,
"%i" % all_nalleles,
"%i" % all_stop_min,
"%i" % all_stop_max,
)))
E.debug("processing %s with %i cds effects and %i splice effects finished" %
(transcript_id, cds_nvariant_positions, splice_nvariant_positions))
E.info("cds counts: %s" % (str(cds_counts)))
E.info("splice counts: %s" % (str(splice_counts)))
E.info("combined counts: %s" % (str(all_counts)))
class CounterContigs(Counter):
'''count variants across the genome per chromosome.'''
mHeader = ["genome_%s" % x for x in ("ntranscripts", "nused", "pos")]
def __init__(self, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
# create counter
self.mCountsSNPs = collections.defaultdict(int)
self.mCountsIndels = collections.defaultdict(int)
def update(self, snp):
'''update with snp.'''
if snp.reference_base == "*":
self.mCountsIndels[snp.chromosome] += 1
else:
self.mCountsSNPs[snp.chromosome] += 1
def writeTable(self, outfile):
outfile.write("contig\tsize\tnindels\tnsnps\n")
total_snps, total_indels, total_length = 0, 0, 0
for key in sorted(self.mCountsSNPs.keys()):
total_snps += self.mCountsSNPs[key]
total_indels += self.mCountsIndels[key]
total_length += self.mFasta.getLength(key)
outfile.write("\t".join((key,
"%i" % self.mFasta.getLength(key),
"%i" % self.mCountsIndels[key],
"%i" % self.mCountsSNPs[key])) + "\n")
outfile.write("\t".join(("total",
"%i" % total_length,
"%i" % total_indels,
"%i" % total_snps)) + "\n")
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
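# Illustrative invocation (hypothetical file names, not part of the original
# script); pileup input is read from stdin and the summary table written to stdout:
#
#   python snp2counts.py --genome-file=genome \
#       --exons-file=transcripts.gtf.gz \
#       --module=gene-counts --module=contig-counts \
#       --input-format=pileup < snps.pileup > counts.tsv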
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: snp2counts.py 2872 2010-03-03 10:21:13Z andreas $", usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome [default=%default].")
parser.add_option("-f", "--exons-file", dest="filename_exons", type="string",
help="filename with exon information (gtf formatted file) [default=%default].")
parser.add_option("-s", "--seleno-tsv-file", dest="filename_seleno", type="string",
help="filename of a list of transcript ids that are selenoproteins [default=%default].")
parser.add_option("-c", "--vcf-file", dest="filename_vcf", type="string",
help="vcf file to parse [default=%default].")
parser.add_option("-m", "--module", dest="modules", type="choice", action="append",
choices=(
"gene-counts", "transcript-effects", "contig-counts"),
help="modules to apply [default=%default].")
parser.add_option("-i", "--input-format", dest="input_format", type="choice",
choices=("pileup", "vcf"),
help="input format [default=%default].")
parser.add_option("--vcf-sample", dest="vcf_sample", type="string",
help="sample id in vcf file to analyse [default=%default].")
parser.set_defaults(
genome_file=None,
filename_exons=None,
filename_seleno=None,
filename_vcf=None,
modules=[],
input_format="pileup",
vcf_sample=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
ninput, nskipped, noutput = 0, 0, 0
################################
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
else:
fasta = None
if options.filename_seleno:
seleno = set(
IOTools.readList(IOTools.openFile(options.filename_seleno, "r")))
else:
seleno = set()
# setup iterator
if options.input_format == "pileup":
iterator = pysam.Pileup.iterate(options.stdin)
elif options.input_format == "vcf":
if not options.vcf_sample:
raise ValueError(
"vcf format requires sample id (--vcf-sample) to be set")
if not options.filename_vcf:
raise ValueError(
"reading from vcf requires a vcf filename (--vcf-file) to be set")
iterator = pysam.Pileup.iterate_from_vcf(
options.filename_vcf, options.vcf_sample)
################################
modules = []
for module in options.modules:
if module == "gene-counts":
if not options.filename_exons:
raise ValueError(
"please supply exon information (--exons-file)")
modules.append(CounterGenes(options.filename_exons, fasta=fasta))
elif module == "transcript-effects":
if not options.filename_exons:
raise ValueError(
"please supply exon information (--exons-file)")
modules.append(CounterTranscripts(options.filename_exons, fasta=fasta,
pattern=options.output_filename_pattern,
seleno=seleno))
elif module == "contig-counts":
modules.append(CounterContigs(fasta=fasta))
options.stdout.write("\t".join([x.getHeader() for x in modules]) + "\n")
for snp in iterator:
ninput += 1
# translate chromosome according to fasta
if fasta:
snp = snp._replace(chromosome=fasta.getToken(snp.chromosome))
for module in modules:
module.update(snp)
# if ninput > 1000: break
for module in modules:
module.writeTable(options.stdout)
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py | 1a54538c050f7bb1d61c79c1d00703ea8232fbb9 | # coding=utf-8
__author__ = "Dimitrios Karkalousos"
from typing import Union
import torch
from torch import nn
from mridc import ifft2c, complex_mul, complex_conj
from .e2evn import SensitivityModel
from .rim.rim_block import RIMBlock
from ..data.transforms import center_crop_to_smallest
class CIRIM(nn.Module):
"""Cascades of RIM blocks."""
def __init__(
self,
recurrent_layer: str = "IndRNN",
conv_filters=None,
conv_kernels=None,
conv_dilations=None,
conv_bias=None,
recurrent_filters=None,
recurrent_kernels=None,
recurrent_dilations=None,
recurrent_bias=None,
depth: int = 2,
time_steps: int = 8,
conv_dim: int = 2,
loss_fn: Union[nn.Module, str] = "l1",
num_cascades: int = 1,
no_dc: bool = False,
keep_eta: bool = False,
use_sens_net: bool = False,
sens_chans: int = 8,
sens_pools: int = 4,
sens_normalize: bool = True,
sens_mask_type: str = "2D",
fft_type: str = "orthogonal",
output_type: str = "SENSE",
):
"""
Args:
recurrent_layer: Recurrent Layer selected from rnn_cells
conv_filters: Number of filters in the convolutional layers
conv_kernels: Kernel size in the convolutional layers
conv_dilations: Dilation in the convolutional layers
conv_bias: Whether to use bias in the convolutional layers
recurrent_filters: Number of filters in the recurrent layers
recurrent_kernels: Kernel size in the recurrent layers
recurrent_dilations: Dilation in the recurrent layers
recurrent_bias: Whether to use bias in the recurrent layers
depth: Number of layers in the network
time_steps: Number of time steps in the input
conv_dim: Dimension of the input
loss_fn: Loss function to use
num_cascades: Number of cascades
no_dc: Whether to remove the DC component
keep_eta: Whether to keep the eta term
use_sens_net: Whether to use the sensitivity network
sens_chans: Number of channels in the sensitivity network
sens_pools: Number of pools in the sensitivity network
sens_normalize: Whether to normalize the sensitivity network
sens_mask_type: Type of mask to use for the sensitivity network, 1D or 2D
fft_type: Type of FFT to use, data/orthogonal or numpy-like
output_type: Type of output to use, SENSE or RSS
"""
super(CIRIM, self).__init__()
# Initialize the cascades with RIM blocks
if recurrent_bias is None:
recurrent_bias = [True, True, False]
if recurrent_dilations is None:
recurrent_dilations = [1, 1, 0]
if recurrent_kernels is None:
recurrent_kernels = [1, 1, 0]
if recurrent_filters is None:
recurrent_filters = [64, 64, 0]
if conv_bias is None:
conv_bias = [True, True, False]
if conv_dilations is None:
conv_dilations = [1, 2, 1]
if conv_kernels is None:
conv_kernels = [5, 3, 3]
if conv_filters is None:
conv_filters = [64, 64, 2]
self.fft_type = fft_type
self.no_dc = no_dc
self.time_steps = time_steps
self.cascades = nn.ModuleList(
[
RIMBlock(
recurrent_layer=recurrent_layer,
conv_filters=conv_filters,
conv_kernels=conv_kernels,
conv_dilations=conv_dilations,
conv_bias=conv_bias,
recurrent_filters=recurrent_filters,
recurrent_kernels=recurrent_kernels,
recurrent_dilations=recurrent_dilations,
recurrent_bias=recurrent_bias,
depth=depth,
time_steps=self.time_steps,
conv_dim=conv_dim,
no_dc=self.no_dc,
fft_type=self.fft_type,
)
for _ in range(num_cascades)
]
)
# Initialize the sensitivity network if use_sens_net is True
self.use_sens_net = use_sens_net
if self.use_sens_net:
self.sens_net = SensitivityModel(
sens_chans, sens_pools, fft_type=self.fft_type, mask_type=sens_mask_type, normalize=sens_normalize
)
self.loss_fn = loss_fn
# Initialize data consistency term
self.dc_weight = nn.Parameter(torch.ones(1))
# Keep estimation through the cascades if keep_eta is True or re-estimate it if False.
self.keep_eta = keep_eta
# Initialize the output layer
self.output_type = output_type
# TODO: replace print with logger
print("No of parameters: {:,d}".format(self.get_num_params()))
def get_num_params(self):
"""
Get the number of parameters in the model.
Returns:
Number of parameters in the model.
"""
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(
self,
masked_kspace: torch.Tensor,
sense: torch.Tensor,
mask: torch.Tensor,
eta: torch.Tensor = None,
hx: torch.Tensor = None,
target: torch.Tensor = None,
max_value: float = 1.0,
sigma: float = 1.0,
accumulate_loss: bool = False,
) -> torch.Tensor:
"""
Forward pass of the network.
Args:
masked_kspace: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sense: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
eta: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for eta
hx: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for hx
target: torch.Tensor, shape [batch_size, n_x, n_y, 2], target data
max_value: float, maximum value of the data
sigma: float, noise level
accumulate_loss: bool, accumulate loss or not
        Returns:
            If accumulate_loss is True, yields the loss averaged over all cascades and time-steps.
            Otherwise, returns pred: torch.Tensor, shape [batch_size, n_x, n_y], the magnitude of the
            reconstruction normalized by its maximum value.
"""
sense = self.sens_net(masked_kspace, mask) if self.use_sens_net and self.sens_net is not None else sense
pred = masked_kspace.clone()
# Accumulate loss over cascades
cascade_time_steps_loss = []
for i, cascade in enumerate(self.cascades):
# Forward pass through cascade
pred, hx = cascade(
pred, masked_kspace, sense, mask, eta, hx, sigma, keep_eta=False if i == 0 else self.keep_eta
)
# Accumulate loss over time steps
if accumulate_loss:
time_steps_loss = []
for p in pred:
if self.no_dc is False and self.keep_eta is False:
p = ifft2c(p, fft_type=self.fft_type)
if self.output_type == "SENSE":
p = complex_mul(p, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
p = torch.sqrt((p ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
output = torch.view_as_complex(p)
target, output = center_crop_to_smallest(target, output)
loss = (
self.loss_fn(output.unsqueeze(1), target.unsqueeze(1), data_range=max_value) # type: ignore
if "ssim" in str(self.loss_fn).lower()
else self.loss_fn(output, target) # type: ignore
)
time_steps_loss.append(loss)
                # Weight each time-step's loss on a log scale over 0.1-1.0, so later time-steps count more.
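                # For example, with time_steps=8 the weights are torch.logspace(-1, 0, 8),
                # approximately [0.10, 0.14, 0.19, 0.27, 0.37, 0.52, 0.72, 1.00].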
_loss = [
x * torch.logspace(-1, 0, steps=self.time_steps).to(time_steps_loss[0]) for x in time_steps_loss
]
# Take average of all time-steps loss
cascade_time_steps_loss.append(sum(sum(_loss) / self.time_steps)) # type: ignore
# Take average of all cascades loss
if accumulate_loss:
loss = sum(list(cascade_time_steps_loss)) / len(self.cascades)
yield loss
else:
if isinstance(pred, list):
# Use the prediction of the last time-step.
pred = pred[-1].detach()
if self.no_dc is False and self.keep_eta is False:
pred = ifft2c(pred, fft_type=self.fft_type)
if self.output_type == "SENSE":
pred = complex_mul(pred, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
pred = torch.sqrt((pred ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
pred = torch.view_as_complex(pred)
pred = torch.abs(pred / torch.max(torch.abs(pred)))
return pred
def inference(
self,
masked_kspace: torch.Tensor,
sense: torch.Tensor,
mask: torch.Tensor,
eta: torch.Tensor = None,
hx: torch.Tensor = None,
sigma: float = 1.0,
accumulate_estimates: bool = False,
) -> torch.Tensor:
"""
Inference step of the model.
Args:
masked_kspace: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sense: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
eta: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for eta
hx: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for hx
sigma: float, noise level
accumulate_estimates: bool, if True, accumulate estimates for all time-steps
        Returns:
            If accumulate_estimates is True, yields the list of estimates from every cascade.
            Otherwise, returns the complex-valued prediction of the final cascade.
"""
sense = self.sens_net(masked_kspace, mask) if self.use_sens_net and self.sens_net is not None else sense
preds = []
pred = masked_kspace.clone()
for i, cascade in enumerate(self.cascades):
pred, hx = cascade(
pred, masked_kspace, sense, mask, eta, hx, sigma, keep_eta=False if i == 0 else self.keep_eta
)
if self.no_dc is False and self.keep_eta is False:
output = []
for p in pred:
p = ifft2c(p, fft_type=self.fft_type)
if self.output_type == "SENSE":
p = complex_mul(p, complex_conj(sense)).sum(dim=1)
elif self.output_type == "RSS":
p = torch.sqrt((p ** 2).sum(dim=1))
else:
raise ValueError("Output type not supported.")
output.append(p)
pred = output
if accumulate_estimates:
preds.append(pred)
pred = pred[-1].detach()
if accumulate_estimates:
yield preds
else:
return torch.view_as_complex(pred)
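# Minimal usage sketch (illustrative only, not part of the original module): it builds the model
# with the default hyperparameters defined above and reports its trainable parameter count.
if __name__ == "__main__":
    _model = CIRIM()
    print("CIRIM trainable parameters: {:,d}".format(_model.get_num_params()))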
|
py | 1a54554cc5536e395e5693683335d1943da88836 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''This file implements the runner for the system-bandwidth test.
It has two parts: the first runs the yardstick test script,
and the second is the algorithm that decides where the bottleneck is.
The test uses yardstick as the measurement tool.'''
import os
import time
import uuid
import json
import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.stack_prepare as stack_prepare
import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
import utils.infra_setup.runner.docker_env as docker_env
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()
test_dict = {
"action": "runTestCase",
"args": {
"opts": {
"task-args": {}
},
"testcase": "netperf_bottlenecks"
}
}
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
def env_pre(con_dic):
LOG.info("yardstick environment prepare!")
stack_prepare._prepare_env_daemon(True)
def config_to_result(test_config, test_result):
testdata = {}
parser_result = test_result["benchmark"]["data"]
test_result.update(test_config)
test_result.update(parser_result)
test_result["throughput"] = float(test_result["throughput"])
test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
test_result["mean_latency"] = float(test_result["mean_latency"])
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
def testcase_parser(out_file="yardstick.out", **parameter_info):
cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
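    # The assembled command has the form (values depend on parameter_info):
    #   yardstick task start /home/opnfv/repos/yardstick/samples/netperf_bottlenecks.yaml \
    #     --output-file <out_file> --task-args "{'tx_msg_size': ..., 'rx_msg_size': ...}"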
LOG.info("yardstick test cmd is: %s" % cmd)
return cmd
def do_test(test_config, Use_Dashboard, context_conf):
yardstick_container = docker_env.yardstick_info['container']
out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
cmd = testcase_parser(out_file=out_file, **test_config)
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
loop_value = 0
while loop_value < 60:
time.sleep(2)
loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
LOG.info("yardstick run success")
break
elif data["status"] == 2:
LOG.error("yardstick error exit")
exit()
save_data = config_to_result(test_config, data['result'][1])
if Use_Dashboard is True:
DashBoard.dashboard_send_data(context_conf, save_data)
return save_data["data_body"]
def run(test_config):
con_dic = test_config["load_manager"]
Use_Dashboard = False
env_pre(None)
if test_config["contexts"]["yardstick_ip"] is None:
con_dic["contexts"]["yardstick_ip"] =\
conf_parser.ip_parser("yardstick_test_ip")
if "dashboard" in test_config["contexts"].keys():
if test_config["contexts"]["dashboard_ip"] is None:
test_config["contexts"]["dashboard_ip"] =\
conf_parser.ip_parser("dashboard")
LOG.info("Create Dashboard data")
Use_Dashboard = True
DashBoard.dashboard_system_bandwidth(test_config["contexts"])
data = {}
rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
data["rx_pkt_sizes"] = rx_pkt_a
data["tx_pkt_sizes"] = tx_pkt_a
con_dic["result_file"] = os.path.dirname(
os.path.abspath(__file__)) + "/test_case/result"
cur_role_result = 1
pre_role_result = 1
pre_reply = {}
data_return = {}
data_max = {}
data_return["throughput"] = 1
for test_x in data["tx_pkt_sizes"]:
data_max["throughput"] = 1
bandwidth_tmp = 1
for test_y in data["rx_pkt_sizes"]:
case_config = {
"tx_msg_size": float(test_x),
"rx_msg_size": float(test_y),
"test_time": con_dic['scenarios']['test_times'],
"pod_info": conf_parser.bottlenecks_config["pod_info"]
}
data_reply = do_test(case_config, Use_Dashboard,
test_config["contexts"])
conf_parser.result_to_file(data_reply, test_config["out_file"])
bandwidth = data_reply["throughput"]
if (data_max["throughput"] < bandwidth):
data_max = data_reply
if (abs(bandwidth_tmp - bandwidth) / bandwidth_tmp < 0.025):
LOG.info("this group of data has reached top output")
break
else:
pre_reply = data_reply
bandwidth_tmp = bandwidth
cur_role_result = float(pre_reply["throughput"])
if (abs(pre_role_result - cur_role_result) / pre_role_result < 0.025):
LOG.info("The performance increases slowly")
if data_return["throughput"] < data_max["throughput"]:
data_return = data_max
pre_role_result = cur_role_result
LOG.info("Find bottlenecks of this config")
LOG.info("The max data is %d", data_return["throughput"])
return data_return
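# Illustrative helper (an editor's sketch, not called by run() above): the stop criterion used
# twice in run() is a relative-change test with a 2.5% tolerance, equivalent to:
def _has_converged(previous, current, tolerance=0.025):
    """Return True when the relative change between two throughput samples is within tolerance."""
    return abs(previous - current) / previous < tolerance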
|
py | 1a54565143c0eb5f8725ff2f5c989f54ff7fb148 | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.constants import SANDBOX_MODE
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.base import test_organization
from openprocurement.tender.belowthreshold.tests.contract import (
TenderContractResourceTestMixin,
TenderContractDocumentResourceTestMixin,
)
from openprocurement.tender.limited.tests.base import (
BaseTenderContentWebTest,
test_lots,
test_tender_data,
test_tender_negotiation_data,
test_tender_negotiation_quick_data,
)
from openprocurement.tender.limited.tests.contract_blanks import (
# TenderNegotiationQuickAccelerationTest
create_tender_contract_negotiation_quick,
# TenderNegotiationLot2ContractResourceTest
sign_second_contract,
create_two_contract,
# TenderNegotiationLotContractResourceTest
lot_items,
lot_award_id_change_is_not_allowed,
activate_contract_cancelled_lot,
# TenderNegotiationContractResourceTest
patch_tender_negotiation_contract,
tender_negotiation_contract_signature_date,
items,
# TenderContractResourceTest
create_tender_contract,
patch_tender_contract,
tender_contract_signature_date,
award_id_change_is_not_allowed,
create_tender_contract_document,
patch_tender_contract_document,
put_tender_contract_document,
)
from openprocurement.tender.belowthreshold.tests.contract_blanks import (
patch_tender_contract_value_vat_not_included,
patch_tender_contract_value,
)
class TenderContractResourceTest(BaseTenderContentWebTest, TenderContractResourceTestMixin):
initial_status = "active"
initial_data = test_tender_data
initial_bids = None # test_bids
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
def setUp(self):
super(TenderContractResourceTest, self).setUp()
self.create_award()
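    # The test_* attributes below are shared test bodies imported from the *_blanks modules;
    # snitch appears to re-bind each one so it runs as a test method of this TestCase.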
test_create_tender_contract = snitch(create_tender_contract)
test_patch_tender_contract = snitch(patch_tender_contract)
test_patch_tender_contract_value = snitch(patch_tender_contract_value)
test_tender_contract_signature_date = snitch(tender_contract_signature_date)
test_award_id_change_is_not_allowed = snitch(award_id_change_is_not_allowed)
class TenderContractVATNotIncludedResourceTest(BaseTenderContentWebTest, TenderContractResourceTestMixin):
initial_status = "active"
initial_data = test_tender_data
initial_bids = None
def create_award(self):
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": False},
}
},
)
self.award_id = response.json["data"]["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
def setUp(self):
super(TenderContractVATNotIncludedResourceTest, self).setUp()
self.create_award()
test_patch_tender_contract_value_vat_not_included = snitch(patch_tender_contract_value_vat_not_included)
class TenderNegotiationContractResourceTest(TenderContractResourceTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
test_patch_tender_contract = snitch(patch_tender_negotiation_contract)
test_patch_tender_contract_value = snitch(patch_tender_contract_value)
test_tender_contract_signature_date = snitch(tender_negotiation_contract_signature_date)
test_items = snitch(items)
class TenderNegotiationContractVATNotIncludedResourceTest(TenderContractVATNotIncludedResourceTest):
initial_data = test_tender_negotiation_data
class TenderNegotiationLotContractResourceTest(TenderNegotiationContractResourceTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"]}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
test_items = snitch(lot_items)
test_award_id_change_is_not_allowed = snitch(lot_award_id_change_is_not_allowed)
test_activate_contract_cancelled_lot = snitch(activate_contract_cancelled_lot)
class TenderNegotiationLot2ContractResourceTest(BaseTenderContentWebTest):
initial_data = test_tender_negotiation_data
stand_still_period_days = 10
def setUp(self):
super(TenderNegotiationLot2ContractResourceTest, self).setUp()
self.create_award()
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot2 = response.json["data"]
self.lot2 = lot2
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}, {"relatedLot": lot2["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award1_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award1_id, self.tender_token),
{"data": {"status": "active"}},
)
# Create another award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot2["id"],
}
},
)
award = response.json["data"]
self.award2_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award2_id, self.tender_token),
{"data": {"status": "active"}},
)
test_sign_second_contract = snitch(sign_second_contract)
test_create_two_contract = snitch(create_two_contract)
class TenderNegotiationQuickContractResourceTest(TenderNegotiationContractResourceTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
class TenderNegotiationQuickLotContractResourceTest(TenderNegotiationLotContractResourceTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
class TenderNegotiationQuickAccelerationTest(BaseTenderContentWebTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
accelerator = "quick,accelerator=172800" # 5 days=432000 sec; 432000/172800=2.5 sec
time_sleep_in_sec = 3 # time which reduced
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"suppliers": [test_organization], "status": "pending"}},
)
award = response.json["data"]
self.award_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{
"data": {
"status": "active",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
def setUp(self):
super(TenderNegotiationQuickAccelerationTest, self).setUp()
if SANDBOX_MODE:
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"procurementMethodDetails": self.accelerator}},
)
self.assertEqual(response.status, "200 OK")
self.create_award()
test_create_tender_contract_negotiation_quick = snitch(create_tender_contract_negotiation_quick)
class TenderNegotiationQuickLotAccelerationTest(TenderNegotiationQuickAccelerationTest):
initial_data = test_tender_negotiation_quick_data
stand_still_period_days = 5
accelerator = "quick,accelerator=172800" # 5 days=432000 sec; 432000/172800=2.5 sec
time_sleep_in_sec = 3 # time which reduced
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
class TenderNegotiationAccelerationTest(TenderNegotiationQuickAccelerationTest):
stand_still_period_days = 10
time_sleep_in_sec = 6
class TenderContractDocumentResourceTest(BaseTenderContentWebTest, TenderContractDocumentResourceTestMixin):
initial_status = "active"
initial_bids = None
def create_award(self):
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"suppliers": [test_organization], "status": "pending"}},
)
award = response.json["data"]
self.award_id = award["id"]
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{
"data": {
"status": "active",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
}
},
)
def setUp(self):
super(TenderContractDocumentResourceTest, self).setUp()
self.create_award()
response = self.app.get("/tenders/{}/contracts".format(self.tender_id))
self.contract_id = response.json["data"][0]["id"]
test_create_tender_contract_document = snitch(create_tender_contract_document)
test_patch_tender_contract_document = snitch(patch_tender_contract_document)
test_put_tender_contract_document = snitch(put_tender_contract_document)
class TenderContractNegotiationDocumentResourceTest(TenderContractDocumentResourceTest):
initial_data = test_tender_negotiation_data
class TenderContractNegotiationLotDocumentResourceTest(TenderContractDocumentResourceTest):
initial_data = test_tender_negotiation_data
def create_award(self):
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": self.initial_data["items"] * 2}},
)
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": test_lots[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot1 = response.json["data"]
self.lot1 = lot1
self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"items": [{"relatedLot": lot1["id"]}]}},
)
# Create award
response = self.app.post_json(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
{
"data": {
"suppliers": [test_organization],
"status": "pending",
"qualified": True,
"value": {"amount": 469, "currency": "UAH", "valueAddedTaxIncluded": True},
"lotID": lot1["id"],
}
},
)
award = response.json["data"]
self.award_id = award["id"]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
{"data": {"status": "active"}},
)
class TenderContractNegotiationQuickDocumentResourceTest(TenderContractNegotiationDocumentResourceTest):
initial_data = test_tender_negotiation_quick_data
class TenderContractNegotiationQuickLotDocumentResourceTest(TenderContractNegotiationLotDocumentResourceTest):
initial_data = test_tender_negotiation_quick_data
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderContractResourceTest))
suite.addTest(unittest.makeSuite(TenderContractDocumentResourceTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
py | 1a54573fc28914402f181c59a2e6a47b6c6ed509 | """expand content column
Revision ID: 6dd556a95d2b
Revises: 599d269adf7f
Create Date: 2020-10-19 18:21:14.384304
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6dd556a95d2b'
down_revision = '599d269adf7f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=mysql.VARCHAR(collation='utf8_bin', length=200),
type_=sa.String(length=4000, collation='utf8_bin'),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=sa.String(length=4000, collation='utf8_bin'),
type_=mysql.VARCHAR(collation='utf8_bin', length=200),
existing_nullable=True)
# ### end Alembic commands ###
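# Standard Alembic usage for this revision (for reference):
#   alembic upgrade 6dd556a95d2b     # widen twitch_command_text.content to VARCHAR(4000)
#   alembic downgrade 599d269adf7f   # shrink it back to VARCHAR(200)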
|
py | 1a545806877845db84f3423356a16a7e165cf99a | import csv
import logging
import zipfile
from sqlalchemy.orm import sessionmaker
from opennem.db import db_connect
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class TableRecordSplitter(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "tables" not in item:
logger.error(item)
raise Exception("No tables passed to pipeline")
tables = item["tables"]
table = tables.pop()
records = table["records"]
for record in records:
yield record
class UnzipSingleFilePipeline(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "body_stream" not in item:
return item
rs = item["body_stream"]
content = ""
with zipfile.ZipFile(rs) as zf:
zip_files = zf.namelist()
if len(zip_files) == 1:
content = zf.open(zip_files[0])
return {"file_handle": content, **item}
if len(zip_files) != 1:
raise Exception(
"Zero or more than one file in zip file. Have {}".format(
len(zip_files)
)
)
class ReadStringHandle(object):
@check_spider_pipeline
def process_item(self, item, spider):
if "file_handle" not in item:
return item
fh = item["file_handle"]
content = fh.read()
if type(content) is bytes:
content = content.decode("utf-8")
return {"content": content, **item}
class ExtractCSV(object):
@check_spider_pipeline
def process_item(self, item, spider):
if not item:
logger.error("No item to parse")
return None
if "content" not in item:
logger.error("No content in item to parse")
return item
content = item["content"]
del item["content"]
item["tables"] = {}
table = {"name": None}
content_split = content.splitlines()
datacsv = csv.reader(content_split)
for row in datacsv:
if not row or type(row) is not list or len(row) < 1:
continue
record_type = row[0]
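            # The rows appear to follow the AEMO MMS CSV convention: "C" rows are
            # comment/control records, "I" rows start a new table and list its field
            # names, and "D" rows carry the data values for the current table.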
if record_type == "C":
# @TODO csv meta stored in table
if table["name"] is not None:
table_name = table["name"]
if table_name in item["tables"]:
item["tables"][table_name]["records"] += table[
"records"
]
else:
item["tables"][table_name] = table
elif record_type == "I":
if table["name"] is not None:
table_name = table["name"]
if table_name in item["tables"]:
item["tables"][table_name]["records"] += table[
"records"
]
else:
item["tables"][table_name] = table
table = {}
table["name"] = "{}_{}".format(row[1], row[2])
table["fields"] = fields = row[4:]
table["records"] = []
elif record_type == "D":
values = row[4:]
record = dict(zip(table["fields"], values))
table["records"].append(record)
return item
class DatabaseStore(object):
def __init__(self):
engine = db_connect()
self.session = sessionmaker(bind=engine)
|
py | 1a54589f077021e8ae8b32e11fdde2953fcd57a2 | """LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import lcmt_viewer_link_data
class lcmt_viewer_load_robot(object):
__slots__ = ["num_links", "link"]
def __init__(self):
self.num_links = 0
self.link = []
def encode(self):
buf = StringIO.StringIO()
buf.write(lcmt_viewer_load_robot._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">i", self.num_links))
for i0 in range(self.num_links):
assert self.link[i0]._get_packed_fingerprint() == lcmt_viewer_link_data.lcmt_viewer_link_data._get_packed_fingerprint()
self.link[i0]._encode_one(buf)
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = StringIO.StringIO(data)
if buf.read(8) != lcmt_viewer_load_robot._get_packed_fingerprint():
raise ValueError("Decode error")
return lcmt_viewer_load_robot._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = lcmt_viewer_load_robot()
self.num_links = struct.unpack(">i", buf.read(4))[0]
self.link = []
for i0 in range(self.num_links):
self.link.append(lcmt_viewer_link_data.lcmt_viewer_link_data._decode_one(buf))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if lcmt_viewer_load_robot in parents: return 0
newparents = parents + [lcmt_viewer_load_robot]
tmphash = (0x739e6927d8bcec39+ lcmt_viewer_link_data.lcmt_viewer_link_data._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if lcmt_viewer_load_robot._packed_fingerprint is None:
lcmt_viewer_load_robot._packed_fingerprint = struct.pack(">Q", lcmt_viewer_load_robot._get_hash_recursive([]))
return lcmt_viewer_load_robot._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
|
py | 1a545965496ffb70a9a3544c2115b1afecf43a54 | from flask import Blueprint, Response
export = Blueprint('ExportApp', __name__)
|
py | 1a545996a97c1d1f85ea27a89ce42de4ef3a6e28 | import argparse
import os
from PIL import Image
import numpy as np
import torch
from torchvision.transforms import Compose, Resize, ToTensor, Normalize
# import lung_segmentation.importAndProcess as iap
import importAndProcess as iap
from ..models import model as model
from ..models.unet_models import unet11, unet16
def save_mask(mask, out_dir, filename):
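    """Binarize the channel-wise argmax of ``mask``, replicate it to three channels,
    and write it as a PNG to ``out_dir``/``filename``."""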
filter = np.asarray(np.argmax(mask, axis=0))
filter = (filter > 0).astype('uint8')
filter = filter*255
    filter = np.stack((filter, filter, filter), axis=-1)  # PIL expects a channels-last (H, W, 3) array
    pil = Image.fromarray(filter)
    pil.save(f"{out_dir}/{filename}")
OUTDIR = '/home/dxtien/dxtien_research/COVID/CXR8_Segmentation'
parser = argparse.ArgumentParser()
parser.add_argument('img_path')
parser.add_argument('-m', '--model', choices=['unet11', 'unet16', 'resnet'], default='unet16')
parser.add_argument('-r', '--resume-from', help='resume from a specific savepoint', required=True)
parser.add_argument('-t', '--input-type', choices=['dicom', 'png'], default='dicom')
parser.add_argument('--non-montgomery', action='store_true', help='toggle this flag if you are working on a non-montgomery dataset')
parser.add_argument('--no-normalize', action='store_true')
args = parser.parse_args()
normalize = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
if args.model == 'resnet':
model = model.segmentNetwork().cuda()
resize_dim = (400, 400)
convert_to = 'L'
elif args.model == 'unet11':
model = unet11(out_filters=3).cuda()
resize_dim = (224, 224)
convert_to = 'RGB'
elif args.model == 'unet16':
model = unet16(out_filters=3).cuda()
resize_dim = (224, 224)
convert_to = 'RGB'
if args.no_normalize:
transforms = Compose([Resize(resize_dim),ToTensor()])
else:
transforms = Compose([Resize(resize_dim),ToTensor(),normalize])
convert_to = 'RGB'
if args.input_type == 'dicom':
dataset = iap.DicomSegment(args.img_path, transforms, convert_to)
elif args.input_type == 'png' and args.non_montgomery:
#dataset = iap.LungTest(args.img_path, transforms, convert_to)
dataset = iap.MyLungTest(args.img_path, transforms, convert_to)
elif args.input_type == 'png':
dataset = iap.lungSegmentDataset(
os.path.join(args.img_path, "CXR_png"),
os.path.join(args.img_path, "ManualMask/leftMask/"),
os.path.join(args.img_path, "ManualMask/rightMask/"),
imagetransform=transforms,
labeltransform=Compose([Resize((224, 224)),ToTensor()]),
convert_to='RGB',
)
dataloader = torch.utils.data.DataLoader(dataset,batch_size=1,shuffle=False)
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(args.resume_from))
#show = iap.visualize(dataset)
with torch.no_grad():
for i, sample in enumerate(dataloader):
img = torch.autograd.Variable(sample['image']).cuda()
mask = model(img)
# if not args.non_montgomery:
# show.ImageWithGround(i,True,True,save=True)
# show.ImageWithMask(i, sample['filename'][0], mask.squeeze().cpu().numpy(), True, True, save=True)
mask_np = mask.squeeze().cpu().numpy()
        filename = sample['filename'][0]  # the DataLoader wraps the path string in a 1-element batch
filename = filename.split('/')[-1]
filename = filename[:-4]
save_mask(mask_np, OUTDIR, filename=filename+'_mask.png')
|
py | 1a5459ef9410ac8c969168479451987c6871c917 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the grad rules of neural network related operations."""
import os
from mindspore.ops.primitive import constexpr
from mindspore.common.tensor import Tensor
from mindspore.ops.operations import nn_ops as nps
from .grad_base import bprop_getters
from .. import functional as F
from .. import operations as P
from ...common import dtype as mstype
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ... import context
env_force_bprop_seq = os.getenv("ENV_FORCE_BPROP_SEQ")
@bprop_getters.register(P.BiasAdd)
def get_bprop_bias_add(self):
"""Grad definition for `BiasAdd` operation."""
bias_grad = G.BiasAddGrad(self.data_format)
def bprop(x, w, out, dout):
return dout, bias_grad(dout)
return bprop
@bprop_getters.register(P.Conv2D)
def get_bprop_conv2d(self):
"""Grad definition for `Conv2D` operation."""
input_grad = P.Conv2DBackpropInput(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
filter_grad = G.Conv2DBackpropFilter(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(dout, w, get_shape(x))
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(dout, x, get_shape(w))
return dx, dw
return bprop
@bprop_getters.register(nps.Conv3D)
def get_bprop_conv3d(self):
"""Grad definition for `Conv3D` operation."""
input_grad = nps.Conv3DBackpropInput(
self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
filter_grad = G.Conv3DBackpropFilter(
self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,
pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(w, dout, get_shape(x))
dw = filter_grad(x, dout, get_shape(w))
return dx, dw
return bprop
@bprop_getters.register(nps.Conv3DTranspose)
def get_bprop_conv3d_transpose(self):
"""Grad definition for `Conv3DTranspose` operation."""
stride = (self.stride[2], self.stride[3], self.stride[4])
dilation = (self.dilation[2], self.dilation[3], self.dilation[4])
input_grad = nps.Conv3D(
out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
pad=self.pad_list, stride=stride, dilation=dilation, group=self.group, data_format=self.data_format
)
filter_grad = G.Conv3DBackpropFilter(
out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode="pad",
pad=self.pad_list, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format
)
def bprop(x, w, out, dout):
dx = input_grad(dout, w)
dw = filter_grad(dout, x, F.shape(w))
return dx, dw, zeros_like(out)
return bprop
@bprop_getters.register(inner.ExtractImagePatches)
def get_bprop_extract_image_patches(self):
"""Grad definition for `ExtractImagePatches` operation."""
get_shape = P.Shape()
reshape = P.Reshape()
extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,
strides=self.strides,
rates=self.rates,
padding=self.padding)
concat = P.Concat(axis=-1)
expand_dims = P.ExpandDims()
scatter_nd = P.ScatterNd()
dtype = P.DType()
fill = P.Fill()
slice_op = P.Slice()
transpose = P.Transpose()
cast = P.Cast()
matmul = P.MatMul()
_, _, ksizes_row, ksizes_col = self.ksizes
def bprop(x, out, dout):
x_shape = get_shape(x)
x_batch, x_depth, x_row, x_col = x_shape
x_indices_num = x_row * x_col + 1
x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)
x_idx = reshape(x_idx, (1, 1, x_row, x_col))
x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)
x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))
out_shape = get_shape(out)
_, _, out_row, out_col = out_shape
out_indices_num = out_row * out_col * ksizes_row * ksizes_col
out_idx = F.tuple_to_array(range(out_indices_num))
out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))
idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))
idx_tensor = reshape(idx_tensor, (-1, 2))
sp_shape = (x_indices_num, out_indices_num)
sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)
sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))
grad = transpose(dout, (0, 2, 3, 1))
grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))
grad = transpose(grad, (1, 2, 3, 4, 0, 5))
grad = reshape(grad, (-1, x_batch * x_depth))
jac = matmul(sp_tensor, grad)
dx = reshape(jac, (x_row, x_col, x_batch, x_depth))
dx = transpose(dx, (2, 3, 0, 1))
return (dx,)
return bprop
@bprop_getters.register(P.DepthwiseConv2dNative)
def get_bprop_depthwise_conv2d_native(self):
"""Grad definition for `DepthwiseConv2dNative` operation."""
input_grad = G.DepthwiseConv2dNativeBackpropInput(
self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
self.dilation, self.group
)
filter_grad = G.DepthwiseConv2dNativeBackpropFilter(
self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,
self.dilation, self.group
)
get_shape = P.Shape()
def bprop(x, w, out, dout):
dx = input_grad(get_shape(x), w, dout)
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(x, get_shape(w), dout)
return dx, dw
return bprop
@bprop_getters.register(P.MaxPoolWithArgmax)
def get_bprop_max_pool_with_argmax(self):
"""Grad definition for `MaxPoolWithArgmax` operation."""
maxpool_grad = G.MaxPoolGradWithArgmax(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x, out, dout):
dx = maxpool_grad(x, dout[0], out[1])
return (dx,)
return bprop
@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
"""Grad definition for `MaxPoolGrad` operation."""
maxpool_grad_grad = G.MaxPoolGradGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
dx2 = zeros_like(x2)
dgrad = maxpool_grad_grad(x1, x2, dout)
return (dx1, dx2, dgrad)
return bprop
@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
"""Grad definition for `MaxPoolGradGrad` operation."""
maxpool_grad = G.MaxPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
dx2 = zeros_like(x2)
dgrad = maxpool_grad(x1, x2, dout)
return (dx1, dx2, dgrad)
return bprop
@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
"""Grad definition for `MaxPool` operation."""
maxpool_grad = G.MaxPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
dx = maxpool_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.MaxPool3D)
def get_bprop_max_pool3d_grad(self):
"""Grad definition for `MaxPool3D` operation."""
max_pool3d_grad = G.MaxPool3DGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, out, dout):
dx = max_pool3d_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.MaxPool3DGrad)
def get_bprop_max_pool3d_grad_grad(self):
"""Grad definition for `MaxPool3Grad` operation."""
max_pool3d_grad_grad = G.MaxPool3DGradGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, y, grad, out, dout):
dgrad = max_pool3d_grad_grad(x, y, dout)
return zeros_like(x), zeros_like(y), dgrad
return bprop
@bprop_getters.register(G.MaxPool3DGradGrad)
def get_bprop_max_pool3d_grad_grad_grad(self):
"""Grad definition for `MaxPool3GradGrad` operation."""
max_pool3d_grad = G.MaxPool3DGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.data_format)
def bprop(x, y, grad, out, dout):
dgrad = max_pool3d_grad(x, y, dout)
return zeros_like(x), zeros_like(y), dgrad
return bprop
@bprop_getters.register(P.AvgPool)
def get_bprop_avg_pool_grad(self):
"""Grad definition for `AvgPool` operation."""
avgpool_grad = G.AvgPoolGrad(
kernel_size=self.kernel_size,
strides=self.strides,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
dx = avgpool_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.DropoutGenMask)
def get_bprop_dropout_gen_mask(self):
"""Grad definition for `DropoutGenMask` operation."""
def bprop(shape, keep_prob, out, dout):
return (zeros_like(shape), zeros_like(keep_prob))
return bprop
@bprop_getters.register(P.DropoutDoMask)
def get_bprop_dropout_do_mask(self):
"""Grad definition for `DropoutDoMask` operation."""
do_mask = P.DropoutDoMask()
def bprop(x, y, keep_prob, out, dout):
return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))
return bprop
@bprop_getters.register(P.Mish)
def get_bprop_mish(self):
"""Grad definition for `Mish` operation."""
tanh = P.Tanh()
tanh_grad = G.TanhGrad()
softplus = P.Softplus()
softplus_grad = G.SoftplusGrad()
def bprop(x, out, dout):
dx1 = tanh(softplus(x))
dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)
dx = (dx1 * dout + dx2)
return (dx,)
return bprop
@bprop_getters.register(P.SeLU)
def get_bprop_selu(self):
"""Grad definition for `SeLU` operation."""
scale = 1.0507009873554804934193349852946
elu_grad = G.EluGrad()
def bprop(x, out, dout):
dx = elu_grad(dout, out) * scale
return (dx,)
return bprop
@bprop_getters.register(P.MulNoNan)
def get_bprop_mul_no_nan(self):
"""Grad definition for `MulNoNan` operation."""
mul_no_nan = P.MulNoNan()
reduce_sum = P.ReduceSum()
reshape = P.Reshape()
def bprop(x, y, out, dout):
x_shape = F.shape(x)
y_shape = F.shape(y)
dx = mul_no_nan(dout, y)
dy = mul_no_nan(x, dout)
broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)
if broadcast_x != ():
dx = reshape(reduce_sum(dx, broadcast_x), x_shape)
if broadcast_y != ():
dy = reshape(reduce_sum(dy, broadcast_y), y_shape)
return dx, dy
return bprop
@bprop_getters.register(P.ReLU)
def get_bprop_relu(self):
"""Grad definition for `ReLU` operation."""
input_grad = G.ReluGrad()
def bprop(x, out, dout):
dx = input_grad(dout, out)
return (dx,)
return bprop
@bprop_getters.register(G.ReluGrad)
def get_bprop_relu_grad(self):
"""Grad definition for `ReLUGrad` operation."""
input_grad = G.ReluGrad()
def bprop(grad, y, out, dout):
dgrad = input_grad(dout, y)
return dgrad, zeros_like(y)
return bprop
@bprop_getters.register(P.ReLU6)
def get_bprop_relu6(self):
"""Grad definition for `ReLU6` operation."""
input_grad = G.ReLU6Grad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.ReLUV2)
def get_bprop_relu_v2(self):
"""Grad definition for `ReLUV2` operation."""
input_grad = G.ReluGradV2()
def bprop(x, out, dout):
mask = out[1]
dx = input_grad(dout[0], mask)
return (dx,)
return bprop
@bprop_getters.register(P.HSwish)
def get_bprop_hswish(self):
"""Grad definition for `HSwish` operation."""
input_grad = G.HSwishGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.HSigmoid)
def get_bprop_hsigmoid(self):
"""Grad definition for `HSigmoid` operation."""
input_grad = G.HSigmoidGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.Elu)
def get_bprop_elu(self):
"""Grad definition for `Elu` operation."""
input_grad = G.EluGrad()
def bprop(x, out, dout):
dx = input_grad(dout, out)
return (dx,)
return bprop
@bprop_getters.register(P.Sigmoid)
def get_bprop_sigmoid(self):
"""Grad definition for `Sigmoid` operation."""
input_grad = G.SigmoidGrad()
def bprop(x, out, dout):
dx = input_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.SigmoidGrad)
def get_bprop_sigmoid_grad(self):
"""Grad definition for `SigmoidGrad` operation."""
sigmoid_grad = G.SigmoidGrad()
def bprop(y, grad, out, dout):
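        # Forward SigmoidGrad computes grad * y * (1 - y); differentiating w.r.t. y gives
        # grad * (1 - 2 * y), and w.r.t. grad gives y * (1 - y), hence the two outputs below.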
dy = dout * grad * (1. - 2 * y)
dgrad = sigmoid_grad(y, dout)
return dy, dgrad
return bprop
@constexpr
def _get_transpose_axis(x_shp, axis):
rank = len(x_shp)
if axis < 0:
axis += rank
reverse_axis = [i for i in range(rank)]
reverse_axis[axis] = rank - 1
reverse_axis[rank - 1] = axis
return tuple(reverse_axis)
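# Example: for a rank-4 x_shp and axis=1, the helper above returns (0, 3, 2, 1),
# i.e. the softmax axis is swapped with the last axis.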
@bprop_getters.register(P.Softmax)
def get_bprop_softmax(self):
"""Grad definition for `Softmax` operation."""
sum_func = P.ReduceSum(keep_dims=True)
sub = P.Sub()
mul = P.Mul()
get_shape = P.Shape()
transpose = P.Transpose()
axis = self.axis
if not isinstance(axis, int):
axis = axis[0]
def bprop(x, out, dout):
# dx = (dout - sum(dout * out)) * out
# This formula is correct only when the `axis` is the last dimension.
# In order to support the scenario where the `axis` is other values,
# we transpose the data of the `axis` dimension to the last dimension for calculation,
# and then transpose it back after the calculation.
reverse_axis = _get_transpose_axis(get_shape(x), axis)
out = transpose(out, reverse_axis)
dout = transpose(dout, reverse_axis)
dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))
dx = transpose(dx, reverse_axis)
return (dx,)
return bprop
@bprop_getters.register(P.LogSoftmax)
def get_bprop_log_softmax(self):
"""Grad definition for `LogSoftmax` operation."""
logsoftmax_grad = G.LogSoftmaxGrad(self.axis)
def bprop(x, out, dout):
dx = logsoftmax_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.Softplus)
def get_bprop_softplus(self):
"""Grad definition for `Softplus` operation."""
softplus_grad = G.SoftplusGrad()
def bprop(x, out, dout):
dx = softplus_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
"""Grad definition for `Softsign` operation."""
mul = P.Mul()
absolute = P.Abs()
div = P.Div()
square = P.Square()
def bprop(x, out, dout):
dx = mul(dout, div(1, square(1 + absolute(x))))
return (dx,)
return bprop
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
"""Grad definition for `Tanh` operation."""
tanh_grad = G.TanhGrad()
def bprop(x, out, dout):
dx = tanh_grad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(G.TanhGrad)
def get_bprop_tanh_grad(self):
"""Grad definition for `TanhGrad` operation."""
tanh_grad = G.TanhGrad()
def bprop(y, grad, out, dout):
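        # Forward TanhGrad computes grad * (1 - y * y); its derivative w.r.t. y is
        # -2 * grad * y, and w.r.t. grad it is (1 - y * y), i.e. tanh_grad(y, dout).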
dy = dout * -2.0 * grad * y
dgrad = tanh_grad(y, dout)
return dy, dgrad
return bprop
@bprop_getters.register(P.GeLU)
def get_bprop_gelu(self):
"""Grad definition for `GeLU` operation."""
input_grad = G.GeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x, out)
return (dx,)
return bprop
@bprop_getters.register(P.Gelu)
def get_bprop_gelu_2(self):
"""Grad definition for `GeLU` operation."""
input_grad = G.GeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x, out)
return (dx,)
return bprop
@bprop_getters.register(P.FastGeLU)
def get_bprop_fast_gelu(self):
"""Grad definition for `FastGeLU` operation."""
input_grad = G.FastGeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.FastGelu)
def get_bprop_fast_gelu_2(self):
"""Grad definition for `FastGeLU` operation."""
input_grad = G.FastGeLUGrad()
def bprop(x, out, dout):
dx = input_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.InstanceNorm)
def get_bprop_instance_norm(self):
"""Grad definition for `InstanceNorm` operation."""
input_grad = G.InstanceNormGrad(self.epsilon, self.momentum)
def bprop(x, gamma, beta, mean, variance, out, dout):
saved_mean = out[1]
saved_variance = out[2]
out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)
dx = out[0]
dgamma = out[1]
dbeta = out[2]
return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)
return bprop
@bprop_getters.register(P.BatchNorm)
def get_bprop_batch_norm(self):
"""Grad definition for `BatchNorm` operation."""
is_training = self.is_training
input_grad = G.BatchNormGrad(is_training, self.epsilon, self.data_format)
def bprop(x, scale, b, mean, variance, out, dout):
if is_training:
saved_mean = out[3]
saved_variance = out[4]
reserve = out[2]
else:
saved_mean = mean
saved_variance = variance
reserve = out[2]
out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)
dx = out[0]
dscale = out[1]
dbias = out[2]
return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)
return bprop
@bprop_getters.register(P.LayerNorm)
def get_bprop_layer_norm(self):
"""Grad definition for `LayerNorm` operation."""
layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)
def bprop(x, gamma, beta, out, dout):
dx, d_gamma, d_beta = layer_norm_grad(
x, dout[0], out[2], out[1], gamma)
return dx, d_gamma, d_beta
return bprop
@bprop_getters.register(G.LayerNormGrad)
def get_bprop_layer_norm_grad(self):
"""Grad definition for `LayerNormGrad` operation."""
layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)
def bprop(x, dy, variance, mean, gamma, out, dout):
d_x, d_dy, d_gamma = layer_norm_grad_grad(
x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])
return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma
return bprop
@bprop_getters.register(P.L2Normalize)
def get_bprop_l2normalize(self):
"""Grad definition for `L2Normalize` operation."""
input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)
def bprop(x, out, dout):
dx = input_grad(x, out, dout)
return (dx,)
return bprop
@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)
def get_bprop_softmax_cross_entropy_with_logits(self):
"""Grad definition for `SoftmaxCrossEntropyWithLogits` operation."""
expand = P.ExpandDims()
def bprop(logits, labels, out, dout):
grad = out[1]
grad = grad * expand(dout[0], -1)
return grad, zeros_like(labels)
return bprop
@bprop_getters.register(P.NLLLoss)
def get_bprop_nll_loss(self):
"""Grad definition for `NLLLoss` operation."""
nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)
def bprop(x, target, weight, out, dout):
total_weight = out[1]
dout_x = dout[0]
dx = nll_loss_grad(x, dout_x, target, weight, total_weight)
return dx, zeros_like(target), zeros_like(weight)
return bprop
@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)
def get_bprop_sparse_softmax_cross_entropy_with_logits(self):
"""Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation."""
is_grad = self.is_grad
grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)
def bprop(logits, labels, out, dout):
grad = out[0]
if not is_grad:
# if construct use loss
grad = grad_op(logits, labels)
grad = F.depend(grad, out)
grad = grad * dout
return grad, zeros_like(labels)
return bprop
@bprop_getters.register(P.ResizeBilinear)
def get_bprop_resize_bilinear(self):
"""Grad definition for `ResizeBilinear` operation."""
resize_grad = G.ResizeBilinearGrad(self.align_corners)
def bprop(x, out, dout):
dx = resize_grad(dout, x)
return (dx,)
return bprop
@bprop_getters.register(P.OneHot)
def get_bprop_onehot(self):
"""Grad definition for `OneHot` operation."""
def bprop(indices, depth, on_value, off_value, out, dout):
return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)
return bprop
@constexpr
def _range_op(start, limit, delta, dtype):
"""helper function for Grad TopK"""
output_tensor = Tensor(list(range(start, limit, delta)), dtype)
return output_tensor
@constexpr
def _get_1d_shape(in_shape):
"""helper function for Grad TopK"""
out_shape = 1
for i in in_shape:
out_shape *= i
return (out_shape,)
@bprop_getters.register(P.TopK)
def get_bprop_top_kv2(self):
"""Grad definition for `TopK` operation."""
scatter = P.ScatterNd()
expand_dims = P.ExpandDims()
shape_op = P.Shape()
reshape_op = P.Reshape()
dtype = P.DType()
def bprop(input_x, k, out, dout):
in_shape = shape_op(input_x)
in_lastdim = in_shape[-1]
indices = out[1]
ind_shape = shape_op(indices)
ind_lastdim = ind_shape[-1]
ind_2d = reshape_op(indices, (-1, ind_lastdim))
outerdim = shape_op(ind_2d)[0]
        # [0, in_lastdim, 2 * in_lastdim, ..., (outerdim - 1) * in_lastdim]
indices_dtype = dtype(indices)
range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)
        # expand_dims to (outerdim, 1), then broadcast against ind_2d
ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))
in_shape_1d = _get_1d_shape(in_shape)
out_grad = reshape_op(
scatter(
expand_dims(ind, -1),
reshape_op(dout[0], (-1,)),
in_shape_1d),
in_shape)
return out_grad, zeros_like(k)
return bprop
@bprop_getters.register(P.SmoothL1Loss)
def get_bprop_smooth_l1_loss(self):
"""Grad definition for `SmoothL1Loss` operation."""
grad = G.SmoothL1LossGrad(self.beta)
def bprop(prediction, target, out, dout):
dx = grad(prediction, target, dout)
dy = grad(target, prediction, dout)
return dx, dy
return bprop
@bprop_getters.register(P.L2Loss)
def get_bprop_l2_loss(self):
"""Grad definition for `L2Loss` operation."""
def bprop(x, out, dout):
dx = x * dout
return (dx,)
return bprop
@bprop_getters.register(P.RNNTLoss)
def get_bprop_rnnt_loss(self):
"""Grad definition for `RNNTLoss` operation."""
def bprop(acts, labels, act_lens, label_lens, out, dout):
grad = out[1]
return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)
return bprop
@bprop_getters.register(P.PReLU)
def get_bprop_prelu(self):
"""Grad definition for `PReLU` operation."""
grad = G.PReLUGrad()
def bprop(x, w, out, dout):
dx, dw = grad(dout, x, w)
return dx, dw
return bprop
@bprop_getters.register(P.LSTM)
def get_bprop_lstm(self):
"""Grad definition for `LSTM` operation."""
lstm_grad_data = G.LSTMGradData(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
lstm_grad_weight = G.LSTMGradWeight(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
lstm_grad = G.LSTMGrad(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
has_bias=self.has_bias,
bidirectional=self.bidirectional,
dropout=self.dropout
)
def bprop(x, hx, cx, w, out, dout):
y, _, _, reserve, state = out
dy, dhy, dcy, _, _ = dout
dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)
dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)
return dx, dhx, dcx, dw
#
def bprop_cpu(x, hx, cx, w, out, dout):
y, hy, cy, reserve, _ = out
dy, dhy, dcy, _, _ = dout
dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)
return dx, dhx, dcx, dw
if context.get_context('device_target') == "CPU":
return bprop_cpu
return bprop
@bprop_getters.register(P.DynamicRNN)
def get_bprop_dynamic_rnn(self):
"""Grad definition for `DynamicRNN` operation."""
dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,
direction=self.direction,
cell_depth=self.cell_depth,
use_peephole=self.use_peephole,
keep_prob=self.keep_prob,
cell_clip=self.cell_clip,
num_proj=self.num_proj,
time_major=self.time_major,
forget_bias=self.forget_bias)
expand_dims = P.ExpandDims()
def bprop(x, w, b, seq_length, init_h, init_c, out, dout):
dy, dh, dc, _, _, _, _, _, = dout
dh = dh[-1]
dc = dc[-1]
y, h, c, i, j, f, o, tanhct = out
dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,
c, dy, dh, dc, i, j, f, o, tanhct)
dh_prev = expand_dims(dh_prev, 0)
dc_prev = expand_dims(dc_prev, 0)
return dx, dw, db, (0), dh_prev, dc_prev
return bprop
@bprop_getters.register(P.DynamicGRUV2)
def get_bprop_dynamic_gru_v2(self):
"""Grad definition for `DynamicGRUV2` operation."""
dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,
self.num_proj, self.time_major, self.gate_order,
self.reset_after)
def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):
y, out_h, update, reset, new, hidden_new = out
dy, dout_h, _, _, _, _ = dout
dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,
out_h, dy, dout_h[-1], update,
reset, new, hidden_new, None, None)
return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev
return bprop
@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)
def get_bprop_sigmoid_crossentropy_with_logits(self):
"""Grad definition for `SigmoidCrossEntropyWithLogits` operation."""
op = G.SigmoidCrossEntropyWithLogitsGrad()
def bprop(x, y, out, dout):
dx = op(x, y, dout)
return (dx, zeros_like(y))
return bprop
@bprop_getters.register(P.Pad)
def get_bprop_pad(self):
"""Grad definition for `Pad` operation."""
shape_op = P.Shape()
paddings = self.paddings
def bprop(x, out, dout):
begin = ()
for item in paddings:
begin += (item[0],)
shp = shape_op(x)
dx = P.Slice()(dout, begin, shp)
return (dx,)
return bprop
@bprop_getters.register(P.MirrorPad)
def get_bprop_mirror_pad(self):
"""Grad definition for `MirrorPad` operation."""
mirror_pad_grad = G.MirrorPadGrad(self.mode)
def bprop(x, paddings, out, dout):
dx = mirror_pad_grad(dout, paddings)
return (dx, zeros_like(paddings))
return bprop
@bprop_getters.register(P.ROIAlign)
def get_bprop_roi_align(self):
"""Grad definition for `ROIAlign` operation."""
shape_op = P.Shape()
pooled_height = self.pooled_height
pooled_width = self.pooled_width
spatial_scale = self.spatial_scale
sample_num = self.sample_num
def bprop(inputs, rois, out, dout):
inputs_shape = shape_op(inputs)
dx = G.ROIAlignGrad(inputs_shape,
pooled_height,
pooled_width,
spatial_scale,
sample_num,
)(dout, rois)
return dx, zeros_like(rois)
return bprop
@bprop_getters.register(P.Conv2DBackpropInput)
def get_bprop_conv2d_backprop_input(self):
"""Grad definition for `Conv2DBackpropInput` operation."""
filter_grad = G.Conv2DBackpropFilter(
self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
input_grad = P.Conv2D(
self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,
dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format
)
def bprop(x, w, f_sizes, out, dout):
dx = input_grad(dout, w)
if env_force_bprop_seq == '1':
x = F.depend(x, dx)
dw = filter_grad(x, dout, F.shape(w))
return dx, dw, zeros_like(f_sizes)
return bprop
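# Note on the Conv2DBackpropInput bprop above: the gradient with respect to the
# op's first input (the upstream convolution gradient) is an ordinary forward
# Conv2D of dout with the same weights, while the weight gradient is obtained
# from Conv2DBackpropFilter applied to that first input and dout.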
@bprop_getters.register(P.BinaryCrossEntropy)
def get_bprop_binary_cross_entropy(self):
"""Grad definition for `BinaryCrossEntropy` operation."""
grad = G.BinaryCrossEntropyGrad(self.reduction)
def bprop(x, y, weight, out, dout):
dx = grad(x, y, dout, weight)
return dx, zeros_like(y), zeros_like(weight)
return bprop
@bprop_getters.register(P.BCEWithLogitsLoss)
def get_bprop_ce_with_logits_loss(self):
"""Grad definition for `BCEWithLogitsLoss` operation."""
reduction = self.reduction
mul = P.Mul()
sigmoid = P.Sigmoid()
add = P.Add()
sub = P.Sub()
size = P.Size()
neg = P.Neg()
log = P.Log()
def bprop(predict, target, weight, pos_weight, out, dout):
sigmoid_input = sigmoid(predict)
if pos_weight is not None:
t = mul(target, pos_weight)
dx = mul(sub(mul(sub(add(t, 1), target), sigmoid_input), t), dout)
grad_target = mul(sub(log(sub(1, sigmoid_input)), mul(pos_weight, log(sigmoid_input))), dout)
else:
dx = mul((sigmoid_input - target), dout)
grad_target = mul(predict, neg(dout))
if weight is not None:
dx = mul(dx, weight)
grad_target = mul(grad_target, weight)
if reduction == 'mean':
dx = dx / size(dx)
grad_target = grad_target / size(target)
return dx, grad_target, zeros_like(weight), zeros_like(pos_weight)
return bprop
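# Note on the BCEWithLogitsLoss bprop above: for
#   L = -[p*t*log(sigmoid(x)) + (1 - t)*log(1 - sigmoid(x))]   (p = pos_weight)
# the derivative is dL/dx = (p*t + 1 - t)*sigmoid(x) - p*t, which is exactly the
# mul/sub chain used for dx; with no pos_weight this reduces to sigmoid(x) - t.
# Likewise dL/dt = log(1 - sigmoid(x)) - p*log(sigmoid(x)) gives grad_target.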
@bprop_getters.register(P.KLDivLoss)
def get_bprop_kl_div_loss(self):
"""Grad definition for `KLDivLoss` operation."""
grad = G.KLDivLossGrad(self.reduction)
def bprop(x, y, out, dout):
dx, dy = grad(x, y, dout)
return dx, dy
return bprop
@bprop_getters.register(P.Dropout)
def get_bprop_dropout(self):
"""Grad definition for `Dropout` operation."""
grad = G.DropoutGrad(self.keep_prob)
def bprop(x, out, dout):
_, mask = out
dy, _ = dout
dx = grad(dy, mask)
return (dx,)
return bprop
@bprop_getters.register(P.Dropout2D)
def get_bprop_dropout2d(self):
"""Grad definition for `Dropout2D` operation."""
dtype = P.DType()
cast = P.Cast()
mul = P.Mul()
keep_prob = self.keep_prob
def bprop(x, out, dout):
_, mask = dout
y = cast(mask, mstype.float32)
if keep_prob != 0:
y = y * (1 / keep_prob)
y = mul(x, y)
y = cast(y, dtype(x))
return (y,)
return bprop
@bprop_getters.register(P.Dropout3D)
def get_bprop_dropout3d(self):
"""Grad definition for `Dropout3D` operation."""
dtype = P.DType()
cast = P.Cast()
mul = P.Mul()
keep_prob = self.keep_prob
def bprop(x, out, dout):
_, mask = dout
y = cast(mask, mstype.float32)
if keep_prob != 0:
y = y * (1 / keep_prob)
y = mul(x, y)
y = cast(y, dtype(x))
return (y,)
return bprop
@bprop_getters.register(P.CTCLoss)
def get_bprop_ctc_loss(self):
"""Grad definition for `CTCLoss` operation"""
expand = P.ExpandDims()
def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):
grad_loss = out[1]
grad = grad_loss * expand(dout[0], -1)
return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)
return bprop
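# Note on the CTCLoss bprop above: the op's second output is the precomputed
# gradient of each sample's loss with respect to `inputs`, so the bprop only
# scales it by the incoming loss gradient, expanded on the last axis so it
# broadcasts over the class dimension.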
@bprop_getters.register(P.BasicLSTMCell)
def get_bprop_basic_lstm_cell(self):
"""Grad definition for `BasicLSTMCell` operation."""
basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(
forget_bias=self.forget_bias,
activation=self.activation
)
basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()
basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)
def bprop(x, h, c, w, b, out, dout):
_, _, it, jt, ft, ot, tanhct = out
dct, dht, _, _, _, _, _ = dout
dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)
dxt, dht = basic_lstm_cell_input_grad(dgate, w)
dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)
return dxt, dht, dct_1, dw, db
return bprop
@bprop_getters.register(P.LRN)
def get_bprop_lrn(self):
"""Grad definition for `LRN` operation."""
grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)
def bprop(x, out, dout):
dx = grad(dout, x, out)
return (dx,)
return bprop
|
py | 1a5459f11202c11c06d87fc5413d5f25c95e9d02 | import pandas as pd
import os
from . import core
from .core import *
# todo: make a proper config file
_PATH = os.path.abspath(core.__file__)
# %% monkeypatch to the functions become methods on the dataframe
# could use decorators/pandas_flavor
# pandas_flavor: good, but want to eliminate dependencies
# approach below may be bloated and harder to maintain
# (must change when method names change)
series_methods = [sample_persons, count_persons, unique_codes, extract_codes,
count_codes, label, use_expression, select_persons]
frame_methods = [sample_persons, first_event, get_pids, unique_codes,
expand_codes, get_rows, count_persons, extract_codes,
count_codes, label, use_expression, select_persons, count_p]
# probably a horrible way of doing something horrible!
for method in frame_methods:
setattr(pd.DataFrame, getattr(method, "__name__"), method)
for method in series_methods:
setattr(pd.Series, getattr(method, "__name__"), method)
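# Hedged usage sketch: once the loops above have run, the helper functions are
# available as DataFrame/Series methods. The file name and column name below are
# illustrative only and not part of this package.
#
#     df = pd.read_csv('events.csv')
#     persons = df.count_persons()          # attached DataFrame method
#     codes = df['code'].unique_codes()     # attached Series method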
|
py | 1a545a5ee15c6fc1433efc0d3361fac5d5196bf0 | from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class NameDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_common_name(),
token_features.get_str_is_first_name(),
token_features.get_str_is_last_name(),
token_features.get_dummy_str_is_known_title(),
token_features.get_dummy_str_is_known_suffix(),
token_features.get_punctuation_type_feature(),
token_features.get_dummy_label()
])
|
py | 1a545a94d81eca4c088a5dbb7b6418b0ab5f6bc7 | from ginkgo4py import Ginkgo4Py
fixtures = "../fixtures"
def test_ginkgo4py_can_discover_test_files():
assert Ginkgo4Py().DiscoverFrom(fixtures).Files() == ["../fixtures/test_file.py"]
def test_given_discovered_files_discover_tests():
testFiles = Ginkgo4Py().DiscoverFrom(fixtures).Files()
assert Ginkgo4Py().DiscoverTestNamesFrom(testFiles) == ["A first test"] |
py | 1a545ab91bdc6bf12ef4df97f1a6fd5a5a7e7121 | import re
import copy
from epjson_handler import EPJSON
from expand_objects import ExpandObjects, ExpandThermostat, ExpandZone, ExpandSystem, ExpandPlantLoop, \
ExpandPlantEquipment
from custom_exceptions import InvalidTemplateException, InvalidEpJSONException, PyExpandObjectsYamlStructureException
class HVACTemplate(EPJSON):
"""
Handle HVACTemplate conversion process and connect created objects together.
Attributes:
templates: HVACTemplate objects from epJSON file
base_objects: Non-HVACTemplate objects from epJSON file
templates_zones: HVACTemplate:Zone: objects
templates_systems: HVACTemplate:System: objects
templates_plant_equipment: HVACTemplate:Plant equipment objects
templates_plant_loops: HVACTemplate:Plant: loop objects
expanded_*: List of class objects for each template type
epjson: epJSON used to store connection objects
"""
def __init__(
self,
no_schema=False,
logger_level='WARNING',
logger_name='console_only_logger',
reset_stream=True):
"""
:param no_schema: Boolean flag for skipping schema validation
"""
super().__init__(no_schema=no_schema, logger_level=logger_level, logger_name=logger_name,
reset_stream=reset_stream)
self.logger_level = logger_level
self.logger_name = logger_name
self.templates = {}
self.base_objects = {}
self.templates_systems = {}
self.templates_zones = {}
self.templates_plant_equipment = {}
self.templates_plant_loops = {}
self.templates_thermostats = {}
self.expanded_thermostats = {}
self.expanded_zones = {}
self.expanded_systems = {}
self.expanded_plant_loops = {}
self.expanded_plant_equipment = {}
self.epjson = {}
return
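    # Hedged usage sketch (not from the original source): a caller would typically
    # instantiate the class and feed it an already-loaded epJSON dictionary; the
    # variable names below are illustrative.
    #
    #     hvac = HVACTemplate(no_schema=True)
    #     hvac._hvac_template_preprocess(epjson=input_epjson)
    #     # template objects are now split into hvac.templates_zones,
    #     # hvac.templates_systems, hvac.templates_plant_loops, etc.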
def _hvac_template_preprocess(self, epjson):
"""
Organize epJSON and assign template objects to specific class attributes
:param epjson: Input epJSON object
:return: organized epJSON template objects into templates, and templates_* as class attributes
"""
self.logger.info('##### HVACTemplate #####')
for object_type, object_structure in epjson.items():
if re.match('^HVACTemplate:*', object_type):
if re.match('^HVACTemplate:Thermostat$', object_type):
self.merge_epjson(
super_dictionary=self.templates_thermostats,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Zone:('
'ConstantVolume|BaseboardHeat|FanCoil|IdealLoadsAirSystem|PTAC|PTHP|WaterToAirHeatPump|'
'VRF|Unitary|VAV|VAV:FanPowered|VAV:HeatAndCool|DualDuct)$',
object_type):
zone_default_map = {
'HVACTemplate:Zone:BaseboardHeat': {
'baseboard_heating_type': 'HotWater',
'outdoor_air_method': 'Flow/Person'
},
'HVACTemplate:Zone:ConstantVolume': {
'outdoor_air_method': 'Flow/Person',
'zone_cooling_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 12.8,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:FanCoil': {
'cooling_coil_type': 'ChilledWater',
'heating_coil_type': 'HotWater',
'outdoor_air_method': 'Flow/Person',
'supply_fan_delta_pressure': 75,
'cooling_coil_design_setpoint': 14.0,
'heating_coil_design_setpoint': 50.0,
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:PTAC': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'SingleSpeedDX',
'supply_fan_total_efficiency': 0.7,
'cooling_coil_gross_rated_cooling_cop': 3.0,
'heating_coil_type': 'Electric',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.0,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:PTHP': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'SingleSpeedDX',
'cooling_coil_gross_rated_cop': 3.0,
'supply_fan_total_efficiency': 0.7,
'heat_pump_heating_coil_type': 'SingleSpeedDXHeatPump',
'heat_pump_heating_coil_gross_rated_cop': 2.75,
'heat_pump_heating_minimum_outdoor_dry_bulb_temperature': -8.0,
'heat_pump_defrost_maximum_outdoor_dry_bulb_temperature': 5.0,
'heat_pump_defrost_strategy': 'ReverseCycle',
'heat_pump_defrost_control': 'Timed',
'supplemental_heating_coil_type': 'Electric',
'supplemental_heating_coil_maximum_outdoor_dry_bulb_temperature': 21.0,
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:Unitary': {
'outdoor_air_method': 'Flow/Person',
'zone_cooling_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 12.8,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SystemSupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:VRF': {
'outdoor_air_method': 'Flow/Person',
'supply_air_fan_placement': 'BlowThrough',
'cooling_coil_type': 'VariableRefrigerantFlowDX',
'supply_fan_total_efficiency': 0.7,
'heating_coil_type': 'VariableRefrigerantFlowDX',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0
},
'HVACTemplate:Zone:WaterToAirHeatPump': {
'outdoor_air_method': 'Flow/Person',
'supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'Coil:Cooling:WaterToAirHeatPump:EquationFit',
'cooling_coil_gross_rated_cop': 3.5,
# todo_eo: The template and ZoneHVAC:WaterToAirHeatPump defaults are mismatched for this
# field. This is not default efficiency for Fan:OnOff
'supply_fan_total_efficiency': 0.7,
'heat_pump_heating_coil_type': 'Coil:Heating:WaterToAirHeatPump:EquationFit',
'heat_pump_heating_coil_gross_rated_cop': 4.2,
'maximum_cycling_rate': 2.5,
'supplemental_heating_coil_type': 'Electric',
'zone_cooling_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_cooling_design_supply_air_temperature': 14.0,
'zone_cooling_design_supply_air_temperature_difference': 11.11,
'zone_heating_design_supply_air_temperature_input_method': 'SupplyAirTemperature',
'zone_heating_design_supply_air_temperature': 50.0,
'zone_heating_design_supply_air_temperature_difference': 30.0,
'heat_pump_coil_water_flow_mode': 'Cycling'
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = zone_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
# set a mapping of zone template type to look up parent system
zone_template_map = {
('HVACTemplate:Zone:ConstantVolume', ):
(
'template_constant_volume_system_name',
['HVACTemplate:System:ConstantVolume', ]),
('HVACTemplate:Zone:BaseboardHeat', 'HVACTemplate:Zone:FanCoil', 'HVACTemplate:Zone:PTAC',
'HVACTemplate:Zone:PTHP', 'HVACTemplate:Zone:WaterToAirHeatPump', 'HVACTemplate:Zone:VRF', ):
(
'dedicated_outdoor_air_system_name',
['HVACTemplate:System:DedicatedOutdoorAir', ]),
('HVACTemplate:Zone:Unitary', ):
(
'template_unitary_system_name',
['HVACTemplate:System:Unitary', 'HVACTemplate:System:UnitaryHeatPump',
'HVACTemplate:System:UnitaryHeatPump:AirToAir', 'HVACTemplate:System:UnitarySystem']),
('HVACTemplate:Zone:VAV', 'HVACTemplate:Zone:VAVFanPowered'):
(
'template_vav_system_name',
['HVACTemplate:System:VAV', 'HVACTemplate:System:PackagedVAV']),
('HVACTemplate:Zone:DualDuct', ):
(
'template_dual_duct_system_name',
['HVACTemplate:System:DualDuct', ]),
('HVACTemplate:Zone:vrf', ):
(
'template_vrf_system_name',
['HVACTemplate:System:VRF', ])}
# Check the referenced system against the epjson and issue a warning if it isn't found
system_check_list = [v for k, v in zone_template_map.items() if object_type in k]
if system_check_list:
system_check_list = system_check_list[0]
for object_name, object_fields in object_structure.items():
system_name = object_fields.get(system_check_list[0])
if not system_name and system_check_list[0] == 'dedicated_outdoor_air_system_name':
continue
else:
template_system_name = None
for system_type in system_check_list[1]:
system_group = epjson.get(system_type)
if system_group:
template_system_name = True if system_name in system_group else False
if template_system_name:
break
if not template_system_name:
raise InvalidTemplateException(
'Error: In {} ({}) Could not find air handler name referenced ({})'
.format(object_type, object_name, system_name))
# check fields
for object_name, object_fields in object_structure.items():
# check for required info
if not object_fields.get('template_thermostat_name', None):
self.logger.info(
'In {} ({}) template thermostat name not provided'
.format(object_type, object_name))
# check baseboard settings
if object_fields.get('baseboard_heating_type', None) == 'HotWater' and (
not epjson.get('HVACTemplate:Plant:HotWaterLoop') or not
epjson.get('HVACTemplate:Plant:Boiler')):
self.logger.warning(
'Warning: Both a HVACTemplate:Plant:HotWaterLoop and a HVACTemplate:Plant:Boiler are '
'needed when using hot water baseboards. Template name: {}'.format(object_name))
# fan coil capacity control with doas
if object_type == 'HVACTemplate:Zone:FanCoil':
if object_fields.get('capacity_control_method') == 'ConstantFanVariableFlow' and \
object_fields.get('dedicated_outdoor_air_system_name', '') != '':
self.logger.warning(
'Warning: In {} ({})'
' the Capacity Control Method is {}'
' and the zone is served by a dedicated outdoor air system.'
.format(object_type, object_name, object_fields.get('capacity_control_method')))
# IdealLoads input check
if object_type == 'HVACTemplate:Zone:IdealLoadsAirSystem':
heating_limit = object_fields.get('heating_limit')
maximum_heating_air_flow_rate = object_fields.get('maximum_heating_air_flow_rate', '')
maximum_sensible_heating_capacity = \
object_fields.get('maximum_sensible_heating_capacity', '')
cooling_limit = object_fields.get('cooling_limit')
maximum_cooling_air_flow_rate = object_fields.get('maximum_cooling_air_flow_rate', '')
maximum_total_cooling_capacity = \
object_fields.get('maximum_total_cooling_capacity', '')
if heating_limit == 'LimitFlowRate' and maximum_heating_air_flow_rate == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but the Maximum Heating Air Flow Rate field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('heating_limit')))
elif heating_limit == 'LimitCapacity' and maximum_sensible_heating_capacity == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but the Maximum Sensible Heating Capacity field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('heating_limit')))
elif heating_limit == 'LimitFlowRateAndCapacity' and (
maximum_heating_air_flow_rate == '' or maximum_sensible_heating_capacity == ''):
msg = []
if maximum_heating_air_flow_rate == '':
msg.append('the Maximum Heating Air Flow Rate field is blank')
if maximum_sensible_heating_capacity == '':
msg.append('the Maximum Sensible Heating Capacity field is blank')
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but {}. Enter a value or autosize in this field.'
.format(
object_type,
object_name,
object_fields.get('heating_limit'),
' and '.join(msg)))
if cooling_limit == 'LimitFlowRate' and maximum_cooling_air_flow_rate == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Heating Limit field is {} but the Maximum Cooling Air Flow Rate field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('cooling_limit')))
elif cooling_limit == 'LimitCapacity' and maximum_total_cooling_capacity == '':
raise InvalidTemplateException(
'Error: In {} ({})'
' the Cooling Limit field is {} but the Maximum Total Cooling Capacity field is '
'blank. Enter a value or autosize in this field.'
.format(object_type, object_name, object_fields.get('cooling_limit')))
elif cooling_limit == 'LimitFlowRateAndCapacity' and (
maximum_cooling_air_flow_rate == '' or maximum_total_cooling_capacity == ''):
msg = []
if maximum_cooling_air_flow_rate == '':
msg.append('the Maximum Cooling Air Flow Rate field is blank')
if maximum_total_cooling_capacity == '':
msg.append('the Maximum Total Cooling Capacity field is blank')
raise InvalidTemplateException(
'Error: In {} ({})'
' the Cooling Limit field is {} but {}. Enter a value or autosize in this field.'
.format(
object_type,
object_name,
object_fields.get('cooling_limit'),
' and '.join(msg)))
self.merge_epjson(
super_dictionary=self.templates_zones,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:System:('
'VRF|Unitary|UnitaryHeatPump:AirToAir|UnitarySystem|VAV|PackagedVAV|'
'ConstantVolume|DualDuct|DedicatedOutdoorAir'
')$', object_type):
# check for individual template issues
system_default_map = {
'HVACTemplate:System:ConstantVolume': {
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'economizer_type': 'NoEconomizer',
'heating_coil_type': 'HotWater',
'heating_coil_design_setpoint': 10,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 15.0,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 12.2,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2
},
'HVACTemplate:System:DedicatedOutdoorAir': {
'air_outlet_type': 'DirectIntoZone',
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'dx_cooling_coil_gross_rated_cop': 3.0,
'heating_coil_type': 'HotWater',
'heating_coil_design_setpoint': 12.2,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 15.0,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 12.2,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2,
'humidifier_rated_capacity': 1e-06,
'humidifier_constant_setpoint': 0.003
},
'HVACTemplate:System:DualDuct': {
'system_configuration_type': 'SingleFanConstantVolume',
'main_supply_fan_minimum_flow_fraction': 0.2,
'cold_duct_supply_fan_minimum_flow_fraction': 0.2,
'cold_duct_supply_fan_placement': 'DrawThrough',
'hot_duct_supply_fan_minimum_flow_fraction': 0.2,
'hot_duct_supply_fan_placement': 'DrawThrough',
'cooling_coil_type': 'ChilledWater',
'cooling_coil_setpoint_control_type': 'FixedSetpoint',
'cooling_coil_design_setpoint_temperature': 12.8,
'cooling_coil_setpoint_at_outdoor_dry_bulb_low': 15.6,
'cooling_coil_reset_outdoor_dry_bulb_low': 15.6,
'cooling_coil_setpoint_at_outdoor_dry_bulb_high': 12.8,
'cooling_coil_reset_outdoor_dry_bulb_high': 23.3,
'heating_coil_type': 'HotWater',
'heating_coil_setpoint_control_type': 'FixedSetpoint',
'heating_coil_design_setpoint': 50,
'heating_coil_setpoint_at_outdoor_dry_bulb_low': 50,
'heating_coil_reset_outdoor_dry_bulb_low': 7.8,
'heating_coil_setpoint_at_outdoor_dry_bulb_high': 26,
'heating_coil_reset_outdoor_dry_bulb_high': 12.2,
'preheat_coil_design_setpoint': 7.2
},
'HVACTemplate:System:PackagedVAV': {
'cooling_coil_type': 'TwoSpeedDX',
'cooling_coil_design_setpoint': 12.8,
'cooling_coil_gross_rated_cop': 3.0,
'heating_coil_design_setpoint': 10
},
'HVACTemplate:System:Unitary': {
'cooling_coil_type': 'SingleSpeedDX',
'cooling_design_supply_air_temperature': 12.8,
'cooling_coil_gross_rated_cop': 3.0,
'heating_design_supply_air_temperature': 50.0,
'economizer_type': 'NoEconomizer',
'economizer_lockout': 'NoLockout',
'supply_fan_placement': 'BlowThrough',
'dehumidification_setpoint': 60.0,
'humidifier_rated_capacity': 1e-06,
'humidifier_setpoint': 30.0
},
'HVACTemplate:System:UnitarySystem': {
'control_type': 'Load',
'supply_fan_placement': 'BlowThrough',
'cooling_coil_type': 'SingleSpeedDX',
'number_of_speeds_for_cooling': 1,
'dx_cooling_coil_gross_rated_cop': 3.0,
'heating_coil_type': 'Gas',
'number_of_speeds_or_stages_for_heating': 1,
'heat_pump_heating_coil_gross_rated_cop': 2.75,
'heat_pump_heating_minimum_outdoor_dry_bulb_temperature': -8.0,
'heat_pump_defrost_maximum_outdoor_dry_bulb_temperature': 5.0,
'heat_pump_defrost_strategy': 'ReverseCycle',
'heat_pump_defrost_control': 'Timed',
'supplemental_heating_or_reheat_coil_type': 'None',
'supplemental_heating_or_reheat_coil_maximum_outdoor_dry_bulb_temperature': 21.0,
'economizer_type': 'NoEconomizer',
'economizer_lockout': 'NoLockout',
'heat_recovery_frost_control_type': 'None',
'dehumidification_control_type': 'None',
'dehumidification_relative_humidity_setpoint': 60.0,
'humidifier_type': 'None',
'humidifier_rated_capacity': 1e-06,
'humidifier_relative_humidity_setpoint': 30.0,
'sizing_option': 'NonCoincident',
'return_fan': 'No'
},
'HVACTemplate:System:VAV': {
'cooling_coil_type': 'ChilledWater',
'cooling_coil_design_setpoint': 12.8,
'heating_coil_type': 'None',
'heating_coil_design_setpoint': 10,
'preheat_coil_design_setpoint': 7.2,
'humidifier_rated_capacity': 1e-06
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = system_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
system_names = [
zone_fields.get(zone_system_field) for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()]
if object_name not in system_names:
raise InvalidTemplateException(
                        'Error: In {} ({}) Did not find any HVACTemplate:Zone objects connected to system. '
'There must be at least one zone object which specifies '
'this system as the Template Unitary System Name.'
.format(object_type, object_name))
if object_fields.get('night_cycle_control', 'None') == 'CycleOnControlZone' and \
object_fields.get('night_cycle_control_zone_name', 'None') == 'None':
self.logger.warning('Warning: A zone name must be specified when Night Cycle Control is '
'set to Cycle on Control Zone for {} with unique name {}'
.format(object_type, object_name))
# check for control zones
if object_type in ['HVACTemplate:System:Unitary',
'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:UnitarySystem']:
for object_name, object_fields in object_structure.items():
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
try:
zones_served = [
zone_fields.get('zone_name') for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()
if zone_fields.get(zone_system_field) == object_name]
except AttributeError:
raise InvalidTemplateException(
'Error: In {} ({}) No HVACTemplate:Zone template objects reference'
' the system object'
.format(object_type, object_name))
if object_type in ['HVACTemplate:System:Unitary', 'HVACTemplate:System:UnitarySystem'] and \
object_fields.get('control_zone_or_thermostat_location_name') and \
object_fields.get('control_zone_or_thermostat_location_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} ({}) for the field control_zone_or_thermostat_location_name could '
'not find a matching HVACTemplate:Zone:Unitary named {}'
.format(
object_type,
object_name,
object_fields.get('control_zone_or_thermostat_location_name')))
elif object_type in ['HVACTemplate:System:Unitary',
'HVACTemplate:System:UnitarySystem'] and \
not object_fields.get('control_zone_or_thermostat_location_name'):
raise InvalidTemplateException(
'Error: control_zone_or_thermostat_location_name must '
'be specified for {} which is a {}'.format(object_name, object_type))
elif object_type == 'HVACTemplate:System:ConstantVolume' and \
object_fields.get('cooling_coil_control_zone_name') and \
object_fields.get('cooling_coil_control_zone_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} named {} for the field cooling_coil_control_zone_name could '
'not find a matching HVACTemplate:Zone:Unitary named {}'
.format(
object_type,
object_name,
object_fields.get('cooling_coil_control_zone_name')))
elif object_type == 'HVACTemplate:System:ConstantVolume' and \
object_fields.get('heating_coil_control_zone_name') and \
object_fields.get('heating_coil_control_zone_name') not in zones_served:
raise InvalidTemplateException(
'Error: In {} named {} for the field heating_coil_control_zone_name could '
'not find a matching HVACTemplate:Zone:Unitary named {}'
.format(
object_type,
object_name,
object_fields.get('heating_coil_control_zone_name')))
# check vrf master thermostat referenced zone
if object_type in ['HVACTemplate:System:VRF', ]:
for object_name, object_fields in object_structure.items():
try:
zone_system_field = self._get_zone_template_field_from_system_type(object_type)
except InvalidTemplateException:
continue
try:
zones_served = [
zone_fields.get('zone_name') for zone_type, zone_structure in epjson.items()
if re.match(r'HVACTemplate:Zone:.*', zone_type)
for zone_template_name, zone_fields in zone_structure.items()
if zone_fields.get(zone_system_field) == object_name]
except AttributeError:
                            raise InvalidTemplateException('Error: No HVACTemplate:Zone:VRF template objects reference'
' the {} object'.format(object_type))
if object_fields.get('master_thermostat_priority_control_type') == \
'MasterThermostatPriority' and \
object_fields.get('zone_name_for_master_thermostat_location') not in zones_served:
raise InvalidTemplateException(
'Error: In {} ({}) for the field Zone Name for '
'Master Thermostat Location could not find a matching '
'HVACTemplate:Zone:VRF named: {}'
.format(
object_type,
object_name,
object_fields.get('zone_name_for_master_thermostat_location')))
if object_fields.get('master_thermostat_priority_control_type') == 'Scheduled' and \
not object_fields.get('thermostat_priority_schedule_name'):
raise InvalidTemplateException(
'Error: In {} ({}) the Master Thermostat '
'Priority Control Type = Scheduled, but the Thermostat Priority Schedule Name '
'is blank.'.format(object_type, object_name))
self.merge_epjson(
super_dictionary=self.templates_systems,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Plant:(ChilledWater|HotWater|MixedWater)Loop$', object_type):
if len(object_structure.keys()) > 1:
self.logger.warning('Warning: Only one {} allowed per file.'.format(object_type))
plant_loop_default_map = {
'HVACTemplate:Plant:ChilledWaterLoop': {
'chilled_water_design_setpoint': 7.22,
'condenser_water_design_setpoint': 29.4,
'chilled_water_pump_configuration': 'ConstantPrimaryNoSecondary',
'chilled_water_setpoint_at_outdoor_dry_bulb_low': 12.2,
'chilled_water_reset_outdoor_dry_bulb_low': 15.6,
'chilled_water_setpoint_at_outdoor_dry_bulb_high': 6.7,
'chilled_water_reset_outdoor_dry_bulb_high': 26.7
},
'HVACTemplate:Plant:HotWaterLoop': {
'hot_water_design_setpoint': 82,
'hot_water_pump_configuration': 'ConstantFlow',
'hot_water_setpoint_at_outdoor_dry_bulb_low': 82.2,
'hot_water_reset_outdoor_dry_bulb_low': -6.7,
'hot_water_setpoint_at_outdoor_dry_bulb_high': 65.6,
'hot_water_reset_outdoor_dry_bulb_high': 10
},
'HVACTemplate:Plant:MixedWaterLoop': {
'high_temperature_design_setpoint': 33,
'low_temperature_design_setpoint': 20,
'water_pump_configuration': 'ConstantFlow'
}
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = plant_loop_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
if object_type == 'HVACTemplate:Plant:HotWaterLoop':
loop_system_list = [
'HVACTemplate:System:VAV', 'HVACTemplate:Zone:FanCoil', 'HVACTemplate:Zone:Unitary',
'HVACTemplate:Zone:PTAC', 'HVACTemplate:Zone:PTHP', 'HVACTemplate:Zone:WaterToAirHeatPump',
'HVACTemplate:System:UnitaryHeatPump:AirToAir', 'HVACTemplate:System:PackagedVAV',
'HVACTemplate:System:DedicatedOutdoorAir', 'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:DualDuct', 'HVACTemplate:Zone:BaseboardHeat',
'HVACTemplate:System:UnitarySystem', 'HVACTemplate:System:VRF']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:HotWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if object_type == 'HVACTemplate:Plant:ChilledWaterLoop':
loop_system_list = [
'HVACTemplate:System:VAV', 'HVACTemplate:Zone:FanCoil',
'HVACTemplate:System:DedicatedOutdoorAir', 'HVACTemplate:System:ConstantVolume',
'HVACTemplate:System:DualDuct', 'HVACTemplate:System:UnitarySystem']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:ChilledWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if object_type == 'HVACTemplate:Plant:MixedWaterLoop':
loop_system_list = [
'HVACTemplate:Zone:WaterToAirHeatPump', 'HVACTemplate:System:VRF',
'HVACTemplate:System:UnitarySystem']
if not any(hwlst in loop_system_list for hwlst in epjson.keys()):
self.logger.warning(
'Warning: You must specify at least one {} '
'if a HVACTemplate:Plant:MixedWaterLoop is defined.'
.format(' or '.join(loop_system_list)))
if 'HVACTemplate:Plant:HotWaterLoop' in epjson.keys():
self.logger.warning(
'Warning: In {}'
' an HVACTemplate:Plant:HotWaterLoop is also present. All boilers with blank Template '
'Loop Type field will be connected to the Hot Water Loop.'
.format(object_type))
self.merge_epjson(
super_dictionary=self.templates_plant_loops,
object_dictionary={object_type: object_structure},
unique_name_override=False)
elif re.match('^HVACTemplate:Plant:(Chiller|Tower|Boiler)(:ObjectReference)*$', object_type):
boiler_default_map = {
'HVACTemplate:Plant:Boiler': {
'fuel_type': 'NaturalGas',
'priority': '1',
'efficiency': 0.8,
'water_outlet_upper_temperature_limit': 100.0
},
'HVACTemplate:Plant:Boiler:ObjectReference': {
'boiler_object_type': 'Boiler:HotWater',
'priority': '1'
},
'HVACTemplate:Plant:Chiller': {
'condenser_type': 'WaterCooled'
},
'HVACTemplate:Plant:Chiller:ObjectReference': {
'chiller_object_type': 'Chiller:Electric:EIR',
'priority': '1'
},
'HVACTemplate:Plant:Tower:ObjectReference': {
'cooling_tower_object_type': 'CoolingTower:SingleSpeed'
},
}
for object_name, object_fields in object_structure.items():
# set defaults
selected_default_map = boiler_default_map.get(object_type)
if selected_default_map:
for field, default_value in selected_default_map.items():
if not object_fields.get(field):
object_fields[field] = default_value
# Check boiler inputs
if object_type == 'HVACTemplate:Plant:Boiler':
for object_name, object_fields in object_structure.items():
if not object_fields.get('fuel_type') and \
object_fields.get('boiler_type') != 'DistrictHotWater':
raise InvalidTemplateException(
'Error: In {} ({}) fuel_type must be specified when boiler_type is not '
'DistrictHotWater'.format(object_type, object_name))
# Check tower inputs
if object_type == 'HVACTemplate:Plant:Tower':
for object_name, object_fields in object_structure.items():
high_speed_nominal_capacity = object_fields.get('high_speed_nominal_capacity', 'Autosize')
free_convection_capacity = object_fields.get('free_convection_capacity', 'Autosize')
if (str(high_speed_nominal_capacity).lower() == 'autosize' and str(
free_convection_capacity).lower() != 'autosize') or \
(str(high_speed_nominal_capacity).lower() != 'autosize' and str(
free_convection_capacity).lower() == 'autosize'):
raise InvalidTemplateException(
'Error: In {} ({}) For a {} tower the high speed capacity and free '
'convection capacity both need to be specified or set to autosize.'
.format(object_type, object_name, object_fields.get('tower_type')))
# for plant equipment object references, add the referenced object to epjson for complex input resolution
# later on. For chiller objects, also identify condenser type and make it a template attribute.
elif object_type == 'HVACTemplate:Plant:Boiler:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['boiler_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced boiler not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['boiler_name']:
if reference_object_fields.get('boiler_water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Inlet Node Name found in referenced boiler: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('boiler_water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Outlet Node Name found in referenced boiler: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('boiler_water_inlet_node_name') == \
reference_object_fields.get('boiler_water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate hot water node name found in '
'referenced boiler. All boiler inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['boiler_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced boiler not found: {}'
.format(object_type, object_name, object_fields))
elif object_type == 'HVACTemplate:Plant:Chiller:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['chiller_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced chiller not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['chiller_name']:
if reference_object_fields.get('chilled_water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank chilled water Inlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('chilled_water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank chilled water Outlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('chilled_water_inlet_node_name') == \
reference_object_fields.get('chilled_water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate chilled water node name found in '
'referenced chiller. All chiller inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
try:
object_structure[object_name]['condenser_type'] = reference_object_fields['condenser_type']
except (KeyError, AttributeError):
object_structure[object_name]['condenser_type'] = 'WaterCooled'
if object_structure[object_name]['condenser_type'] == 'WaterCooled':
if reference_object_fields.get('condenser_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank condenser water Inlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('condenser_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank condenser water Outlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('condenser_inlet_node_name') == \
reference_object_fields.get('condenser_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate condenser water node name found in '
'referenced chiller. All chiller inlet and outlet node names '
'must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['chiller_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced chiller not found: {}'
.format(object_type, object_name, object_fields))
elif object_type == 'HVACTemplate:Plant:Tower:ObjectReference':
for object_name, object_fields in object_structure.items():
reference_object_structure = epjson.get(object_fields['cooling_tower_object_type'])
if not reference_object_structure:
raise InvalidTemplateException(
'Error: In {} ({}) Referenced tower not found: {}'
.format(object_type, object_name, object_fields))
for reference_object_name, reference_object_fields in reference_object_structure.items():
if reference_object_name == object_fields['cooling_tower_name']:
if reference_object_fields.get('water_inlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Inlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('water_outlet_node_name', '') in ['', 'None']:
raise InvalidTemplateException(
'Error: In {} ({}) Blank Outlet Node Name found in '
'referenced chiller: {}'
.format(object_type, object_name, object_fields))
if reference_object_fields.get('water_inlet_node_name') == \
reference_object_fields.get('water_outlet_node_name'):
raise InvalidTemplateException(
'Error: in {} ({}) Duplicate node name found in referenced tower. '
'All tower inlet and outlet node names must be unique'
.format(object_type, object_name))
object_structure[object_name]['epjson'] = \
{object_fields['cooling_tower_object_type']: {reference_object_name: reference_object_fields}}
break
if not object_structure[object_name].get('epjson'):
raise InvalidTemplateException(
'Error: In {} ({}) Referenced tower not found: {}'
.format(object_type, object_name, object_fields))
self.merge_epjson(
super_dictionary=self.templates_plant_equipment,
object_dictionary={object_type: object_structure},
unique_name_override=False)
else:
raise InvalidTemplateException(
'Error: Template object type {} was not recognized'.format(object_type))
# store original templates into dictionary
self.merge_epjson(
super_dictionary=self.templates,
object_dictionary={object_type: object_structure},
unique_name_override=False)
else:
# store all non-template objects into a base epjson object.
self.merge_epjson(
super_dictionary=self.base_objects,
object_dictionary={object_type: object_structure},
unique_name_override=False)
return
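    # Hedged illustration (not from the original source): a minimal input fragment
    # that this preprocessor would route into self.templates_thermostats; all names
    # and schedule values below are chosen only for the example.
    #
    #     {
    #         "HVACTemplate:Thermostat": {
    #             "All Zones": {
    #                 "heating_setpoint_schedule_name": "Htg-SetP-Sch",
    #                 "cooling_setpoint_schedule_name": "Clg-SetP-Sch"
    #             }
    #         }
    #     }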
def _expand_templates(self, templates, expand_class, **kwargs):
"""
Run Expand operations on multiple templates
:param templates: dictionary of HVACTemplate:.* objects
:param expand_class: ExpandObjects child class to operate on template (e.g. ExpandZone).
:return: dictionary of expanded objects with unique name as key
"""
expanded_template_dictionary = {}
templates = self.epjson_genexp(templates)
for template in templates:
(_, template_structure), = template.items()
(template_name, template_fields), = template_structure.items()
external_epjson_objects = template_fields.pop('epjson', None)
expanded_template = expand_class(
template=template,
epjson=external_epjson_objects,
logger_level=self.logger_level,
logger_name=self.logger_name,
**kwargs).run()
expanded_template_dictionary[template_name] = expanded_template
return expanded_template_dictionary
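    # Hedged illustration of how this helper is used (argument values follow the
    # attributes defined in __init__; the calls themselves are a sketch, not a
    # quote from the original source):
    #
    #     self.expanded_thermostats = self._expand_templates(
    #         templates=self.templates_thermostats, expand_class=ExpandThermostat)
    #     self.expanded_zones = self._expand_templates(
    #         templates=self.templates_zones, expand_class=ExpandZone)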
def _create_zonecontrol_thermostat(self, zone_class_object):
"""
        Create ZoneControl:Thermostat objects. This operation is performed outside of ExpandObjects because it
requires cross-referencing between HVACTemplate:Zone and HVACTemplate:Thermostat objects
:param zone_class_object: ExpandZone object
:return: Updated class epJSON dictionary with ThermostatSetpoint objects added. Objects are also added
to the class self.epsjon dictionary.
"""
        # Retrieve the thermostat object
try:
thermostat_template_name = getattr(zone_class_object, 'template_thermostat_name')
except AttributeError:
self.logger.info(
'In {} ({}) Zone object does not reference a thermostat class object'
.format(zone_class_object.template_type, zone_class_object.unique_name))
return
except ValueError:
raise InvalidTemplateException('Error: Zone template ({}) is improperly formatted.'
.format(zone_class_object.unique_name))
try:
thermostat_object = self.expanded_thermostats[thermostat_template_name]
except (ValueError, KeyError):
raise InvalidTemplateException('Error: Thermostat object does not exist ({}) but is reference by '
'zone template {}'
.format(thermostat_template_name, zone_class_object.unique_name))
# Evaluate the thermostat type in the thermostat object and format the output object accordingly
try:
zone_name = getattr(zone_class_object, 'zone_name')
thermostat_epjson = {t_type: t_struct for t_type, t_struct
in thermostat_object.epjson.items()
if re.match(r'^ThermostatSetpoint.*', t_type)}
(thermostat_type, thermostat_structure), = thermostat_epjson.items()
(thermostat_name, _), = thermostat_structure.items()
# create control schedule based on thermostat type
if thermostat_type == "ThermostatSetpoint:SingleHeating":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[1, ])
elif thermostat_type == "ThermostatSetpoint:SingleCooling":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[2, ])
elif thermostat_type == "ThermostatSetpoint:DualSetpoint":
control_schedule = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)\
.build_compact_schedule(
structure_hierarchy=['Objects', 'Common', 'Objects', 'Schedule', 'Compact', 'ALWAYS_VAL'],
insert_values=[4, ])
else:
raise InvalidTemplateException("Error: {} ({}) Invalid thermostat type set in ExpandThermostat"
.format(thermostat_type, thermostat_object.unique_name))
# create zonecontrol object
(_, schedule_structure), = control_schedule.items()
(schedule_name, _), = schedule_structure.items()
zonecontrol_thermostat = {
"ZoneControl:Thermostat": {
"{} Thermostat".format(zone_name): {
"control_1_name": thermostat_name,
"control_1_object_type": thermostat_type,
"control_type_schedule_name": schedule_name,
"zone_or_zonelist_name": "{}".format(zone_name)
}
}
}
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=dict(control_schedule, **zonecontrol_thermostat),
unique_name_override=True
)
return dict(control_schedule, **zonecontrol_thermostat)
except (ValueError, AttributeError, KeyError):
raise InvalidTemplateException(
"Error: HVACTemplate failed to build ZoneControl:Thermostat from zone template "
"{}".format(zone_class_object.unique_name)) # pragma: no cover - catchall
@staticmethod
def _get_zone_template_field_from_system_type(template_type):
"""
Retrieve the corresponding zone field name for a system template type
:param template_type: HVACTemplate:System object type
:return: zone field name
"""
# get the zone field_name that will identify the system template name
if re.match(r'HVACTemplate:System:ConstantVolume', template_type):
zone_system_template_field_name = 'template_constant_volume_system_name'
elif re.match(r'HVACTemplate:System:DedicatedOutdoorAir', template_type):
zone_system_template_field_name = 'dedicated_outdoor_air_system_name'
elif re.match(r'HVACTemplate:System:DualDuct', template_type):
zone_system_template_field_name = 'template_dual_duct_system_name'
elif re.match(r'HVACTemplate:System:Unitary.*', template_type):
zone_system_template_field_name = 'template_unitary_system_name'
elif re.match(r'HVACTemplate:System:.*VAV$', template_type):
zone_system_template_field_name = 'template_vav_system_name'
elif re.match(r'HVACTemplate:System:VRF', template_type):
zone_system_template_field_name = 'template_vrf_system_name'
else:
raise InvalidTemplateException(
"Error: Invalid system type passed to supply path creation function: {}".format(template_type))
return zone_system_template_field_name
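    # Example of the mapping above (values taken directly from the branches):
    #     _get_zone_template_field_from_system_type('HVACTemplate:System:VAV')
    #         -> 'template_vav_system_name'
    #     _get_zone_template_field_from_system_type('HVACTemplate:System:DualDuct')
    #         -> 'template_dual_duct_system_name'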
def _create_system_path_connection_objects(self, system_class_object, expanded_zones):
"""
Create objects connecting system supply air to zone objects. An AirLoopHVAC:SupplyPath object is created with
either an AirLoopHVAC:SupplyPlenum or an AirLoopHVAC:ZoneSplitter object. The same is true for
AirLoopHVAC:ReturnPath and AirLoopHVAC:ReturnPlenum/AirLoopHVAC:ZoneMixer.
:param system_class_object: Expanded HVACTemplate:System:.* class object
:param expanded_zones: dictionary of ExpandZone objects
:return: system supply air connection objects. AirLoopHVAC:SupplyPath object and either
        AirLoopHVAC:SupplyPlenum or AirLoopHVAC:ZoneSplitter object as well as AirLoopHVAC:ReturnPath and either
AirLoopHVAC:ReturnPlenum or AirLoopHVAC:ZoneMixer.
"""
zone_system_template_field_name = \
self._get_zone_template_field_from_system_type(template_type=system_class_object.template_type)
# iterate over inlet node name types. For DualDuct, this is two entries (hot/cold). For all other systems,
# this is a single value
if system_class_object.template_type == 'HVACTemplate:System:DualDuct':
inlet_nodes = ['cold_air_inlet_node_name', 'hot_air_inlet_node_name']
else:
inlet_nodes = ['air_inlet_node_name', ]
# create ExpandObjects class object to use some yaml and epjson functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(system_class_object, 'template_name')
# iterate over expanded zones and if the system reference field exists, and is for the referenced system,
# append them in the splitter and mixer lists
zone_return_plenums = []
zone_induced_air_nodes = []
for node_idx, inlet_node in enumerate(inlet_nodes):
zone_splitters = []
zone_mixers = []
zone_supply_plenums = []
for _, ez in expanded_zones.items():
if getattr(ez, zone_system_template_field_name, None) == system_class_object.template_name:
if getattr(ez, 'flow_type', None) in ['SeriesFromPlenum', 'ParallelFromPlenum']:
zone_induced_air_node = ez.unique_name
else:
zone_induced_air_node = None
if getattr(ez, 'supply_plenum_name', None) or (
getattr(ez, 'cold_supply_plenum_name', None) and inlet_node == 'cold_air_inlet_node_name') or (
getattr(ez, 'hot_supply_plenum_name', None) and inlet_node == 'hot_air_inlet_node_name'):
try:
zone_supply_equipment = {'AirLoopHVAC:SupplyPlenum': ez.epjson['AirLoopHVAC:SupplyPlenum']}
except (KeyError, AttributeError):
raise InvalidTemplateException(
'Error: supply_plenum_name indicated for zone template {} but '
'AirLoopHVAC:SupplyPlenum was not created'.format(ez.unique_name))
else:
zone_supply_equipment = self.get_epjson_objects(
epjson=ez.epjson,
object_type_regexp=r'^AirTerminal:.*')
try:
(zone_supply_equipment_type, zone_supply_equipment_structure), = zone_supply_equipment.items()
(zone_supply_equipment_name, zone_supply_equipment_fields), = zone_supply_equipment_structure.items()
if zone_supply_equipment_type == 'AirLoopHVAC:SupplyPlenum':
outlet_node_name = zone_supply_equipment_fields['inlet_node_name']
zone_supply_plenums.append({
'component_name': zone_supply_equipment_name,
'component_object_type': zone_supply_equipment_type
})
elif zone_supply_equipment_type in ['AirTerminal:SingleDuct:SeriesPIU:Reheat',
'AirTerminal:SingleDuct:ParallelPIU:Reheat']:
# Raise error if inlet node name is overridden for multi-inlet node systems (DualDuct)
if len(inlet_nodes) > 1:
raise InvalidTemplateException(
'Error: Series or Parallel PIU is being referenced '
'by an invalid system {}'.format(system_class_object.template_type))
outlet_node_name = zone_supply_equipment_fields['supply_air_inlet_node_name']
else:
outlet_node_name = zone_supply_equipment_fields[inlet_node]
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for zone equipment from Supply Path creation failed for '
'outlet node. system {}, zone {}, zone equipment {}'
.format(system_class_object.template_name, ez.unique_name, zone_supply_equipment))
if getattr(ez, 'return_plenum_name', None):
try:
zone_return_equipment = {'AirLoopHVAC:ReturnPlenum': ez.epjson['AirLoopHVAC:ReturnPlenum']}
except (KeyError, AttributeError):
raise InvalidTemplateException(
'Error: return_plenum_name indicated for zone template {} but '
'AirLoopHVAC:ReturnPlenum was not created'.format(ez.unique_name))
else:
try:
zone_return_equipment = {'ZoneHVAC:EquipmentConnections': ez.epjson['ZoneHVAC:EquipmentConnections']}
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for ZoneHVAC:EquipmentConnections object from Supply '
'Path creation failed for inlet node. system {}, zone {}'
.format(system_class_object.template_name, ez.unique_name))
try:
(zone_return_equipment_type, zone_return_equipment_structure), = zone_return_equipment.items()
(zone_return_equipment_name, zone_return_equipment_fields), = zone_return_equipment_structure.items()
if zone_return_equipment_type == 'AirLoopHVAC:ReturnPlenum':
inlet_node_name = zone_return_equipment_fields['outlet_node_name']
# use node_idx to prevent multiple zone_return_plenum objects from being created in dualduct zones
if node_idx == 0:
zone_return_plenums.append({
'component_name': zone_return_equipment_name,
'component_object_type': zone_return_equipment_type
})
else:
inlet_node_name = zone_return_equipment_fields['zone_return_air_node_or_nodelist_name']
except (KeyError, AttributeError, ValueError):
raise InvalidTemplateException(
'Error: Search for zone equipment from Return Path creation failed for '
'inlet node. system {}, zone {}, zone equipment {}'
.format(system_class_object.template_name, ez.unique_name, zone_return_equipment))
zone_splitters.append(
{
"outlet_node_name": outlet_node_name
}
)
zone_mixers.append(
{
"inlet_node_name": inlet_node_name
}
)
if zone_induced_air_node:
# This is for PIU objects that use SeriesFromPlenum or ParallelFromPlenum
zone_induced_air_nodes.append(
{
"node_name": '{} Return'.format(zone_induced_air_node)
}
)
            # create plenums or splitters/mixers, depending on template inputs
supply_object = None
supply_plenum_name = getattr(system_class_object, 'supply_plenum_name', None)
cold_supply_plenum_name = getattr(system_class_object, 'cold_supply_plenum_name', None)
hot_supply_plenum_name = getattr(system_class_object, 'hot_supply_plenum_name', None)
if system_class_object.template_type == 'HVACTemplate:System:DualDuct' and \
cold_supply_plenum_name and inlet_node.startswith('cold_air'):
eo.cold_supply_plenum_name = cold_supply_plenum_name
cold_supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'DualDuct', 'Cold'])
cold_supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': cold_supply_object}
elif system_class_object.template_type == 'HVACTemplate:System:DualDuct' and \
hot_supply_plenum_name and inlet_node.startswith('hot_air'):
eo.hot_supply_plenum_name = hot_supply_plenum_name
hot_supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'DualDuct', 'Hot'])
hot_supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': hot_supply_object}
elif supply_plenum_name:
                # set supply plenum name attribute for transition and mapping processing
eo.supply_plenum_name = supply_plenum_name
supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPlenum', 'Base'])
supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:SupplyPlenum': supply_object}
else:
supply_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ZoneSplitter', 'Base'])
supply_object['nodes'] = zone_splitters
supply_object = {'AirLoopHVAC:ZoneSplitter': supply_object}
# Add Path objects
supply_path_object = {'AirLoopHVAC:SupplyPath':
eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'SupplyPath', 'Base'])}
# add zone supply plenums if they were created
if zone_supply_plenums:
(_, supply_path_object_fields), = supply_path_object.items()
supply_path_object_fields['components'].extend(zone_supply_plenums)
# Rename objects if multi-inlet node system is used
if system_class_object.template_type == 'HVACTemplate:System:DualDuct':
(_, supply_object_fields), = supply_object.items()
(_, supply_path_object_fields), = supply_path_object.items()
if inlet_node.startswith('cold_air'):
supply_object_fields['name'] = supply_object_fields['name'].replace('{}', '{} Cold')
supply_object_fields['inlet_node_name'] = supply_object_fields['inlet_node_name'].replace('{}', '{} Cold')
supply_path_object_fields['name'] = supply_path_object_fields['name'].replace('{}', '{} Cold')
if inlet_node.startswith('hot_air'):
supply_object_fields['name'] = supply_object_fields['name'].replace('{}', '{} Hot')
supply_object_fields['inlet_node_name'] = supply_object_fields['inlet_node_name'].replace('{}', '{} Hot')
supply_path_object_fields['name'] = supply_path_object_fields['name'].replace('{}', '{} Hot')
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[supply_object, supply_path_object])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
            # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
# Create return objects
return_plenum_name = getattr(system_class_object, 'return_plenum_name', None)
return_nodelist = {}
if return_plenum_name:
# set return plenum name attribute for transition and mapping processing
eo.return_plenum_name = return_plenum_name
return_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ReturnPlenum', 'Base'])
return_object['nodes'] = zone_mixers
return_object = {'AirLoopHVAC:ReturnPlenum': return_object}
if zone_induced_air_nodes:
return_object['AirLoopHVAC:ReturnPlenum']['induced_air_outlet_node_or_nodelist_name'] = \
'{} Induced Air Nodes'.format(system_class_object.template_name)
return_nodelist = {
'NodeList': {
'name': '{} Induced Air Nodes'.format(system_class_object.template_name),
"nodes": zone_induced_air_nodes
}
}
else:
return_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ZoneMixer', 'Base'])
return_object['nodes'] = zone_mixers
return_object = {'AirLoopHVAC:ZoneMixer': return_object}
# Add Path objects
return_path_object = {
'AirLoopHVAC:ReturnPath':
eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'AirLoopHVAC', 'ReturnPath', 'Base'])}
# add zone return plenums if they were created
if zone_return_plenums:
(_, return_path_object_fields), = return_path_object.items()
            # duplicates from dualduct zone templates were already excluded above via the node_idx check
return_path_object_fields['components'] = zone_return_plenums + return_path_object_fields['components']
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[return_object, return_path_object, return_nodelist])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
return resolved_path_dictionary
def _create_system_vrf_path_connection_objects(self, system_class_object, expanded_zones):
"""
Create objects connecting VRF system to zone objects.
:param system_class_object: Expanded HVACTemplate:System:.* class object
:param expanded_zones: dictionary of ExpandZone objects
        :return: None. A ZoneTerminalUnitList object referencing each zone's
            ZoneHVAC:TerminalUnit:VariableRefrigerantFlow terminal unit is built and merged into the class
            epjson attribute.
"""
# create ExpandObjects class object to use some yaml and epjson functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(system_class_object, 'template_name')
vrf_object_name_list = []
zone_system_template_field_name = \
self._get_zone_template_field_from_system_type(template_type=system_class_object.template_type)
for _, ez in expanded_zones.items():
if getattr(ez, zone_system_template_field_name, None) == system_class_object.template_name:
try:
vrf_object = ez.epjson['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']
(vrf_object_name, _), = vrf_object.items()
except (KeyError, AttributeError):
raise InvalidTemplateException(
"Error: VRF zone template {} expanded with no "
"ZoneHVAC:TerminalUnit:VariableRefrigerantFlow object".format(ez.unique_name))
except ValueError:
raise InvalidTemplateException(
'ZoneHVAC:TerminalUnit:VariableRefrigerantFlow object incorrectly formatted: {}'
.format(ez.epjson.get('ZoneHVAC:TerminalUnit:VariableRefrigerantFlow', 'None')))
vrf_object_name_list.append({'zone_terminal_unit_name': vrf_object_name})
if vrf_object_name_list:
vrf_terminal_object = eo.get_structure(structure_hierarchy=[
'AutoCreated', 'System', 'ZoneTerminalUnitList', 'Base'])
vrf_terminal_object['terminal_units'] = vrf_object_name_list
path_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[{'ZoneTerminalUnitList': vrf_terminal_object}, ])
resolved_path_dictionary = eo.resolve_objects(epjson=path_dictionary)
            # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
else:
raise InvalidTemplateException(
'Error: Failed to create VRF terminal unit list for {}'.format(system_class_object.template_name))
return
def _create_templates_from_plant_equipment(self, plant_equipment_class_object, expanded_plant_loops):
"""
        Create plant loop and plant equipment templates from ExpandPlantEquipment object attributes.
        These outputs will be used as inputs to initialize new ExpandPlantLoop and ExpandPlantEquipment classes.
        This process must be performed because ExpandPlantLoop must be
        run before ExpandPlantEquipment. However, certain equipment inputs can cause new loops to be created.
:param plant_equipment_class_object: ExpandPlantEquipment class object
:param expanded_plant_loops: ExpandPlantLoop objects
        :return: Tuple of dictionaries of HVACTemplate:Plant template objects used to create ExpandPlantLoop
            and ExpandPlantEquipment objects
"""
# create dictionary to store plant loops
plant_loop_dictionary = {}
plant_equipment_dictionary = {}
# get each loop type specified in the existing plant loop class objects
plant_loops = [getattr(pl, 'template_type').lower() for pl in expanded_plant_loops.values()]
# create condenser water loop for water cooled condensers
if getattr(plant_equipment_class_object, 'template_type', None).lower() in \
['hvactemplate:plant:chiller', 'hvactemplate:plant:chiller:objectreference'] \
and getattr(plant_equipment_class_object, 'condenser_type', 'WaterCooled').lower() == 'watercooled' \
and 'hvactemplate:plant:condenserwaterloop' not in plant_loops \
and getattr(plant_equipment_class_object, 'chiller_type', None) != 'DistrictChilledWater':
# try to get the chilled water loop attributes to transition to condenser water
chw_loop = [
pl for pl
in expanded_plant_loops.values()
if getattr(pl, 'template_type').lower() == 'hvactemplate:plant:chilledwaterloop']
cndw_attributes = {}
# transfer ChilledWaterLoop attributes to CondenserWaterLoop
if chw_loop:
for cndw_attribute, chw_attribute in zip(
['condenser_water_pump_rated_head', 'condenser_water_design_setpoint',
'condenser_plant_operation_scheme_type', 'condenser_equipment_operation_schemes_name',
'condenser_water_temperature_control_type', 'condenser_water_setpoint_schedule_name',
'pump_schedule_name', 'pump_control_type', 'condenser_water_pump_type',
'condenser_water_supply_side_bypass_pipe', 'condenser_water_demand_side_bypass_pipe',
'condenser_water_load_distribution_scheme'],
['condenser_water_pump_rated_head', 'condenser_water_design_setpoint',
'condenser_plant_operation_scheme_type', 'condenser_equipment_operation_schemes_name',
'condenser_water_temperature_control_type', 'condenser_water_setpoint_schedule_name',
'pump_schedule_name', 'pump_control_type', 'condenser_water_pump_type',
'condenser_water_supply_side_bypass_pipe', 'condenser_water_demand_side_bypass_pipe',
'condenser_water_load_distribution_scheme']):
try:
cndw_attributes[cndw_attribute] = getattr(chw_loop[0], chw_attribute)
except AttributeError:
self.logger.debug('Chilled water attribute {} not set by user, using default for '
'condenser water'.format(chw_attribute))
cndw_attributes['template_plant_loop_type'] = 'CondenserWaterLoop'
self.merge_epjson(
super_dictionary=plant_loop_dictionary,
object_dictionary={
'HVACTemplate:Plant:CondenserWaterLoop': {
'Condenser Water Loop': cndw_attributes
}
})
# append plant loop to list to prevent another one being added.
plant_loops.append('hvactemplate:plant:condenserwaterloop')
return plant_loop_dictionary, plant_equipment_dictionary
def _create_additional_plant_loops_and_equipment_from_equipment(
self,
expanded_plant_equipment,
expanded_plant_loops):
"""
Create additional HVACTemplate:Plant:.*Loops based on HVACTemplate:Plant:(Chiller|Tower|Boiler) inputs
:param expanded_plant_equipment: ExpandPlantEquipment objects
:param expanded_plant_loops: ExpandPlantLoop objects
:return: Additional plant loop and equipment templates and objects added to expanded classes attributes
"""
# create deepcopy to iterate over because the expanded_plant_equipment object may change size during iteration
epe = copy.deepcopy(expanded_plant_equipment)
for epl_name, epl in epe.items():
plant_loop_template, plant_equipment_template = self._create_templates_from_plant_equipment(
plant_equipment_class_object=epl,
expanded_plant_loops=expanded_plant_loops)
# If a plant loop was created, reprocess it here.
if plant_loop_template:
# add new plant loop to the templates
for tmpl in [self.templates, self.templates_plant_loops]:
self.merge_epjson(
super_dictionary=tmpl,
object_dictionary=plant_loop_template
)
# Expand new plant loop and add to the class objects
additional_plant_loops = self._expand_templates(
templates=plant_loop_template,
expand_class=ExpandPlantLoop
)
try:
for expanded_name, expanded_object in additional_plant_loops.items():
if expanded_name not in expanded_plant_loops.keys():
expanded_plant_loops[expanded_name] = expanded_object
except (AttributeError, ValueError):
                    raise InvalidTemplateException(
                        'Error: A Plant loop was specified to be created from a plant equipment object '
                        '{}, but the process failed to attach the created objects'.format(epl_name))
# if a plant equipment template was created, process it here
if plant_equipment_template:
# add new plant equipment to the templates
for tmpl in [self.templates, self.templates_plant_equipment]:
self.merge_epjson(
super_dictionary=tmpl,
object_dictionary=plant_equipment_template
)
# Expand new plant equipment and add to the class objects
# pass updated expanded_plant_loops to the class initialization as well.
additional_plant_equipment = self._expand_templates(
templates=plant_equipment_template,
expand_class=ExpandPlantEquipment,
plant_loop_class_objects=expanded_plant_loops
)
try:
for expanded_name, expanded_object in additional_plant_equipment.items():
                        if expanded_name not in expanded_plant_equipment.keys():
expanded_plant_equipment[expanded_name] = expanded_object
except (AttributeError, ValueError):
raise InvalidTemplateException(
'Error: A Plant equipment was specified to be created from a plant '
                        'equipment object {}, but the process failed to attach the created objects'.format(epl_name))
return
@staticmethod
def _get_plant_equipment_waterloop_branches_by_loop_type(
plant_loop_class_object,
expanded_plant_equipment):
"""
Extract plant equipment branches by loop type and store in epJSON formatted dictionary
:param plant_loop_class_object: ExpandPlantLoop object
:param expanded_plant_equipment: dictionary of ExpandPlantEquipment objects
:return: epJSON formatted dictionary of branch objects for loop connections
"""
branch_dictionary = {}
for pe in expanded_plant_equipment.values():
branch_objects = copy.deepcopy(pe.epjson.get('Branch', {}))
for branch_name, branch_structure in branch_objects.items():
components = branch_structure.get('components')
if not components:
raise InvalidTemplateException(
'Error: In {} ({}) A branch object failed to create component fields {}'
.format(pe.template_type, pe.template_name, branch_name))
# Special handling for chillers with condenser water and chilled water branches
# todo_eo: Currently the chilled and condenser water branches are separated by parsing the names. A more
# robust solution should be investigated.
if pe.template_type in ['HVACTemplate:Plant:Chiller', 'HVACTemplate:Plant:Chiller:ObjectReference'] \
and getattr(pe, 'condenser_type', 'WaterCooled') == 'WaterCooled':
for branch_name, branch_structure in branch_objects.items():
if 'chilledwater' in plant_loop_class_object.template_type.lower() and 'chw' in branch_name.lower():
branch_dictionary.update({branch_name: branch_objects[branch_name]})
if 'condenserwater' in plant_loop_class_object.template_type.lower() and 'cnd' in branch_name.lower():
branch_dictionary.update({branch_name: branch_objects[branch_name]})
# typical handling when all plant equipment branches belong in one loop
elif pe.template_plant_loop_type in plant_loop_class_object.template_type:
branch_dictionary.update(branch_objects)
if branch_dictionary:
return {'Branch': branch_dictionary}
else:
return None
@staticmethod
def _get_zone_system_waterloop_branches_by_loop_type(
plant_loop_class_object,
expanded_zones,
expanded_systems):
"""
Extract zone and system branch objects by loop type and store in epJSON formatted dictionary
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_zones: ExpandZone objects
:param expanded_systems: ExpandSystem objects
:return: epJSON formatted dictionary of branch objects
"""
# create list of regex matches for the given loop
if 'chilledwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:Cooling:Water($|:DetailedGeometry)+', ]
elif 'hotwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:Heating:Water($|:DetailedGeometry)+', '^ZoneHVAC:Baseboard.*Water']
elif 'mixedwater' in plant_loop_class_object.template_type.lower():
branch_rgx = ['^Coil:.*HeatPump.*', '^AirConditioner:VariableRefrigerantFlow$']
elif 'condenserwater' in plant_loop_class_object.template_type.lower():
return None
else:
            raise InvalidTemplateException(
                'Error: an invalid loop type was specified when creating plant loop connections: {}'
                .format(plant_loop_class_object.template_type))
branch_dictionary = {}
object_list = [expanded_zones or {}, expanded_systems or {}]
for class_object in object_list:
for co in class_object.values():
branch_objects = copy.deepcopy(co.epjson.get('Branch', {}))
for branch_name, branch_structure in branch_objects.items():
                    # the regex check for 'main branch' is to prevent DualDuct main branches from accidentally being
                    # included, since they have coil objects in them as well. The typical main branch is never
                    # accidentally caught because the coil objects are never in the 0th position.
for br in branch_rgx:
if re.match(br, branch_structure['components'][0]['component_object_type']) and not \
re.match('.*main branch$', branch_name.lower()):
branch_dictionary.update({branch_name: branch_objects[branch_name]})
if branch_dictionary:
return {'Branch': branch_dictionary}
else:
return None
def _split_supply_and_demand_side_branches(
self,
plant_loop_class_object,
expanded_plant_equipment,
expanded_systems,
expanded_zones):
"""
Separate plant equipment, zone, and system branches into supply and demand sides for a given ExpandPlantLoop
object.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
:param expanded_systems: expanded dictionary of ExpandSystem objects
:param expanded_zones: expanded dictionary of ExpandZone objects
:return: tuple of demand and supply side branches for processing
"""
# Get plant equipment, zone, and system branches
plant_equipment_branch_dictionary = self._get_plant_equipment_waterloop_branches_by_loop_type(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment
)
zone_system_branch_dictionary = self._get_zone_system_waterloop_branches_by_loop_type(
plant_loop_class_object=plant_loop_class_object,
expanded_zones=expanded_zones,
expanded_systems=expanded_systems
)
# get branches in the loop
demand_branches = {}
# Special handling for condenser water loop where the chiller objects are the demand side.
if 'condenserwater' in plant_loop_class_object.template_type.lower():
pebd = copy.deepcopy(plant_equipment_branch_dictionary)
for object_name, object_structure in plant_equipment_branch_dictionary['Branch'].items():
try:
if re.match(r'Chiller:.*', object_structure['components'][0]['component_object_type']):
demand_branches.update({object_name: pebd['Branch'].pop(object_name)})
except (AttributeError, KeyError):
raise InvalidTemplateException(
'Error: Branch object is incorrectly formatted: {}'.format(plant_equipment_branch_dictionary))
supply_branches = pebd['Branch']
else:
demand_branches = zone_system_branch_dictionary.get('Branch') if zone_system_branch_dictionary else None
supply_branches = plant_equipment_branch_dictionary.get('Branch') \
if plant_equipment_branch_dictionary else None
return demand_branches, supply_branches
def _create_water_loop_connectors_and_nodelist(
self,
plant_loop_class_object,
expanded_plant_equipment,
expanded_zones=None,
expanded_systems=None):
"""
Create Branchlist, Connector, ConnectorList, and supply NodeLists objects that connect the PlantLoop to supply
and demand water objects. This operation is performed outside of ExpandObjects because it requires outputs
from ExpandPlantEquipment, ExpandZone, and ExpandSystem objects.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
:param expanded_systems: expanded dictionary of ExpandSystem objects
:param expanded_zones: expanded dictionary of ExpandZone objects
:return: Updated class epjson attribute with Branchlist, Connector, and ConnectorList objects.
"""
# Get plant equipment, zone, and system branches. Split them into demand and supply sides
demand_branches, supply_branches = self._split_supply_and_demand_side_branches(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment,
expanded_systems=expanded_systems,
expanded_zones=expanded_zones
)
# check to make sure loops aren't empty
if demand_branches:
if plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop':
try:
equipment_types = [
(component[-1]['component_name'], component[-1]['component_object_type']) for
object_name, object_structure in supply_branches.items()
for component in object_structure.values()]
except AttributeError:
raise PyExpandObjectsYamlStructureException(
                        'Error: In {} ({}) No supply branches found for plant loop object'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name))
chillers = [i for i in equipment_types if re.match(r'Chiller:.*', i[1])]
towers = [i for i in equipment_types if re.match(r'CoolingTower:.*', i[1])]
# For water-cooled chillers, the tower is in the condenserloop so that needs to be checked instead of
# the chilledwaterloop
if 'CondenserWaterLoop' in [
ep_structure.template_plant_loop_type for ep_name, ep_structure in expanded_plant_equipment.items()
if ep_structure.template_type in ['HVACTemplate:Plant:Tower',
'HVACTemplate:Plant:Tower:ObjectReference']]:
towers = True
if chillers and not towers and 'CondenserWaterLoop' in [
ep_structure.template_plant_loop_type
for ep_name, ep_structure in expanded_plant_equipment.items()]:
raise InvalidTemplateException(
'Error: In {} ({})'
' there is one or more water cooled chiller(s) but there are no towers serving this loop.'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name))
if not demand_branches or not supply_branches:
msg = []
if not demand_branches:
msg.append('There is no demand-side equipment connected to this loop.')
if not supply_branches:
msg.append('There is no supply-side equipment serving this loop.')
raise InvalidTemplateException(
'Error: in {} ({}). {}'
.format(plant_loop_class_object.template_type, plant_loop_class_object.unique_name,
' '.join(msg)))
# Use ExpandObjects class for helper functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(plant_loop_class_object, 'template_name')
# create connector objects based on template attributes
if (plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop' and getattr(
plant_loop_class_object, 'chilled_water_supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:CondenserWaterLoop' and getattr(
plant_loop_class_object, 'condenser_water_supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:HotWaterLoop' and getattr(
plant_loop_class_object, 'supply_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:MixedWaterLoop' and getattr(
plant_loop_class_object, 'supply_side_bypass_pipe', 'Yes') == 'No'):
supply_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'SupplyNoBypass'])
connector_supply_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'SupplyNoBypass'])
connector_supply_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'SupplyNoBypass'])
# set the 'branches' value type to list if it's none
            if not connector_supply_mixer['branches']:
                connector_supply_mixer['branches'] = []
            if not connector_supply_splitter['branches']:
                connector_supply_splitter['branches'] = []
else:
supply_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'Supply'])
connector_supply_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'Supply'])
connector_supply_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'Supply'])
if (plant_loop_class_object.template_type == 'HVACTemplate:Plant:ChilledWaterLoop' and getattr(
plant_loop_class_object, 'chilled_water_demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:CondenserWaterLoop' and getattr(
plant_loop_class_object, 'condenser_water_demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:HotWaterLoop' and getattr(
plant_loop_class_object, 'demand_side_bypass_pipe', 'Yes') == 'No') or \
(plant_loop_class_object.template_type == 'HVACTemplate:Plant:MixedWaterLoop' and getattr(
plant_loop_class_object, 'demand_side_bypass_pipe', 'Yes') == 'No'):
demand_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'DemandNoBypass'])
connector_demand_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'DemandNoBypass'])
connector_demand_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'DemandNoBypass'])
# set the 'branches' value type to list if it's none
            if not connector_demand_mixer['branches']:
                connector_demand_mixer['branches'] = []
            if not connector_demand_splitter['branches']:
                connector_demand_splitter['branches'] = []
else:
demand_branchlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'BranchList', 'Demand'])
connector_demand_splitter = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Splitter', 'Demand'])
connector_demand_mixer = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'Connector', 'Mixer', 'Demand'])
# create supply nodelist
supply_nodelist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'NodeList', 'Supply'])
# apply branches
try:
for branch in demand_branches:
demand_branchlist['branches'].insert(1, {'branch_name': branch})
connector_demand_splitter['branches'].append({'outlet_branch_name': branch})
connector_demand_mixer['branches'].append({'inlet_branch_name': branch})
for branch in supply_branches:
supply_branchlist['branches'].insert(1, {'branch_name': branch})
connector_supply_splitter['branches'].insert(-1, {'outlet_branch_name': branch})
connector_supply_mixer['branches'].insert(-1, {'inlet_branch_name': branch})
supply_nodelist['nodes'].insert(
0,
{'node_name': supply_branches[branch]['components'][-1]['component_outlet_node_name']})
except (KeyError, AttributeError):
raise PyExpandObjectsYamlStructureException(
'Error: In {} AutoCreated PlantLoop Connector YAML object was '
'improperly formatted'.format(plant_loop_class_object.template_type))
# add connector list
demand_connectorlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'ConnectorList', 'Demand']
)
supply_connectorlist = eo.get_structure(
structure_hierarchy=['AutoCreated', 'PlantLoop', 'ConnectorList', 'Supply']
)
# format yaml objects into epJSON dictionaries, resolve, and output
connector_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=[
{'BranchList': demand_branchlist},
{'BranchList': supply_branchlist},
{'Connector:Splitter': connector_demand_splitter},
{'Connector:Splitter': connector_supply_splitter},
{'Connector:Mixer': connector_demand_mixer},
{'Connector:Mixer': connector_supply_mixer},
{'ConnectorList': demand_connectorlist},
{'ConnectorList': supply_connectorlist},
{'NodeList': supply_nodelist}
])
resolved_path_dictionary = eo.resolve_objects(epjson=connector_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary
)
return
def _create_plant_equipment_lists(
self,
plant_loop_class_object,
expanded_plant_equipment):
"""
Create PlantEquipmentList and CondenserEquipmentList for a given ExpandPlantLoop class object.
This operation is performed outside of ExpandObjects because it requires outputs from
ExpandPlantEquipment objects.
:param plant_loop_class_object: ExpandPlantLoop class object
:param expanded_plant_equipment: expanded dictionary of ExpandPlantEquipment objects
        :return: Updated class epjson attribute with PlantEquipmentList or CondenserEquipmentList.
"""
# Get plant equipment, zone, and system branches. Split them into demand and supply sides
_, supply_branches = self._split_supply_and_demand_side_branches(
plant_loop_class_object=plant_loop_class_object,
expanded_plant_equipment=expanded_plant_equipment,
expanded_systems=None,
expanded_zones=None
)
equipment = []
# Extract priority from each equipment object referenced by the branch and use it to order the equipment list
supply_branches_with_priority = []
for sb in supply_branches.values():
for equipment_name, equipment_class in expanded_plant_equipment.items():
if equipment_class.template_type == 'HVACTemplate:Plant:Boiler:ObjectReference':
equipment_name = equipment_class.boiler_name
elif equipment_class.template_type == 'HVACTemplate:Plant:Chiller:ObjectReference':
equipment_name = equipment_class.chiller_name
elif equipment_class.template_type == 'HVACTemplate:Plant:Tower:ObjectReference':
equipment_name = equipment_class.cooling_tower_name
if sb['components'][-1]['component_name'] == equipment_name:
# make tuple of (object, priority)
# if priority isn't set, use infinity to push it to the end when sorted
supply_branches_with_priority.append((sb, getattr(equipment_class, 'priority', float('inf'))))
supply_branches_ordered = [
branch for branch, priority
in sorted(supply_branches_with_priority, key=lambda s: s[1])]
for sb in supply_branches_ordered:
equipment.append({
'equipment_name': sb['components'][-1]['component_name'],
'equipment_object_type': sb['components'][-1]['component_object_type']
})
# use ExpandObjects functions
eo = ExpandObjects(logger_level=self.logger_level, logger_name=self.logger_name)
eo.unique_name = getattr(plant_loop_class_object, 'template_name')
if 'hotwater' in plant_loop_class_object.template_type.lower() or \
'chilledwater' in plant_loop_class_object.template_type.lower():
list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentList'])
list_dictionary['equipment'] = equipment
equipment_list_dictionary = [{'PlantEquipmentList': list_dictionary}, ]
elif 'mixedwater' in plant_loop_class_object.template_type.lower():
heating_equipment = [i for i in equipment if re.match(r'Boiler:.*', i['equipment_object_type'])]
heating_list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentListMixedWaterHeating'])
heating_list_dictionary['equipment'] = heating_equipment
cooling_equipment = [i for i in equipment if re.match(r'CoolingTower:.*', i['equipment_object_type'])]
cooling_list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'PlantEquipmentListMixedWaterCooling'])
cooling_list_dictionary['equipment'] = cooling_equipment
equipment_list_dictionary = [
{'PlantEquipmentList': cooling_list_dictionary},
{'PlantEquipmentList': heating_list_dictionary}]
elif 'condenserwater' in plant_loop_class_object.template_type.lower():
list_dictionary = \
eo.get_structure(structure_hierarchy=['AutoCreated', 'PlantLoop', 'CondenserEquipmentList'])
list_dictionary['equipment'] = equipment
equipment_list_dictionary = [{'CondenserEquipmentList': list_dictionary}, ]
else:
raise InvalidTemplateException(
'Error: an invalid loop type was specified when creating plant loop connections: {}'
.format(plant_loop_class_object.template_type))
equipment_list_formatted_dictionary = eo.yaml_list_to_epjson_dictionaries(
yaml_list=equipment_list_dictionary)
resolved_path_dictionary = eo.resolve_objects(epjson=equipment_list_formatted_dictionary)
        # save output to class epjson
self.merge_epjson(
super_dictionary=self.epjson,
object_dictionary=resolved_path_dictionary)
return
def run(self, input_epjson=None):
"""
Execute HVAC Template process workflow
:param input_epjson: input epJSON file
:return: epJSON containing expanded objects from templates
"""
if not input_epjson:
if self.input_epjson:
input_epjson = self.input_epjson
else:
raise InvalidEpJSONException("No epJSON file loaded or provided to HVACTemplate processor")
self.epjson_process(epjson_ref=input_epjson)
self.logger.info('##### PreProcessing Data #####')
self._hvac_template_preprocess(epjson=self.input_epjson)
self.logger.info('##### Processing Thermostats #####')
self.expanded_thermostats = self._expand_templates(
templates=self.templates_thermostats,
expand_class=ExpandThermostat)
self.logger.info('##### Processing Systems #####')
self.expanded_systems = self._expand_templates(
templates=self.templates_systems,
expand_class=ExpandSystem)
self.logger.info('##### Processing Zones #####')
self.expanded_zones = self._expand_templates(
templates=self.templates_zones,
expand_class=ExpandZone,
system_class_objects=self.expanded_systems)
self.logger.info('##### Building Zone-Thermostat Connections #####')
for _, zone_class_object in self.expanded_zones.items():
self._create_zonecontrol_thermostat(zone_class_object=zone_class_object)
self.logger.info('##### Building System-Zone Connections #####')
for _, system_class_object in self.expanded_systems.items():
# VRF systems do not connect via air paths, and need a separate function.
if system_class_object.template_type == 'HVACTemplate:System:VRF':
self._create_system_vrf_path_connection_objects(
system_class_object=system_class_object,
expanded_zones=self.expanded_zones)
else:
self._create_system_path_connection_objects(
system_class_object=system_class_object,
expanded_zones=self.expanded_zones)
self.logger.info('##### Processing Plant Loops #####')
self.expanded_plant_loops = self._expand_templates(
templates=self.templates_plant_loops,
expand_class=ExpandPlantLoop)
self.logger.info('##### Processing Plant Equipment #####')
self.expanded_plant_equipment = self._expand_templates(
templates=self.templates_plant_equipment,
expand_class=ExpandPlantEquipment,
plant_loop_class_objects=self.expanded_plant_loops)
# Pass through expanded plant equipment objects to create additional plant loops and equipment if necessary
self._create_additional_plant_loops_and_equipment_from_equipment(
expanded_plant_equipment=self.expanded_plant_equipment,
expanded_plant_loops=self.expanded_plant_loops)
self.logger.info('##### Building Plant-Plant Equipment Connections #####')
for expanded_pl in self.expanded_plant_loops.values():
self._create_water_loop_connectors_and_nodelist(
plant_loop_class_object=expanded_pl,
expanded_plant_equipment=self.expanded_plant_equipment,
expanded_systems=self.expanded_systems,
expanded_zones=self.expanded_zones)
self._create_plant_equipment_lists(
plant_loop_class_object=expanded_pl,
expanded_plant_equipment=self.expanded_plant_equipment)
self.logger.info('##### Creating epJSON #####')
# Merge each set of epJSON dictionaries
merge_list = [
self.epjson,
self.base_objects,
*[j.epjson for i, j in self.expanded_thermostats.items()],
*[j.epjson for i, j in self.expanded_zones.items()],
*[j.epjson for i, j in self.expanded_systems.items()],
*[j.epjson for i, j in self.expanded_plant_loops.items()],
*[j.epjson for i, j in self.expanded_plant_equipment.items()]]
output_epjson = {}
# The unique_name_override option is enabled here due to ObjectReference templates having the base equipment
# in them as well as being present in the base epjson. A better solution should be investigated so that this
# option can be turned back off.
for merge_dictionary in merge_list:
self.merge_epjson(
super_dictionary=output_epjson,
object_dictionary=merge_dictionary,
unique_name_override=True)
# Use this for file debugging
# import json
# with open('test.epJSON', 'w') as base_file:
# json.dump(output_epjson, base_file, indent=4, sort_keys=True)
# Create output format
output_epjson = {
"epJSON": output_epjson,
"epJSON_base": self.base_objects,
"epJSON_hvac_templates": self.templates,
'Output:PreprocessorMessage': self.stream.getvalue()
}
return output_epjson
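
# Hypothetical usage sketch (the class name and constructor arguments are assumptions, not taken from
# this module): run() expands every HVACTemplate object in an epJSON input and returns a dictionary with
# the expanded 'epJSON' plus the base objects, templates, and preprocessor messages.
#
#     import json
#     with open('in.epJSON') as f:
#         raw_epjson = json.load(f)
#     processor = HVACTemplate()  # assumed class name
#     output = processor.run(input_epjson=raw_epjson)
#     with open('expanded.epJSON', 'w') as f:
#         json.dump(output['epJSON'], f, indent=4, sort_keys=True)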
|
py | 1a545d7e1b2b4e13228fa6bad5308a8b92a15673 | from __future__ import print_function
import pandas as pd
from sklearn.model_selection import train_test_split
from keras_text_summarization.library.utility.plot_utils import plot_and_save_history
from keras_text_summarization.library.seq2seq import Seq2SeqSummarizer
from keras_text_summarization.library.applications.fake_news_loader import fit_text
import numpy as np
import os
import tensorflow as tf
LOAD_EXISTING_WEIGHTS = False
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
def main():
np.random.seed(42)
data_dir_path = './data'
report_dir_path = './reports'
model_dir_path = './models'
print('loading csv file ...')
#df = pd.read_csv(data_dir_path + "/fake_or_real_news.csv")
print('extract configuration from input texts ...')
with open(data_dir_path + '/train_preprocessed.en') as f:
        X = f.read().split('\n')
with open(data_dir_path + '/train_preprocessed.de') as f:
        Y = f.read().split('\n')
config = fit_text(X, Y)
summarizer = Seq2SeqSummarizer(config)
if LOAD_EXISTING_WEIGHTS:
summarizer.load_weights(weight_file_path=Seq2SeqSummarizer.get_weight_file_path(model_dir_path=model_dir_path))
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.2, random_state=42)
    print('training size: ', len(Xtrain))
print('testing size: ', len(Xtest))
print('start fitting ...')
history = summarizer.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=100)
history_plot_file_path = report_dir_path + '/' + Seq2SeqSummarizer.model_name + '-history.png'
if LOAD_EXISTING_WEIGHTS:
history_plot_file_path = report_dir_path + '/' + Seq2SeqSummarizer.model_name + '-history-v' + str(summarizer.version) + '.png'
plot_and_save_history(history, summarizer.model_name, history_plot_file_path, metrics={'loss', 'acc'})
if __name__ == '__main__':
main()
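
# Hypothetical follow-up sketch (commented out): load_weights and get_weight_file_path are the same calls
# used above; summarize() is assumed to exist in keras_text_summarization, and the weight file is assumed
# to have been saved under model_dir_path during training.
#     summarizer = Seq2SeqSummarizer(config)
#     summarizer.load_weights(weight_file_path=Seq2SeqSummarizer.get_weight_file_path(model_dir_path=model_dir_path))
#     print(summarizer.summarize(Xtest[0]))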
|
py | 1a545e0a97b333d4fb595b52e1891b6885c9960e | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
from .models import Domain, Items, Relation
from django.contrib.admin.models import LogEntry
UserAdmin.fieldsets += ('Custom fields set', {'fields': ('isu_number', 'is_rpd_developer', 'is_expertise_master')}),
admin.site.register(User, UserAdmin)
# admin.site.register(Membership)
class ItemAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'domain', 'value',)
list_filter = ('domain',)
empty_value_display = 'None'
class RelationAdmin(admin.ModelAdmin):
list_display = ('id', 'item1', 'relation', 'item2',)
list_filter = ('item1', 'relation')
empty_value_display = 'None'
admin.site.register(LogEntry)
admin.site.register(Domain)
admin.site.register(Items, ItemAdmin)
admin.site.register(Relation, RelationAdmin)
|
py | 1a545fcac0c880a03669f1f1d522ea32db72dcd3 | # -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import no_symlinks
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import salt libs
from salt.modules.inspectlib.collector import Inspector
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(no_symlinks(), "Git missing 'core.symlinks=true' config")
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def setUp(self):
patcher = patch("os.mkdir", MagicMock())
patcher.start()
self.addCleanup(patcher.stop)
def test_env_loader(self):
'''
        Test that cache and pid file paths are set from the environment loader.
:return:
'''
cachedir = os.sep + os.sep.join(['foo', 'cache'])
piddir = os.sep + os.sep.join(['foo', 'pid'])
inspector = Inspector(cachedir=cachedir, piddir=piddir, pidfilename='bar.pid')
self.assertEqual(
inspector.dbfile,
os.sep + os.sep.join(['foo', 'cache', '_minion_collector.db']))
self.assertEqual(
inspector.pidfile,
os.sep + os.sep.join(['foo', 'pid', 'bar.pid']))
def test_file_tree(self):
'''
Test file tree.
:return:
'''
inspector = Inspector(cachedir=os.sep + 'test',
piddir=os.sep + 'test',
pidfilename='bar.pid')
tree_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'inspectlib', 'tree_test')
expected_tree = ([os.sep + os.sep.join(['a', 'a', 'dummy.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.b']),
os.sep + os.sep.join(['b', 'b.1']),
os.sep + os.sep.join(['b', 'b.2']),
os.sep + os.sep.join(['b', 'b.3'])],
[os.sep + 'a',
os.sep + os.sep.join(['a', 'a']),
os.sep + os.sep.join(['a', 'b']),
os.sep + os.sep.join(['a', 'c']),
os.sep + 'b',
os.sep + 'c'],
[os.sep + os.sep.join(['a', 'a', 'dummy.ln.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.ln.b']),
os.sep + os.sep.join(['a', 'c', 'b.1']),
os.sep + os.sep.join(['b', 'b.4']),
os.sep + os.sep.join(['b', 'b.5']),
os.sep + os.sep.join(['c', 'b.1']),
os.sep + os.sep.join(['c', 'b.2']),
os.sep + os.sep.join(['c', 'b.3'])])
tree_result = []
for chunk in inspector._get_all_files(tree_root):
buff = []
for pth in chunk:
buff.append(pth.replace(tree_root, ''))
tree_result.append(buff)
tree_result = tuple(tree_result)
self.assertEqual(expected_tree, tree_result)
def test_get_unmanaged_files(self):
'''
Test get_unmanaged_files.
:return:
'''
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
managed = (
['a', 'b', 'c'],
['d', 'e', 'f'],
['g', 'h', 'i'],
)
system_all = (
['a', 'b', 'c'],
['d', 'E', 'f'],
['G', 'H', 'i'],
)
self.assertEqual(inspector._get_unmanaged_files(managed=managed, system_all=system_all),
([], ['E'], ['G', 'H']))
def test_pkg_get(self):
'''
        Test that the OS grains switch the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
py | 1a545fd9eaf6fb02783b8d119fa18140b606444d | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
fMRIprep base processing workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_fmriprep_wf
.. autofunction:: init_single_subject_wf
"""
import sys
import os
from copy import deepcopy
from nipype import __version__ as nipype_ver
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.interfaces.nilearn import NILEARN_VERSION
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.bids import (
BIDSInfo, BIDSDataGrabber, BIDSFreeSurferDir
)
from niworkflows.utils.bids import collect_data
from niworkflows.utils.misc import fix_multi_T1w_source_name
from smriprep.workflows.anatomical import init_anat_preproc_wf
from ..interfaces import SubjectSummary, AboutSummary, DerivativesDataSink
from ..__about__ import __version__
from .bold import init_func_preproc_wf
def init_fmriprep_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
fs_subjects_dir,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
omp_nthreads,
output_dir,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
run_uuid,
skull_strip_fixed_seed,
skull_strip_template,
spaces,
subject_list,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
work_dir,
):
"""
Build *fMRIPrep*'s pipeline.
This workflow organizes the execution of FMRIPREP, with a sub-workflow for
each subject.
If FreeSurfer's ``recon-all`` is to be run, a corresponding folder is created
and populated with any needed template subjects under the derivatives folder.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
import os
from collections import namedtuple, OrderedDict
BIDSLayout = namedtuple('BIDSLayout', ['root'])
from fmriprep.workflows.base import init_fmriprep_wf
from niworkflows.utils.spaces import Reference, SpatialReferences
os.environ['FREESURFER_HOME'] = os.getcwd()
wf = init_fmriprep_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
fs_subjects_dir=None,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
omp_nthreads=1,
output_dir='.',
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
run_uuid='X',
skull_strip_fixed_seed=False,
skull_strip_template=Reference('OASIS30ANTs'),
spaces=SpatialReferences(
spaces=['MNI152Lin',
('fsaverage', {'density': '10k'}),
'T1w',
'fsnative'],
checkpoint=True),
subject_list=['fmripreptest'],
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
work_dir='.',
)
Parameters
----------
anat_only : bool
Disable functional workflows
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
Number of volumes to consider as non steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
regressors_all_comps
Return all CompCor component time series instead of the top fraction
regressors_dvars_th
Criterion for flagging DVARS outliers
regressors_fd_th
Criterion for flagging framewise displacement outliers
run_uuid : str
Unique identifier for execution instance
skull_strip_template : tuple
Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
and corresponding dictionary of output-space modifiers.
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
subject_list : list
List of subject labels
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
work_dir : str
Directory in which to store workflow execution state and temporary files
"""
fmriprep_wf = Workflow(name='fmriprep_wf')
fmriprep_wf.base_dir = work_dir
if freesurfer:
fsdir = pe.Node(
BIDSFreeSurferDir(
derivatives=output_dir,
freesurfer_home=os.getenv('FREESURFER_HOME'),
spaces=spaces.get_fs_spaces()),
name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)
if fs_subjects_dir is not None:
fsdir.inputs.subjects_dir = str(fs_subjects_dir.absolute())
reportlets_dir = os.path.join(work_dir, 'reportlets')
for subject_id in subject_list:
single_subject_wf = init_single_subject_wf(
anat_only=anat_only,
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
echo_idx=echo_idx,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
hires=hires,
ignore=ignore,
layout=layout,
longitudinal=longitudinal,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
name="single_subject_" + subject_id + "_wf",
omp_nthreads=omp_nthreads,
output_dir=output_dir,
regressors_all_comps=regressors_all_comps,
regressors_dvars_th=regressors_dvars_th,
regressors_fd_th=regressors_fd_th,
reportlets_dir=reportlets_dir,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
spaces=spaces,
subject_id=subject_id,
t2s_coreg=t2s_coreg,
task_id=task_id,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
single_subject_wf.config['execution']['crashdump_dir'] = (
os.path.join(output_dir, "fmriprep", "sub-" + subject_id, 'log', run_uuid)
)
for node in single_subject_wf._get_all_nodes():
node.config = deepcopy(single_subject_wf.config)
if freesurfer:
fmriprep_wf.connect(fsdir, 'subjects_dir',
single_subject_wf, 'inputnode.subjects_dir')
else:
fmriprep_wf.add_nodes([single_subject_wf])
return fmriprep_wf
def init_single_subject_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
name,
omp_nthreads,
output_dir,
reportlets_dir,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
skull_strip_fixed_seed,
skull_strip_template,
spaces,
subject_id,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
):
"""
This workflow organizes the preprocessing pipeline for a single subject.
It collects and reports information about the subject, and prepares
sub-workflows to perform anatomical and functional preprocessing.
Anatomical preprocessing is performed in a single workflow, regardless of
the number of sessions.
Functional preprocessing is performed using a separate workflow for each
individual BOLD series.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from collections import namedtuple
from niworkflows.utils.spaces import Reference, SpatialReferences
from fmriprep.workflows.base import init_single_subject_wf
BIDSLayout = namedtuple('BIDSLayout', ['root'])
wf = init_single_subject_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
name='single_subject_wf',
omp_nthreads=1,
output_dir='.',
reportlets_dir='.',
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
skull_strip_fixed_seed=False,
skull_strip_template=Reference('OASIS30ANTs'),
spaces=SpatialReferences(
spaces=['MNI152Lin',
('fsaverage', {'density': '10k'}),
'T1w',
'fsnative'],
checkpoint=True),
subject_id='test',
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
)
Parameters
----------
anat_only : bool
Disable functional workflows
aroma_melodic_dim : int
Maximum number of components identified by MELODIC within ICA-AROMA
(default is -200, i.e., no limitation).
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
Number of volumes to consider as non steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
name : str
Name of workflow
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
reportlets_dir : str
Directory in which to save reportlets
regressors_all_comps : bool
Return all CompCor component time series instead of the top fraction
regressors_fd_th : float
Criterion for flagging framewise displacement outliers
regressors_dvars_th : float
Criterion for flagging DVARS outliers
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
skull_strip_template : tuple
Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
and corresponding dictionary of output-space modifiers.
subject_id : str
Subject label for this single-subject workflow
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
Inputs
------
subjects_dir : str
FreeSurfer's ``$SUBJECTS_DIR``.
"""
if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
# for documentation purposes
subject_data = {
't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
}
else:
subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]
# Make sure we always go through these two checks
if not anat_only and subject_data['bold'] == []:
raise Exception("No BOLD images found for participant {} and task {}. "
"All workflows require BOLD images.".format(
subject_id, task_id if task_id else '<all>'))
if not subject_data['t1w']:
raise Exception("No T1w images found for participant {}. "
"All workflows require T1w images.".format(subject_id))
workflow = Workflow(name=name)
workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).
""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
workflow.__postdesc__ = """
Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").
### Copyright Waiver
The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.
### References
""".format(nilearn_ver=NILEARN_VERSION)
inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
name='inputnode')
bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
name='bidssrc')
bids_info = pe.Node(BIDSInfo(
bids_dir=layout.root, bids_validate=False), name='bids_info')
summary = pe.Node(SubjectSummary(std_spaces=spaces.get_spaces(nonstandard=False),
nstd_spaces=spaces.get_spaces(standard=False)),
name='summary', run_without_submitting=True)
about = pe.Node(AboutSummary(version=__version__,
command=' '.join(sys.argv)),
name='about', run_without_submitting=True)
ds_report_summary = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='summary', keep_dtype=True),
name='ds_report_summary', run_without_submitting=True)
ds_report_about = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='about', keep_dtype=True),
name='ds_report_about', run_without_submitting=True)
# Preprocessing of T1w (includes registration to MNI)
anat_preproc_wf = init_anat_preproc_wf(
bids_root=layout.root,
debug=debug,
freesurfer=freesurfer,
hires=hires,
longitudinal=longitudinal,
name="anat_preproc_wf",
num_t1w=len(subject_data['t1w']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
reportlets_dir=reportlets_dir,
spaces=spaces,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
)
workflow.connect([
(inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
(bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
(inputnode, summary, [('subjects_dir', 'subjects_dir')]),
(bidssrc, summary, [('t1w', 't1w'),
('t2w', 't2w'),
('bold', 'bold')]),
(bids_info, summary, [('subject', 'subject_id')]),
(bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
(bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
('t2w', 'inputnode.t2w'),
('roi', 'inputnode.roi'),
('flair', 'inputnode.flair')]),
(bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(summary, ds_report_summary, [('out_report', 'in_file')]),
(bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(about, ds_report_about, [('out_report', 'in_file')]),
])
# Overwrite ``out_path_base`` of smriprep's DataSinks
for node in workflow.list_node_names():
if node.split('.')[-1].startswith('ds_'):
workflow.get_node(node).interface.out_path_base = 'fmriprep'
if anat_only:
return workflow
for bold_file in subject_data['bold']:
func_preproc_wf = init_func_preproc_wf(
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
bold_file=bold_file,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
ignore=ignore,
layout=layout,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
num_bold=len(subject_data['bold']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
reportlets_dir=reportlets_dir,
regressors_all_comps=regressors_all_comps,
regressors_fd_th=regressors_fd_th,
regressors_dvars_th=regressors_dvars_th,
spaces=spaces,
t2s_coreg=t2s_coreg,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
workflow.connect([
(anat_preproc_wf, func_preproc_wf,
[(('outputnode.t1w_preproc', _pop), 'inputnode.t1w_preproc'),
('outputnode.t1w_brain', 'inputnode.t1w_brain'),
('outputnode.t1w_mask', 'inputnode.t1w_mask'),
('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
('outputnode.template', 'inputnode.template'),
('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
('outputnode.joint_template', 'inputnode.joint_template'),
('outputnode.joint_anat2std_xfm', 'inputnode.joint_anat2std_xfm'),
('outputnode.joint_std2anat_xfm', 'inputnode.joint_std2anat_xfm'),
# Undefined if --fs-no-reconall, but this is safe
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id'),
('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')]),
])
return workflow
def _prefix(subid):
if subid.startswith('sub-'):
return subid
return '-'.join(('sub', subid))
def _pop(inlist):
if isinstance(inlist, (list, tuple)):
return inlist[0]
return inlist
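# Illustrative behaviour of the two helpers above (not part of fMRIPrep's
# public API; the example values are made up):
#
#   _prefix('01')      -> 'sub-01'
#   _prefix('sub-01')  -> 'sub-01'
#   _pop(['/data/sub-01_T1w.nii.gz', '/data/sub-01_run-02_T1w.nii.gz'])
#                      -> '/data/sub-01_T1w.nii.gz'
#   _pop('/data/sub-01_T1w.nii.gz')
#                      -> '/data/sub-01_T1w.nii.gz'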
|
py | 1a545fe246564f2a244c4a7519b3e111c48178a3 | from django.urls import path
from apps.superbonus import views
urlpatterns = [
path('', views.app, name='bonus-app-view'),
path('add-condo', views.add_condo, name='bonus-add-condo'),
path('add-villa', views.add_villa, name='bonus-add-villa'),
path('interventions/<int:id>', views.interventions, name='bonus-add-intervention'),
path('preview/<int:id>', views.preview, name='bonus-preview'),
path('catastal/<int:id>', views.catastal, name='bonus-catastal'),
path('beneficiary/<int:id>', views.beneficiary, name='bonus-beneficiary'),
path('interventions-costs/<int:id>/<str:type>', views.intervention_costs, name='bonus-costs'),
path('add-interventions-costs/<int:id>/<str:type>', views.add_intervention_costs, name='add-bonus-costs'),
path('edit-interventions-costs/<int:id>/<str:type>', views.edit_intervention_costs, name='edit-bonus-costs'),
path('add-professional/<int:id>', views.professionals, name='bonus-professional'),
path('add-professional/<int:id>/<str:type>/<str:prof>/', views.add_professionals, name='bonus-add-professional'),
path('delete/<str:type>/<int:id>', views.delete_prop, name='bonus-delete'),
]
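# Illustrative only: the named routes above can be resolved with
# django.urls.reverse; the exact prefix depends on how this urlconf is
# include()d in the project. For example:
#
#   from django.urls import reverse
#   reverse('bonus-preview', kwargs={'id': 1})                 # -> '<prefix>/preview/1'
#   reverse('bonus-costs', kwargs={'id': 1, 'type': 'condo'})  # -> '<prefix>/interventions-costs/1/condo'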
|
py | 1a546040ad45a5561000c8e58e9f86ec1f83f814 | '''
Template tags for Stripe Non-PCI Compliance
'''
from django import template
from django.template.loader import render_to_string
register = template.Library()
class StripeNode(template.Node):
def __init__(self, integration):
self.integration = template.Variable(integration)
def render(self, context):
int_obj = self.integration.resolve(context)
form_str = render_to_string("billing/stripe.html",
{"form": int_obj.generate_form(),
"integration": int_obj}, context)
return form_str
@register.tag
def stripe_payment(parser, token):
try:
tag, int_obj = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r was expecting a single argument" % token.split_contents()[0])
return StripeNode(int_obj)
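# Illustrative template usage (the tag library name below is assumed to match
# this module's filename, and ``stripe_obj`` is a hypothetical context
# variable holding the Stripe integration object):
#
#   {% load stripe_tags %}
#   {% stripe_payment stripe_obj %}
#
# which renders billing/stripe.html with the integration's generated form.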
|
py | 1a546074ce0fbba7cda5aa520eef2f279acf7f24 | import torch
import torchaudio
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from utils.config import config
from typing import Optional
from transforms.audio import RandomSoxAugmentations, NoSoxAugmentations
from transforms.mfsc import ToMelSpec, SpecAug
from dataset.test_dataset import SimClrTestDataset
from pytorch_lightning.utilities import move_data_to_device
class UnsupervisedCommonVoiceDataModule(pl.LightningDataModule):
def __init__(self):
super().__init__()
self.sox_augmentations = RandomSoxAugmentations(
config.dataset.sample_rate)
self.no_augmentation = NoSoxAugmentations(config.dataset.sample_rate)
self.mel_then_specaug = torch.jit.script(
torch.nn.Sequential(ToMelSpec(), SpecAug()))
self.only_mel = torch.jit.script(torch.nn.Sequential(ToMelSpec()))
def setup(self, stage: Optional[str] = None):
if stage == 'test':
self.test_dataset = SimClrTestDataset(
root=config.dataset.test_root, tsv=config.dataset.test)
self.transform = self.only_mel
self.augmentation = self.no_augmentation
else:
self.unsupervised_dataset = torchaudio.datasets.COMMONVOICE(
root=config.dataset.root, tsv=config.dataset.unsupervised_train)
self.transform = self.mel_then_specaug
self.augmentation = self.sox_augmentations
def num_train_samples(self):
return len(self.unsupervised_dataset)
def num_test_samples(self):
return len(self.test_dataset)
def train_dataloader(self):
return DataLoader(
self.unsupervised_dataset,
batch_size=config.dataloader.batch_size,
num_workers=config.dataloader.num_workers,
pin_memory=True,
drop_last=True,
shuffle=True,
collate_fn=self._collate_fn
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=len(self.test_dataset),
num_workers=0,
pin_memory=True,
drop_last=False,
collate_fn=self._collate_fn
)
def transfer_batch_to_device(self, batch, device):
device = device or self.device
self.transform = self.transform.to(device)
return move_data_to_device(batch, device)
def on_after_batch_transfer(self, batch, dataloader_idx):
input_a, input_b, input_a_lengths, input_b_lengths = batch
input_a = self.transform(input_a)
input_b = self.transform(input_b)
input_a_lengths = (input_a_lengths / (config.audio.model_sample_rate /
1000 * config.audio.stride_in_ms)).ceil_()
input_b_lengths = (input_b_lengths / (config.audio.model_sample_rate /
1000 * config.audio.stride_in_ms)).ceil_()
return (input_a, input_b, input_a_lengths, input_b_lengths)
# input: batch -> (waveform, sample_rate, dictionary)
# returns: (aug1, aug2, aug1_len, aug2_len) where aug1 == (batch, time)
def _collate_fn(self, batch):
raw_inputs = [b[0] for b in batch if b]
input_a = [self.augmentation(raw_input).transpose(1, 0)
for raw_input in raw_inputs]
input_b = [self.augmentation(raw_input).transpose(1, 0)
for raw_input in raw_inputs]
input_a_lengths = torch.tensor(
[t.size(0) for t in input_a],
dtype=torch.int32,
device=input_a[0].device,
)
input_b_lengths = torch.tensor(
[t.size(0) for t in input_b],
dtype=torch.int32,
device=input_b[0].device,
)
input_a = torch.nn.utils.rnn.pad_sequence(
input_a, batch_first=True).transpose(1, -1).squeeze(1)
input_b = torch.nn.utils.rnn.pad_sequence(
input_b, batch_first=True).transpose(1, -1).squeeze(1)
return (input_a, input_b, input_a_lengths, input_b_lengths)
if __name__ == "__main__":
loader = UnsupervisedCommonVoiceDataModule()
loader.setup()
for i, batch in enumerate(loader.train_dataloader()):
print(batch[0].shape, batch[1].shape, batch[2], batch[3])
if i > 0 and i % 20 == 0:
break
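# Note on the demo loop above (descriptive only): per _collate_fn, batch[0]
# and batch[1] are the two augmented views padded to shape (batch, time) and
# batch[2]/batch[3] hold the per-item sample lengths; after
# on_after_batch_transfer the views become mel-spectrogram features and the
# lengths are rescaled from samples to feature frames.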
|
py | 1a5460ee02f908abf0f256809b998e9ff9e4d207 | # -*- coding: utf-8 -*-
import random
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.db import transaction
from allauth.account.models import EmailAddress
from datetime import date
from faker import Faker
from wye.profiles.models import UserType, Profile
from wye.organisations.models import Organisation
from wye.regions.models import Location, State
from wye.workshops.models import WorkshopSections, Workshop
from wye.base.constants import WorkshopStatus, WorkshopLevel
NUMBER_OF_USERS = getattr(settings, "NUMBER_OF_USERS", 10)
NUMBER_OF_LOCATIONS = getattr(settings, "NUMBER_OF_LOCATIONS", 10)
NUMBER_OF_ORGANISATIONS = getattr(settings, "NUMBER_OF_ORGANISATIONS", 10)
NUMBER_OF_WORKSHOP_SECTIONS = getattr(
settings, "NUMBER_OF_WORKSHOP_SECTIONS", 5)
class Command(BaseCommand):
help = "Creating Initial demo data for testing application"
fake = Faker()
@transaction.atomic
def handle(self, *args, **options):
self.fake.seed(4321)
self.stdout.write(' Updating domain to localhost:8000') # Update site url
site = Site.objects.get_current()
site.domain, site.name = 'localhost:8000', 'Local'
site.save()
self.stdout.write(' Creating Superuser')
email = '[email protected]'
user = self.create_user(is_superuser=True, username='admin',
email=email, is_active=True, is_staff=True,
first_name='Admin')
# User
self.stdout.write(' Creating sample users')
for i in range(NUMBER_OF_USERS):
self.create_user()
# Location
self.stdout.write(' Creating sample locations')
self.create_locations(counter=NUMBER_OF_LOCATIONS)
# Organization
self.stdout.write(' Creating sample organisations')
self.create_organisations(counter=NUMBER_OF_ORGANISATIONS)
# Workshop
self.stdout.write(' Creating sample workshop sections')
self.create_workshop_sections()
# User Type
self.stdout.write(' Creating User Types')
self.create_user_type(counter=NUMBER_OF_WORKSHOP_SECTIONS)
# Profile
self.stdout.write(' Creating Profile')
self.create_profile(user)
# Sample Workshops
self.stdout.write(' Creating Sample Workshop')
self.create_sample_workshops(user)
user_email = EmailAddress.objects.create(
email=user.email, user=user, verified=True)
user_email.save()
def create_user(self, counter=None, **kwargs):
params = {
"first_name": kwargs.get('first_name', self.fake.first_name()),
"last_name": kwargs.get('last_name', self.fake.last_name()),
"username": kwargs.get('username', self.fake.user_name()),
"email": kwargs.get('email', self.fake.email()),
"is_active": kwargs.get('is_active', self.fake.boolean()),
"is_superuser": kwargs.get('is_superuser', False),
"is_staff": kwargs.get('is_staff', kwargs.get('is_superuser', self.fake.boolean())),
}
user, created = get_user_model().objects.get_or_create(**params)
if params['is_superuser']:
password = '123123'
user.set_password(password)
user.save()
self.stdout.write("SuperUser created with username: {username} and password: {password}".format(
username=params['username'], password=password)
)
return user
def create_locations(self, counter=None):
for i in range(counter):
state, updated = State.objects.update_or_create(
name=self.fake.state())
Location.objects.update_or_create(
name=self.fake.city(), state=state)
def create_user_type(self, counter=None):
user_type_tuple = [
('tutor', 'Tutor'),
('lead', 'Regional Lead'),
('poc', 'College POC'),
('admin', 'admin')]
for i in user_type_tuple:
obj, updated = UserType.objects.update_or_create(
slug=i[0])
obj.display_name = i[1]
obj.save()
def create_organisations(self, counter=None):
users = get_user_model().objects.all()
locations = Location.objects.all()
for i in range(counter):
number = self.fake.random_digit()
text = self.fake.text()
name = self.fake.company()
org, updated = Organisation.objects.update_or_create(
name=name,
location=locations[number],
organisation_type=number,
organisation_role=text,
description=text,
)
org.user.add(users[number])
def create_workshop_sections(self):
sections = ["Python2", "Python3", "Django", "Flask", "Gaming"]
for section in sections:
self.stdout.write(' Creating %s' % section)
WorkshopSections.objects.create(name=section)
def create_profile(self, user):
django = WorkshopSections.objects.get(name='Django')
python3 = WorkshopSections.objects.get(name='Python3')
location = Location.objects.all()[0]
user_type = UserType.objects.get(slug='admin')
profile = Profile(
user=user,
mobile='8758885872',
location=location)
profile.usertype.add(user_type)
profile.interested_locations.add(location)
profile.interested_sections.add(django, python3)
profile.save()
return profile
def create_sample_workshops(self, user):
organisations = Organisation.objects.all()
locations = Location.objects.all()
sections = WorkshopSections.objects.all()
for i in range(50):
w = Workshop.objects.create(
no_of_participants=random.randrange(10, 100),
expected_date=date(2015, random.randrange(1, 12), random.randrange(1, 29)),
description=self.fake.text(),
requester=random.choice(organisations),
location=random.choice(locations),
workshop_level=WorkshopLevel.BEGINNER,
workshop_section=random.choice(sections),
status=WorkshopStatus.COMPLETED
)
w.presenter.add(user)
w.save()
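# Illustrative invocation (the command name is whatever filename this module
# is given under <app>/management/commands/):
#
#   python manage.py <command_name>
#
# This seeds the site with sample users, locations, organisations, workshop
# sections, profiles and ~50 completed workshops, plus an 'admin' superuser
# whose password ('123123') is printed by create_user above.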
|
py | 1a5465194a42eaa20d34b66f0b4b215c3f037afe | # Tic Tac Toe
import random
def drawBoard(board):
# This function prints out the board that it was passed.
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
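# Board layout sketch (illustrative): indices follow the numeric keypad, so
# positions 7-8-9 form the top row, 4-5-6 the middle row and 1-2-3 the bottom
# row; index 0 of the board list is unused.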
def inputPlayerLetter():
# Lets the player type which letter they want to be.
# Returns a list with the player's letter as the first item, and the computer's letter as the second.
letter = ''
while not (letter == 'X' or letter == 'O'):
print('Do you want to be X or O?')
letter = input().upper()
# the first element in the list is the player's letter, the second is the computer's letter.
if letter == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def whoGoesFirst():
# Randomly choose the player who goes first.
if random.randint(0, 1) == 0:
return 'computer'
else:
return 'player'
def playAgain():
# This function returns True if the player wants to play again, otherwise it returns False.
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def makeMove(board, letter, move):
board[move] = letter
def isWinner(bo, le):
# Given a board and a player's letter, this function returns True if that player has won.
# We use bo instead of board and le instead of letter so we don't have to type as much.
return ((bo[7] == le and bo[8] == le and bo[9] == le) or # across the top
(bo[4] == le and bo[5] == le and bo[6] == le) or # across the middle
(bo[1] == le and bo[2] == le and bo[3] == le) or # across the bottom
(bo[7] == le and bo[4] == le and bo[1] == le) or # down the left side
(bo[8] == le and bo[5] == le and bo[2] == le) or # down the middle
(bo[9] == le and bo[6] == le and bo[3] == le) or # down the right side
(bo[7] == le and bo[5] == le and bo[3] == le) or # diagonal
(bo[9] == le and bo[5] == le and bo[1] == le)) # diagonal
def getBoardCopy(board):
# Make a duplicate of the board list and return the duplicate.
dupeBoard = []
for i in board:
dupeBoard.append(i)
return dupeBoard
def isSpaceFree(board, move):
# Return true if the passed move is free on the passed board.
return board[move] == ' '
def getPlayerMove(board):
# Let the player type in his move.
move = ' '
while move not in '1 2 3 4 5 6 7 8 9'.split() or not isSpaceFree(board, int(move)):
print('What is your next move? (1-9)')
move = input()
return int(move)
def chooseRandomMoveFromList(board, movesList):
# Returns a valid move from the passed list on the passed board.
# Returns None if there is no valid move.
possibleMoves = []
for i in movesList:
if isSpaceFree(board, i):
possibleMoves.append(i)
if len(possibleMoves) != 0:
return random.choice(possibleMoves)
else:
return None
def getComputerMove(board, computerLetter):
# Given a board and the computer's letter, determine where to move and return that move.
if computerLetter == 'X':
playerLetter = 'O'
else:
playerLetter = 'X'
# Here is our algorithm for our Tic Tac Toe AI:
# First, check if we can win in the next move
for i in range(1, 10):
copy = getBoardCopy(board)
if isSpaceFree(copy, i):
makeMove(copy, computerLetter, i)
if isWinner(copy, computerLetter):
return i
# Check if the player could win on his next move, and block them.
for i in range(1, 10):
copy = getBoardCopy(board)
if isSpaceFree(copy, i):
makeMove(copy, playerLetter, i)
if isWinner(copy, playerLetter):
return i
# Try to take one of the corners, if they are free.
move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
if move is not None:
return move
# Try to take the center, if it is free.
if isSpaceFree(board, 5):
return 5
# Move on one of the sides.
return chooseRandomMoveFromList(board, [2, 4, 6, 8])
def isBoardFull(board):
# Return True if every space on the board has been taken. Otherwise return False.
for i in range(1, 10):
if isSpaceFree(board, i):
return False
return True
print('Welcome to Tic Tac Toe!')
while True:
# Reset the board
theBoard = [' '] * 10
playerLetter, computerLetter = inputPlayerLetter()
turn = whoGoesFirst()
print('The ' + turn + ' will go first.')
gameIsPlaying = True
while gameIsPlaying:
if turn == 'player':
# Player's turn.
drawBoard(theBoard)
move = getPlayerMove(theBoard)
makeMove(theBoard, playerLetter, move)
if isWinner(theBoard, playerLetter):
drawBoard(theBoard)
print('Hooray! You have won the game!')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'computer'
else:
# Computer's turn.
move = getComputerMove(theBoard, computerLetter)
makeMove(theBoard, computerLetter, move)
if isWinner(theBoard, computerLetter):
drawBoard(theBoard)
print('The computer has beaten you! You lose.')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a tie!')
break
else:
turn = 'player'
if not playAgain():
break |
py | 1a54652e225f465e3abdbba8b94c15f7f6393e3f | # -*- coding: utf-8 -*-
import base
from ecl.network import network_service
from ecl import resource2
from ecl import exceptions
class Port(base.NetworkBaseResource):
resource_key = 'port'
resources_key = 'ports'
service = network_service.NetworkService("v2.0")
base_path = '/' + service.version + '/ports'
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
# query mappings
_query_mapping = resource2.QueryParameters('description',
'device_id',
'device_owner',
'id', 'mac_address',
'name', 'network_id',
'segmentation_id',
'segmentation_type',
'status', 'tenant_id',
'port_id',
"sort_key", "sort_dir",)
# properties
#: admin state of port
admin_state_up = resource2.Body('admin_state_up')
#: admin_state displayed by 'UP' or 'DOWN'
admin_state = resource2.Body('admin_state')
#: allowed address pairs
allowed_address_pairs = resource2.Body('allowed_address_pairs')
#: The port description.
description = resource2.Body('description')
#: Device ID of this port.
device_id = resource2.Body('device_id')
#: Device owner of this port (e.g. ``network:dhcp``).
device_owner = resource2.Body('device_owner')
#: fixed ips of the port.
fixed_ips = resource2.Body('fixed_ips')
#: ID of the port.
id = resource2.Body('id')
#: The MAC address of the port.
mac_address = resource2.Body('mac_address')
#: If the port is managed by service
managed_by_service = resource2.Body('managed_by_service')
#: The port name.
name = resource2.Body('name')
#: The ID of the attached network.
network_id = resource2.Body('network_id')
#: The ID of the project who owns the network. Only administrative
#: users can specify a project ID other than their own.
project_id = resource2.Body('tenant_id')
#: The segmentation ID of ports.
segmentation_id = resource2.Body('segmentation_id', type=int)
#: The segmentation type of ports.
segmentation_type = resource2.Body('segmentation_type')
#: The port status. Value is ``ACTIVE`` or ``DOWN``.
status = resource2.Body('status')
#: tags of the port.
tags = resource2.Body('tags')
#: admin_state displayed by 'UP' or 'DOWN'
@property
def admin_state(self):
admin_state = 'UP' if self.admin_state_up else 'DOWN'
return admin_state
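# Illustrative use of find() below, assuming an authenticated ECL ``session``
# object and a port named 'my-port' (both hypothetical):
#
#   port = Port.find(session, 'my-port', ignore_missing=True)
#   if port is not None:
#       print(port.mac_address, port.status)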
@classmethod
def find(cls, session, name_or_id, ignore_missing=False, **params):
"""Find a resource by its name or id.
:param session: The session to use for making this request.
:type session: :class:`~ecl.session.Session`
:param name_or_id: This resource's identifier, if needed by
the request. The default is ``None``.
:param bool ignore_missing: When set to ``False``
:class:`~ecl.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:param dict params: Any additional parameters to be passed into
underlying methods, such as to
:meth:`~ecl.resource2.Resource.existing`
in order to pass on URI parameters.
:return: The :class:`Resource` object matching the given name or id
or None if nothing matches.
:raises: :class:`ecl.exceptions.DuplicateResource` if more
than one resource is found for this request.
:raises: :class:`ecl.exceptions.ResourceNotFound` if nothing
is found and ignore_missing is ``False``.
"""
# Try to short-circuit by looking directly for a matching ID.
data = cls.list(session, **params)
result = cls._get_one_match(name_or_id, data)
if result is not None:
return result
if ignore_missing:
return None
raise exceptions.ResourceNotFound(
"No %s found for %s" % (cls.__name__, name_or_id)) |
py | 1a54654754a814debfd98c85153e4d1e516a3991 |
"""Tests for `inne` package."""
import time
from unittest.mock import Mock, patch
import numpy as np
import pytest
from inne import IsolationNNE
from scipy.sparse import csc_matrix, csr_matrix
from sklearn.datasets import (load_diabetes, load_digits, load_iris,
make_blobs, make_moons)
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid, train_test_split
from sklearn.utils import check_random_state
from sklearn.utils._testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal, ignore_warnings)
from sklearn.ensemble import IsolationForest
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the diabetes dataset
# and randomly permute it
diabetes = load_diabetes()
perm = rng.permutation(diabetes.target.size)
diabetes.data = diabetes.data[perm]
diabetes.target = diabetes.target[perm]
# also load the digits dataset
# and randomly permute it
digit = load_digits()
perm = rng.permutation(digit.target.size)
digit.data = digit.data[perm]
digit.target = digit.target[perm]
def test_inne():
"""Check Isolation NNE for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid(
{"n_estimators": [100, 200], "max_samples": [10, 20, 30]}
)
with ignore_warnings():
for params in grid:
IsolationNNE(random_state=0, **
params).fit(X_train).predict(X_test)
def test_inne_performance():
"""Test Isolation NNE performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationNNE(n_estimators=100, max_samples=16).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert roc_auc_score(y_test, y_pred) > 0.98
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_inne_works(contamination):
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
# Test IsolationForest
clf = IsolationNNE(random_state=0, contamination=contamination)
clf.fit(X)
decision_func = -clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf1 = IsolationNNE(contamination=0.1)
clf1.fit(X_train)
clf2 = IsolationNNE()
clf2.fit(X_train)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]),
clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
)
assert_array_equal(
clf2.score_samples([[2.0, 2.0]]),
clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
)
def test_fit_time():
data = digit.data
print(data.shape)
clf = IsolationNNE(n_estimators=200, max_samples=256)
t1 = time.time()
clf.fit(data)
t2 = time.time()
anomaly_labels = clf.predict(data)
t3 = time.time()
print(t2-t1)
print(t3-t2)
clf2 = IsolationForest(n_estimators=200, max_samples=256)
t1 = time.time()
clf2.fit(data)
t2 = time.time()
anomaly_labels = clf2.predict(data)
t3 = time.time()
print(t2-t1)
print(t3-t2) |
py | 1a5465ba843764f21232d9df526da00100ddc3ba | # RCS14_entrainment_naive.py
# Generate timeseries analysis and power estimate
# Author: maria.olaru@
"""
Created on Mon May 3 18:22:44 2021
@author: mariaolaru
"""
import numpy as np
import pandas as pd
import glob
from datetime import datetime
import os
import re
def get_filepaths(dir_name):
nchars = len(dir_name)
if (dir_name[nchars-1] != '/'):
dir_name = dir_name + '/'
filepaths = glob.glob(dir_name + "Session*/" + "Device*")
return filepaths
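# Illustrative directory layout assumed by get_filepaths (inferred from the
# glob pattern above; session/device names are made up):
#
#   <dir_name>/Session1557951903217/DeviceNPC700436H/
#       timeDomainData.csv, timeDomainSettings.csv,
#       stimLogSettings.csv, eventLogTable.csv, metaData.csv
#
# i.e. one Session*/Device* folder per recording, each holding the
# JSON-to-CSV exports consumed by the preprocessing functions below.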
def find_file(file_name, parent_dir):
#STEP 1: Get all files in all subdirectories of parent_dir
array_all = np.array([])
for root, subdirectories, files in os.walk(parent_dir):
if file_name in files:
file_match = os.path.join(root, file_name)
array_all = np.append(array_all, file_match)
return array_all
def preprocess_tdd(tdd, p):
"""
Parameters
----------
tdd : JSON-to-CSV converted time domain data.
p : path of the Device directory containing the converted CSV files.
Returns
-------
tdd_preproc : Restructured and reformatted tdd.
"""
tdd_rename = tdd.rename(columns={"DerivedTime": "timestamp", "key0": "ch0_mv", "key1": "ch1_mv", "key2": "ch2_mv", "key3": "ch3_mv"})
tdd_preproc = tdd_rename.drop(["localTime"], axis = 1)
tdd_preproc = tdd_preproc.drop(["samplerate"], axis = 1)
# sesh_id = os.path.basename(os.path.abspath(os.path.join(p, "../")))
# sesh_id = re.findall(r'\d+', sesh_id)[0]
# tdd_preproc.insert(0, 'session_id', sesh_id)
# tdd_preproc[['session_id']] = tdd_preproc[['session_id']].astype(int).astype(str)
tdd_preproc[['timestamp']] = tdd_preproc[['timestamp']].astype(int)
return tdd_preproc
def preprocess_sls(df):
"""
Parameters
----------
df : JSON to CSV converted stim log settings data.
Returns
-------
df_expanded : Restructured and reformatted sls data.
"""
df = df.rename(columns={"HostUnixTime": "timestamp_unix", "therapyStatus": "stim_status"})
df_expanded_params = df["stimParams_prog1"].str.split(pat = ",", expand = True)
df_expanded_params = df_expanded_params.rename(columns={0: "stim_contacts", 1: "stim_amp", 2: "stim_pw", 3: "stim_freq"})
df_expanded_params["stim_amp"] = df_expanded_params["stim_amp"].str.replace('mA', '')
df_expanded_params["stim_amp"] = df_expanded_params["stim_amp"].astype(float)
df_expanded_params["stim_pw"] = df_expanded_params["stim_pw"].str.replace('us', '')
df_expanded_params["stim_pw"] = df_expanded_params["stim_pw"].astype(int)
df_expanded_params["stim_freq"] = df_expanded_params["stim_freq"].str.replace('Hz', '')
df_expanded_params["stim_freq"] = df_expanded_params["stim_freq"].astype(float)
df_expanded_contact = df_expanded_params["stim_contacts"].str.split(pat = "+", expand = True)
df_expanded_contact = df_expanded_contact.rename(columns={0: "stim_contact_an", 1: "stim_contact"})
df_expanded_contact["stim_contact"] = df_expanded_contact["stim_contact"].str.replace('-', '')
df_expanded_contact["stim_contact"] = df_expanded_contact["stim_contact"].astype(int)
df_expanded_params = df_expanded_params.drop(["stim_contacts"], axis = 1)
df_expanded = pd.concat([df.loc[:, ["timestamp_unix", "stim_status"]], df_expanded_contact, df_expanded_params], axis=1)
indx = np.array(df_expanded[(df_expanded['stim_status'] == 1) & (df_expanded['stim_amp'] == 0)].index)
#include low stimulation amplitudes in field
if indx.size != 0:
df_expanded.loc[indx, 'stim_amp'] = 0.001
#change amplitude to reflect stimulation status
indx = np.array(df_expanded[(df_expanded['stim_status'] == 0) & (df_expanded['stim_amp'] != 0)].index)
if indx.size != 0:
df_expanded.loc[indx, 'stim_amp'] = 0
return df_expanded
def preprocess_tds(df):
"""
Parameters
----------
df : JSON to CSV converted time domain settings data.
Returns
-------
df_expanded : Restructured and reformatted tds data.
"""
# NEED TO DECIDE WHICH TIMESTAMP TO KEEP: TIMESTOP OR TIMESTART
df = df.rename(columns={"timeStart": "timestamp_unix"}) #time start of settings
df = df.rename(columns={"timeStop": "timestamp_unix_stop"}) #time stop of settings
df_ch1 = expand_sense_params(df["chan1"], "ch0")
df_ch2 = expand_sense_params(df["chan2"], "ch1")
df_ch3 = expand_sense_params(df["chan3"], "ch2")
df_ch4 = expand_sense_params(df["chan4"], "ch3")
df_expanded = pd.concat([df["timestamp_unix"], df["timestamp_unix_stop"], df_ch1, df_ch2, df_ch3, df_ch4], axis = 1)
df_expanded = df_expanded.drop(['ch1_sr', 'ch2_sr', 'ch3_sr'], axis = 1)
df_expanded = df_expanded.rename(columns={'ch0_sr': 'sr'})
return df_expanded
def expand_sense_params(df, label):
"""
Parameters
----------
df : data from a single tds channel.
label : label of tds channel from df.
Returns
-------
df_expanded : expanded columns for each input datatype
"""
df_expanded_params = df.str.split(pat = " ", expand = True)
df_expanded_params = df_expanded_params.rename(columns={0: (label+"_sense_contacts"), 1: (label+"_lfp1"), 2: (label+"_lfp2"), 3: (label+"_sr")})
df_expanded_params[(label+"_lpf1")] = df_expanded_params[(label+"_lfp1")].str.replace('LFP1-', '')
# df_expanded_params[(label+"_lfp1")] = df_expanded_params[(label+"_lfp1")].astype(int)
df_expanded_params[(label+"_lpf2")] = df_expanded_params[(label+"_lfp2")].str.replace('LFP2-', '')
# df_expanded_params[(label+"_lfp2")] = df_expanded_params[(label+"_lfp2")].astype(int)
df_expanded_params[(label+"_lpfs")] = df_expanded_params[label+"_lpf1"] + '-' + df_expanded_params[label+"_lpf2"]
df_expanded_params[(label+"_sr")] = df_expanded_params[(label+"_sr")].str.replace('SR-', '')
df_expanded_params = df_expanded_params.drop([label + '_lfp1', label + '_lfp2', label + '_lpf1', label + '_lpf2'], axis = 1)
#Need to edit this later
if ((df_expanded_params[(label+"_sr")] == 'Disabled').any()):
indx_vals = df_expanded_params[df_expanded_params[(label+"_sr")]=='Disabled'].index
df_expanded_params[(label+"_sr")][indx_vals] = 0
print("Warning: hardcoding sr of 0 for Disabled value")
df_expanded_params[(label+"_sr")] = df_expanded_params[(label+"_sr")].astype(int)
#df_expanded_contact = df_expanded_params[(label+"_sense_contacts")].str.split(pat = "-", expand = True)
#df_expanded_contact = df_expanded_contact.rename(columns={0: (label+"_sense_contact_an"), 1: (label+"_sense_contact_cath")})
#df_expanded_contact[(label+"_sense_contact_an")] = df_expanded_contact[(label+"_sense_contact_an")].str.replace('+', '', regex=True)
#df_expanded_contact[(label+"_sense_contact_an")] = df_expanded_contact[(label+"_sense_contact_an")].astype(int)
#df_expanded_contact[(label+"_sense_contact_cath")] = df_expanded_contact[(label+"_sense_contact_cath")].astype(int)
#df_expanded_params = df_expanded_params.drop([(label+"_sense_contacts")], axis = 1)
#df_expanded = pd.concat([df_expanded_contact, df_expanded_params], axis=1)
return df_expanded_params
def preprocess_elt(df, p):
"""
Parameters
----------
df : JSON to CSV converted event log table data
Returns
-------
df_reformat : Restructured and reformatted elt data.
"""
if not "SessionId" in df:
sesh_id = os.path.basename(os.path.abspath(os.path.join(p, "../")))
sesh_id = float(re.findall(r'\d+', sesh_id)[0])
if df.empty:
df = pd.DataFrame([sesh_id], columns = ['session_id'])
return df
df_rename = df.rename(columns={"HostUnixTime": "timestamp_unix", "SessionId": "session_id", "EventType": "event_type", "EventSubType" : "event_subtype"})
df_subset = df_rename[["session_id", "timestamp_unix", "event_type", "event_subtype"]]
#Get comments from older version of RCS implementation
partial_match = ["Feeling", "Balance", "Slowness", "Dyskinesia", "Dystonia", "Rigidity", "Speech", "Tremor", "Mania", "Sleep"]
import math
indx = np.array([])
for i in range(len(df_subset)):
entry = df_subset.loc[i, 'event_type']
if type(entry) != str:
if math.isnan(entry):
continue
for j in range(len(partial_match)):
pm = partial_match[j]
if entry.startswith(pm):
indx = np.append(indx, i)
if indx.size > 0:
df_reformat = df_subset.iloc[indx, :]
df_reformat = df_reformat.rename(columns = {'event_type': 'conditions', 'event_subtype': 'extra_comments'})
df_standard = pd.melt(df_reformat, id_vars=['session_id', 'timestamp_unix'], value_vars = ['conditions', 'extra_comments'])
df_standard = df_standard.rename(columns = {'variable': 'event_type', 'value': 'event_subtype'})
df_subset = pd.concat([df_subset, df_standard])
ls_keep = ["conditions", "medication", "extra_comments"]
df_select = df_subset.loc[df_subset['event_type'].isin(ls_keep)]
if (df_select.size == 0):
df_standard = df_subset[["session_id", "timestamp_unix"]]
df_reformat = df_standard.iloc[0:1, :]
else:
dfp = df_select.pivot(columns = 'event_type')['event_subtype']
if (not "conditions" in dfp):
dfp = dfp.assign(conditions = np.nan)
if (not "medication" in dfp):
dfp = dfp.assign(medication = np.nan)
if (not "extra_comments" in dfp):
dfp = dfp.assign(extra_comments = np.nan)
df_reformat = df_select[["session_id", "timestamp_unix"]]
df_reformat = df_reformat.assign(medication = pd.Series(dfp['medication']))
df_reformat = df_reformat.assign(symptoms = pd.Series(dfp['conditions']))
df_reformat = df_reformat.assign(comments = pd.Series(dfp['extra_comments']))
return df_reformat
def preprocess_md(df, p):
"""
Parameters
----------
df : JSON to CSV converted meta data.
Returns
-------
df_rename : Restructured and reformatted md.
"""
if (df['implant_side'] == 'Undefined').any():
implant_side = os.path.abspath(os.path.join(p, "../.."))
implant_side = implant_side[-1]
df.implant_side[df.implant_side=="Undefined"]=implant_side
else:
df_implant_expanded = df[("implant_side")].str.split(pat = " ", expand = True)
df["implant_side"] = df_implant_expanded.iloc[:,0]
df_rename = df.rename(columns={"subj_ID": "subj_id"})
df_rename['subj_id'] = df_rename['subj_id'][0][:-1]
return df_rename
def settings_combine(eltp, mdp, slsp, tdsp, out_dir):
"""
Parameters
----------
eltp : preprocessed event log table data.
mdp : preprocessed meta data.
slsp : preprocessed stim log settings data.
tdsp : preprocessed time domain settings data.
out_dir : fullpath to parent directory of output.
Returns
-------
df : a single dataframe containing all input data.
"""
subj_id = mdp['subj_id'].unique()
subj_id = subj_id[~pd.isnull(subj_id)][0]
hemi = mdp['implant_side'].unique()
hemi = hemi[~pd.isnull(hemi)][0]
sesh_id = eltp['session_id'].unique()
sesh_id = sesh_id[~pd.isnull(sesh_id)][0]
tdspc = tdsp.drop(['timestamp_unix_stop'], axis=1)
df = slsp.append(tdspc)
df.insert(0, 'subj_id', subj_id)
df.insert(1, 'implant_side', hemi)
df.insert(2, 'session_id', sesh_id)
df = df.sort_values('timestamp_unix')
df = df.reset_index(drop = True)
timestamp_dt = convert_unix2dt(df["timestamp_unix"])
df.insert(4, 'timestamp', timestamp_dt)
df[['timestamp_unix']] = df[['timestamp_unix']].astype(int)
df[['session_id']] = df[['session_id']].astype(int).astype(str)
df.to_csv(out_dir + 'combined_settings.csv', index = False, header=True)
eltp.to_csv(out_dir + 'session_notes.csv', index = False, header = True)
return df
def convert_unix2dt(series):
"""
Parameters
----------
series : column from pandas dataframe in UNIX microsecond formatting
Returns
-------
timestamp_dt : series in date-time format
"""
if (len(series) == 1):
unix_s = series/1000
else:
unix_s = series.squeeze()/1000
timestamp_dt = np.zeros(len(unix_s), dtype='datetime64[ms]')
for i in range(len(timestamp_dt)):
timestamp_dt[i] = datetime.fromtimestamp(unix_s.iloc[i])
return timestamp_dt
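# Illustrative use of convert_unix2dt (the timestamp value is made up):
#
#   ts = pd.Series([1620000000000])     # UNIX time in milliseconds
#   convert_unix2dt(ts)                 # -> numpy array of datetime64[ms]
#
# Internally each value is divided by 1000 and passed through
# datetime.fromtimestamp, so the result is expressed in local time.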
def preprocess_settings(dir_name):
"""
Parameters
----------
dir_name : full path of the head directory to process, with Session*/Device* data directories below it.
Returns
-------
Meta-settings table of all individual session settings tables
"""
paths = get_filepaths(dir_name)
msc = pd.DataFrame() #initialize metadata table of settings information
meltp = pd.DataFrame()
p_temp = paths[0]
gp = os.path.abspath(os.path.join(p_temp, "../.."))
subj_id = os.path.basename(gp)
msc_fp = gp + '/' + subj_id + '_meta_combined_settings.csv'
meltp_fp = gp + '/' + subj_id + '_meta_session_notes.csv'
if (os.path.exists(msc_fp)):
msc = pd.read_csv(msc_fp, header=0)
meltp = pd.read_csv(meltp_fp, header=0)
else:
for i in range(0, len(paths)):
p = paths[i] + '/'
# for testing purposes:
#p = '/Users/mariaolaru/RCS02 Un-Synced Data/SummitData/SummitContinuousBilateralStreaming/RCS02L/Session1557951903217/Device/'
exists = len(find_file("timeDomainData.csv", p))
if (exists == 0):
print("Can't process, timeDomainData does not exist: \n" + p + "\n")
continue
else:
print("Processing settings: \n" + p + "\n")
sls = pd.read_csv(p + "stimLogSettings.csv")
tds = pd.read_csv(p + "timeDomainSettings.csv", index_col=False)
if os.stat(p + "eventLogTable.csv").st_size > 1:
elt = pd.read_csv(p + "eventLogTable.csv")
else:
elt = pd.DataFrame()
md = pd.read_csv(p + "metaData.csv")
slsp = preprocess_sls(sls)
tdsp = preprocess_tds(tds)
eltp = preprocess_elt(elt, p)
mdp = preprocess_md(md, p)
sc = settings_combine(eltp, mdp, slsp, tdsp, p)
msc = pd.concat([msc, sc])
meltp = pd.concat([meltp, eltp])
msc['session_id'] = msc['session_id'].astype(int)
# col_dont_fill = 0 #do not fill for med/symp/comments
# if ('medication' in msc):
# col_dont_fill = col_dont_fill + 1
# if ('symptoms' in msc):
# col_dont_fill = col_dont_fill + 1
# if ('comments' in msc):
# col_dont_fill = col_dont_fill + 1
# fill_cols = msc.columns[0:len(msc.columns)-col_dont_fill]
msc = msc.sort_values('timestamp_unix')
meltp = meltp.sort_values('timestamp_unix')
msc = msc.fillna(method='ffill')
# msc = msc.fillna(method='bfill')
msc.drop(index=msc.index[0], axis = 0, inplace = True) #remove first index
msc = msc.reset_index(drop = True)
gp = os.path.abspath(os.path.join(p, "../.."))
msc.to_csv(msc_fp, index = False, header=True)
meltp.to_csv(meltp_fp, index = False, header=True)
return [msc, meltp, gp]
def preprocess_data(dir_name, msc, gp):
paths = get_filepaths(dir_name)
md = pd.DataFrame() #initialize metadata table of settings information
subj_id = os.path.basename(gp)
md_fp = gp + '/' + subj_id + '_meta_data.csv'
if (os.path.exists(md_fp)):
md = pd.read_csv(md_fp, header=0)
else:
for i in range(0, len(paths)):
p = paths[i] + '/'
# for testing purposes:
#p = '/Users/mariaolaru/RCS02 Un-Synced Data/SummitData/SummitContinuousBilateralStreaming/RCS02L/Session1557952808964/Device'
exists = len(find_file("timeDomainData.csv", p))
if (exists == 0):
print("Can't process, timeDomainData does not exist: \n" + p + "\n")
continue
else:
print("Processing data: \n" + p + "\n")
tdd = pd.read_csv(p + "timeDomainData.csv")
tddp = preprocess_tdd(tdd, p)
md = pd.concat([md, tddp])
md = md.sort_values('timestamp')
md = md.reset_index(drop=True)
md.to_csv(md_fp, index = False, header=True)
return md
def melt_md(md):
"""
Parameters
----------
md : dataframe, wide-form meta-data
Returns
-------
long-form of meta-data
"""
print("Converting meta data to long-form")
md = md.rename(columns={"ch0_mv": "0", "ch1_mv": "1", "ch2_mv": "2", "ch3_mv": "3"})
mdm = pd.melt(md, id_vars=['timestamp'], value_vars = ['0', '1', '2', '3'], var_name = 'channel', value_name = 'voltage')
mdm['channel'] = mdm['channel'].astype(int)
mdm = mdm[mdm['voltage'].notna()]
# out_name_mdsm = gp + '/' + out_name + "_mdsm.csv"
# mdsm.to_csv(out_name_mdsm, index = False)
return mdm
def link_data_wide(msc, md, gp):
subj_id = os.path.basename(gp)
ld_fp = gp + '/' + subj_id + '_linked_data_wide.csv'
if (os.path.exists(ld_fp)):
df_linked = pd.read_csv(ld_fp, header=0)
else:
mscc = msc.drop(['subj_id', 'implant_side', 'timestamp', 'stim_status', 'stim_contact_an'], axis = 1)
mscc = mscc.rename({'timestamp_unix' : 'timestamp'}, axis = 1)
df_linked = mscc.append(md)
df_linked = df_linked.sort_values('timestamp').reset_index(drop = True)
df_linked.loc[:, mscc.columns] = df_linked.loc[:, mscc.columns].fillna(method='ffill')
#df_linked = df_linked[df_linked['ch1_mV'].notna()]
df_linked[['session_id']] = df_linked[['session_id']].astype(int)
df_linked[['stim_contact']] = df_linked[['stim_contact']].astype(int)
df_linked[['sr']] = df_linked[['sr']].astype(int)
df_linked[['stim_pw']] = df_linked[['stim_pw']].astype(int)
df_linked.to_csv(ld_fp, index = False, header=True)
return df_linked
def link_data(msc, md, gp):
subj_id = os.path.basename(gp)
ld_fp = gp + '/' + subj_id + '_linked_data.csv'
# if (os.path.exists(ld_fp)):
# df_linked = pd.read_csv(ld_fp, header=0)
# else:
mscc = msc.drop(['subj_id', 'implant_side', 'timestamp', 'stim_status', 'stim_contact_an'], axis = 1)
mscc = mscc.rename({'timestamp_unix' : 'timestamp'}, axis = 1)
mdl = melt_md(md)
df_linked_ch0 = link_ch(mscc, mdl, 0)
df_linked_ch1 = link_ch(mscc, mdl, 1)
df_linked_ch2 = link_ch(mscc, mdl, 2)
df_linked_ch3 = link_ch(mscc, mdl, 3)
df_linked = pd.concat([df_linked_ch0, df_linked_ch1, df_linked_ch2, df_linked_ch3])
df_linked[['session_id']] = df_linked[['session_id']].astype(int)
df_linked[['stim_contact']] = df_linked[['stim_contact']].astype(int)
df_linked[['sr']] = df_linked[['sr']].astype(int)
df_linked[['stim_pw']] = df_linked[['stim_pw']].astype(int)
col = df_linked['sense_contacts']
df_linked = df_linked.drop(columns = 'sense_contacts')
df_linked.insert(1, 'sense_contacts', col)
df_linked = df_linked.drop('channel', axis = 1)
#df_linked = df_linked.rename(columns = {'stim_contact_cath':'stim_contact', 'amplitude_ma': 'stim_amp', 'pulsewidth_us': 'stim_pw', 'stimfrequency_hz':'stim_freq'})
df_linked = df_linked.sort_values(['session_id', 'sense_contacts', 'timestamp']).reset_index(drop = True)
df_linked.to_csv(ld_fp, index = False, header=True)
return df_linked
def concat_data(df):
df['settings'] = df['stim_freq'].astype(str) + '-' + df['stim_amp'].astype(str)
df = df.drop(['stim_contact', 'stim_pw', 'lpfs'], axis = 1)
return df
def link_ch(mscc, mdl, label):
mdl_ch = mdl[mdl['channel'] == label]
if 'label' in mscc:
mscc_ch = mscc[['session_id', 'timestamp', 'stim_contact', 'stim_amp', 'stim_pw', 'stim_freq', 'sr', 'ch' + str(label) +'_sense_contacts', 'ch' + str(label) + '_lpfs', 'label']]
else:
mscc_ch = mscc[['session_id', 'timestamp', 'stim_contact', 'stim_amp', 'stim_pw', 'stim_freq', 'sr', 'ch' + str(label) +'_sense_contacts', 'ch' + str(label) + '_lpfs']]
df_linked_ch = mscc_ch.append(mdl_ch)
df_linked_ch = df_linked_ch.sort_values('timestamp').reset_index(drop = True)
df_linked_ch.loc[:, mscc_ch.columns] = df_linked_ch.loc[:, mscc_ch.columns].fillna(method='ffill')
df_linked_ch = df_linked_ch[df_linked_ch['voltage'].notna()]
df_linked_ch = df_linked_ch.rename(columns = {'ch' + str(label) + '_sense_contacts':'sense_contacts', 'ch' + str(label) + '_lpfs':'lpfs'})
return df_linked_ch
def label_montage(msc, labels):
labels = np.array(labels)
sesh = msc['session_id'].unique()
for i in range(len(sesh)):
i_true = msc.index[msc['session_id'] == sesh[i]]
msc.loc[i_true, 'label'] = labels[i]
return msc
|
py | 1a5466f2f47443988e51a94bbbad6271df80993c | import socket
from itertools import chain, imap
from redis.exceptions import (
RedisError,
ConnectionError,
ResponseError,
InvalidResponse,
AuthenticationError
)
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import hiredis
hiredis_available = True
except ImportError:
hiredis_available = False
class PythonParser(object):
"Plain Python parsing class"
MAX_READ_LENGTH = 1000000
def __init__(self):
self._fp = None
def __del__(self):
try:
self.on_disconnect()
except:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._fp = connection._sock.makefile('r')
def on_disconnect(self):
"Called when the socket disconnects"
if self._fp is not None:
self._fp.close()
self._fp = None
def read(self, length=None):
"""
Read a line from the socket is no length is specified,
otherwise read ``length`` bytes. Always strip away the newlines.
"""
try:
if length is not None:
bytes_left = length + 2 # read the line ending
if length > self.MAX_READ_LENGTH:
# apparently reading more than 1MB or so from a windows
# socket can cause MemoryErrors. See:
# https://github.com/andymccurdy/redis-py/issues/205
# read smaller chunks at a time to work around this
try:
buf = StringIO()
while bytes_left > 0:
read_len = min(bytes_left, self.MAX_READ_LENGTH)
buf.write(self._fp.read(read_len))
bytes_left -= read_len
buf.seek(0)
return buf.read(length)
finally:
buf.close()
return self._fp.read(bytes_left)[:-2]
# no length, read a full line
return self._fp.readline()[:-2]
except (socket.error, socket.timeout), e:
raise ConnectionError("Error while reading from socket: %s" % \
(e.args,))
def read_response(self):
response = self.read()
if not response:
raise ConnectionError("Socket closed on remote end")
byte, response = response[0], response[1:]
# server returned an error
if byte == '-':
if response.startswith('ERR '):
response = response[4:]
return ResponseError(response)
if response.startswith('LOADING '):
# If we're loading the dataset into memory, kill the socket
# so we re-initialize (and re-SELECT) next time.
raise ConnectionError("Redis is loading data into memory")
# single value
elif byte == '+':
return response
# int value
elif byte == ':':
return long(response)
# bulk response
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self.read(length)
return response
# multi-bulk response
elif byte == '*':
length = int(response)
if length == -1:
return None
return [self.read_response() for i in xrange(length)]
raise InvalidResponse("Protocol Error")
class HiredisParser(object):
"Parser class for connections using Hiredis"
def __init__(self):
if not hiredis_available:
raise RedisError("Hiredis is not installed")
def __del__(self):
try:
self.on_disconnect()
except:
pass
def on_connect(self, connection):
self._sock = connection._sock
self._reader = hiredis.Reader(
protocolError=InvalidResponse,
replyError=ResponseError)
def on_disconnect(self):
self._sock = None
self._reader = None
def read_response(self):
if not self._reader:
raise ConnectionError("Socket closed on remote end")
response = self._reader.gets()
while response is False:
try:
buffer = self._sock.recv(4096)
except (socket.error, socket.timeout), e:
raise ConnectionError("Error while reading from socket: %s" % \
(e.args,))
if not buffer:
raise ConnectionError("Socket closed on remote end")
self._reader.feed(buffer)
# proactively, but not conclusively, check if more data is in the
# buffer. if the data received doesn't end with \n, there's more.
if not buffer.endswith('\n'):
continue
response = self._reader.gets()
return response
if hiredis_available:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', parser_class=DefaultParser):
self.host = host
self.port = port
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self._sock = None
self._parser = parser_class()
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error, e:
raise ConnectionError(self._error_message(e))
self._sock = sock
self.on_connect()
def _connect(self):
"Create a TCP socket connection"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect((self.host, self.port))
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a password is specified, authenticate
if self.password:
self.send_command('AUTH', self.password)
if self.read_response() != 'OK':
raise AuthenticationError('Invalid Password')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if self.read_response() != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
self._sock.sendall(command)
except socket.error, e:
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." % \
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args))
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if response.__class__ == ResponseError:
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, unicode):
return value.encode(self.encoding, self.encoding_errors)
return str(value)
def pack_command(self, *args):
"Pack a series of arguments into a value Redis command"
command = ['$%s\r\n%s\r\n' % (len(enc_value), enc_value)
for enc_value in imap(self.encode, args)]
return '*%s\r\n%s' % (len(command), ''.join(command))
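# Illustrative sketch (added; not part of the original client): pack_command builds
# the Redis unified request protocol. Connection.__init__ does not open a socket,
# so packing a command works offline.
def _example_pack_command():
    conn = Connection()
    packed = conn.pack_command('SET', 'foo', 'bar')
    # -> '*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'
    assert packed == '*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'
    return packed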
class UnixDomainSocketConnection(Connection):
def __init__(self, path='', db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', parser_class=DefaultParser):
self.path = path
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self._sock = None
self._parser = parser_class()
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
# TODO: add ability to block waiting on a connection to be released
class ConnectionPool(object):
"Generic connection pool"
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections or 2**31
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections, self._in_use_connections)
for connection in all_conns:
connection.disconnect()
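# Hedged usage sketch (added for illustration; the host/port values are assumptions):
# the pool hands out Connection objects and recycles them via release(). Sending the
# command requires a reachable Redis server; nothing here runs on import.
def _example_pool_usage():
    pool = ConnectionPool(connection_class=Connection,
                          max_connections=2,
                          host='localhost', port=6379, db=0)
    connection = pool.get_connection('PING')
    try:
        connection.send_command('PING')
        response = connection.read_response()
    finally:
        pool.release(connection)
    return response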
|
py | 1a54675d68cc0606ae89722e068095657404f906 |
import copy
import six
import sqlalchemy.pool
from .pool import DjangoQueuePool
class DjangoPoolParams(object):
_slow_and_safe = {
'django_pool_class': sqlalchemy.pool.QueuePool, # sqlalchemy's builtin queue pool class
'django_pre_ping': True, # pre ping by django if dialect is None
'django_reset_on_return': False, # use sqlalchemy's reset on conn return
'pool_size': 5, # daily traffic: reuse long connections
'max_overflow': 0, # burst traffic: do not overload the db
'timeout': 30, # burst traffic: > external api timeout
'recycle': 120, # should be smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': True, # every thread always get its same conn
'reset_on_return': 'rollback', # reset on every conn return by rollback
}
_fast_and_sane = {
'django_pool_class': sqlalchemy.pool.QueuePool, # sqlalchemy's builtin queue pool class
'django_pre_ping': False, # no pre ping due to long mysql timeout
'django_reset_on_return': True, # reset by rollback only when necessary
'pool_size': 5, # daily traffic: reuse long connections
'max_overflow': 10, # burst traffic: do not overload the db
'timeout': 30, # burst traffic: > external api timeout
'recycle': 3600, # to be much smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': False, # diff threads share the db connections
'reset_on_return': None, # do not use sqlalchemy reset on return
}
_fast_and_wild = {
'django_pool_class': DjangoQueuePool, # customized from sqlalchemy queue pool
'django_pre_ping': False, # no pre ping due to long mysql timeout
'django_reset_on_return': True, # reset by rollback only when necessary
'django_core_pool_size': 5, # retire no conn if achieving core size
'django_unload_timeout': 2, # wait some random time before overload
'django_retire_interval': 5, # retire few non-core conn per interval
'django_retire_quantity': 1, # retire few non-core conn per interval
'pool_size': 30, # daily traffic: recycle or retire conn
'max_overflow': 0, # burst traffic: put overflow into pool
'timeout': 30, # burst traffic: > external api timeout
'recycle': 3600, # to be much smaller than mysql timeout
'dialect': None, # sqlalchemy's mysql dialect instance
'pre_ping': False, # sqlalchemy pre ping requires dialect
'use_threadlocal': False, # diff threads share the db connections
'reset_on_return': None, # do not use sqlalchemy reset on return
}
_supported_params = set(six.iterkeys(_fast_and_wild))
_params_to_kwargs = {
'django_pool_class': None,
'django_pre_ping': None,
'django_reset_on_return': None,
'django_core_pool_size': 'core_pool_size',
'django_unload_timeout': 'unload_timeout',
'django_retire_interval': 'retire_interval',
'django_retire_quantity': 'retire_quantity',
}
    if not _supported_params.issuperset(six.viewkeys(_params_to_kwargs)):
raise Exception('invalid supported params: %s' % _supported_params)
def __init__(self, pool_params):
"""
:type pool_params: dict
"""
self.pool_params = pool_params
@classmethod
def unsupported(cls, params):
return six.viewkeys(params) - cls._supported_params
@classmethod
def new_slow_safe(cls, **updated):
return cls.new(cls._slow_and_safe, **updated)
@classmethod
def new_fast_sane(cls, **updated):
return cls.new(cls._fast_and_sane, **updated)
@classmethod
def new_fast_wild(cls, **updated):
return cls.new(cls._fast_and_wild, **updated)
@classmethod
def new(cls, default, **updated):
"""
:rtype: dict
"""
params = dict(default, **updated)
unsupported = cls.unsupported(params)
if unsupported:
raise Exception('unsupported pool params: %s' % unsupported)
return params
def get_pool_kwargs(self):
"""
:rtype: dict
"""
pool_class = self.django_pool_class
pool_kwargs = copy.deepcopy(self.pool_params)
for _k in self._params_to_kwargs:
pool_kwargs.pop(_k, None)
if pool_class == DjangoQueuePool:
for _k, _v in six.iteritems(self._params_to_kwargs):
if _k is not None and _v is not None:
pool_kwargs[_v] = self.pool_params.get(_k, None)
return pool_kwargs
@property
def django_pool_class(self):
return self.pool_params.get('django_pool_class', None)
@property
def django_pre_ping(self):
return self.pool_params.get('django_pre_ping', None)
@property
def django_reset_on_return(self):
return self.pool_params.get('django_reset_on_return', None)
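# Illustrative sketch (added; not part of the original module): start from one of the
# presets, override a couple of values, and split the result into the keyword
# arguments accepted by the underlying sqlalchemy pool class. The overrides below are
# examples only.
def _example_pool_kwargs():
    params = DjangoPoolParams.new_fast_sane(pool_size=10, recycle=1800)
    helper = DjangoPoolParams(params)
    pool_class = helper.django_pool_class   # sqlalchemy.pool.QueuePool for this preset
    pool_kwargs = helper.get_pool_kwargs()  # the django_* keys are stripped out
    return pool_class, pool_kwargs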
|
py | 1a54676ebd61a2709d5eb6492f7ff0fc339d1cf1 | class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
counter = 0
index = 0
rev = True
buf = []
output = ""
while index < len(s):
if rev:
buf.append(s[index])
else:
output += (s[index])
counter += 1
index += 1
if counter == k:
rev = not rev
counter = 0
output += ("".join(reversed(buf)))
buf = []
if buf:
output += ("".join(reversed(buf)))
return output
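# Quick check added for illustration (LeetCode 541): reverse the first k characters of
# every 2k block and leave the following k characters untouched.
if __name__ == "__main__":
    assert Solution().reverseStr("abcdefg", 2) == "bacdfeg"
    assert Solution().reverseStr("abcd", 2) == "bacd"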
|
py | 1a546790b68a640b346c114e625b256a949f2f73 | """
This file uses slightly modified code from pyDRMetrics [1]_, see:
- https://doi.org/10.1016/j.heliyon.2021.e06199 - the article.
- https://data.mendeley.com/datasets/jbjd5fmggh/1 - the supplementary files.
The following changes have been made:
- :mod:`numba` JIT for performance reasons
- use broadcasting instead of a 3rd loop in :func:`_ranking_matrix`
[1] Zhang, Yinsheng (2021),
“Source code, sample data, and case study report for pyDRMetrics”,
Mendeley Data, V1, doi: 10.17632/jbjd5fmggh.1
"""
from ....tools.decorators import metric
from ....tools.normalize import log_cpm_hvg
from anndata import AnnData
from numba import njit
from scipy.sparse import issparse
from sklearn.metrics import pairwise_distances
from typing import Tuple
import numpy as np
__original_author__ = "Yinsheng Zhang"
__original_author_email__ = "[email protected]"
__license__ = "CC BY 4.0"
__license_link__ = (
"https://data.mendeley.com/datasets/"
"jbjd5fmggh/1/files/da1bca42-c4da-4376-9177-bd2d9a308108"
)
_K = 30
@njit(cache=True, fastmath=True)
def _ranking_matrix(D: np.ndarray) -> np.ndarray: # pragma: no cover
assert D.shape[0] == D.shape[1]
R = np.zeros(D.shape)
m = len(R)
ks = np.arange(m)
for i in range(m):
for j in range(m):
R[i, j] = np.sum(
(D[i, :] < D[i, j]) | ((ks < j) & (np.abs(D[i, :] - D[i, j]) <= 1e-12))
)
return R
@njit(cache=True, fastmath=True)
def _coranking_matrix(R1: np.ndarray, R2: np.ndarray) -> np.ndarray: # pragma: no cover
assert R1.shape == R2.shape
Q = np.zeros(R1.shape, dtype=np.int32)
m = len(Q)
for i in range(m):
for j in range(m):
k = int(R1[i, j])
l = int(R2[i, j]) # noqa: E741
Q[k, l] += 1
return Q
@njit(cache=True, fastmath=True)
def _metrics(
Q: np.ndarray,
) -> Tuple[
np.ndarray, np.ndarray, np.ndarray, float, np.ndarray, int, float, float
]: # pragma: no cover
Q = Q[1:, 1:]
m = len(Q)
T = np.zeros(m - 1) # trustworthiness
C = np.zeros(m - 1) # continuity
QNN = np.zeros(m) # Co-k-nearest neighbor size
LCMC = np.zeros(m) # Local Continuity Meta Criterion
for k in range(m - 1):
Qs = Q[k:, :k]
# a column vector of weights. weight = rank error = actual_rank - k
W = np.arange(Qs.shape[0]).reshape(-1, 1)
# 1 - normalized hard-k-intrusions. lower-left region.
# weighted by rank error (rank - k)
T[k] = 1 - np.sum(Qs * W) / (k + 1) / m / (m - 1 - k)
Qs = Q[:k, k:]
# a row vector of weights. weight = rank error = actual_rank - k
W = np.arange(Qs.shape[1]).reshape(1, -1)
# 1 - normalized hard-k-extrusions. upper-right region
C[k] = 1 - np.sum(Qs * W) / (k + 1) / m / (m - 1 - k)
for k in range(m):
# Q[0,0] is always m. 0-th nearest neighbor is always the point itself.
# Exclude Q[0,0]
QNN[k] = np.sum(Q[: k + 1, : k + 1]) / ((k + 1) * m)
LCMC[k] = QNN[k] - (k + 1) / (m - 1)
kmax = np.argmax(LCMC)
Qlocal = np.sum(QNN[: kmax + 1]) / (kmax + 1)
# skip the last. The last is (m-1)-nearest neighbor, including all samples.
Qglobal = np.sum(QNN[kmax:-1]) / (m - kmax - 1)
AUC = np.mean(QNN)
return T, C, QNN, AUC, LCMC, kmax, Qlocal, Qglobal
def _high_dim(adata: AnnData) -> np.ndarray:
adata.X = adata.layers["counts"]
adata = log_cpm_hvg(adata)
high_dim = adata.X
return high_dim.A if issparse(high_dim) else high_dim
def _fit(
X: np.ndarray, E: np.ndarray
) -> Tuple[float, float, float, float, float, float, float]:
if np.any(np.isnan(E)):
return 0.0, 0.0, 0.0, 0.5, -np.inf, -np.inf, -np.inf
Dx = pairwise_distances(X)
De = pairwise_distances(E)
Rx, Re = _ranking_matrix(Dx), _ranking_matrix(De)
Q = _coranking_matrix(Rx, Re)
T, C, QNN, AUC, LCMC, _kmax, Qlocal, Qglobal = _metrics(Q)
return T[_K], C[_K], QNN[_K], AUC, LCMC[_K], Qlocal, Qglobal
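# Minimal sketch (added for illustration): run the co-ranking pipeline directly on a
# synthetic high-dimensional matrix X and a crude 2-d "embedding" E. _K = 30, so at
# least ~32 samples are needed; 50 random points keep the numba-compiled loops fast.
def _example_coranking_metrics():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(50, 20))
    E = X[:, :2]
    T, C, QNN, AUC, LCMC, Qlocal, Qglobal = _fit(X, E)
    return {"trustworthiness": T, "continuity": C, "auc": AUC}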
@metric("continuity", maximize=True)
def continuity(adata: AnnData) -> float:
_, C, _, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return float(np.clip(C, 0.0, 1.0)) # in [0, 1]
@metric("co-KNN size", maximize=True)
def qnn(adata: AnnData) -> float:
_, _, QNN, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
# normalized in the code to [0, 1]
return float(np.clip(QNN, 0.0, 1.0))
@metric("co-KNN AUC", maximize=True)
def qnn_auc(adata: AnnData) -> float:
_, _, _, AUC, *_ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return float(np.clip(AUC, 0.5, 1.0)) # in [0.5, 1]
@metric("local continuity meta criterion", maximize=True)
def lcmc(adata: AnnData) -> float:
*_, LCMC, _, _ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return LCMC
@metric("local property", maximize=True)
def qlocal(adata: AnnData) -> float:
    # according to the authors, this is usually preferred to
    # qglobal, because humans are more sensitive to nearer neighbors
*_, Qlocal, _ = _fit(_high_dim(adata), adata.obsm["X_emb"])
return Qlocal
@metric("global property", maximize=True)
def qglobal(adata: AnnData) -> float:
*_, Qglobal = _fit(_high_dim(adata), adata.obsm["X_emb"])
return Qglobal
|
py | 1a54679b9c6b9ec52afe09bf68a22194babff5b2 | from setuptools import setup
setup(name='irispreppy',
version="0.9.1",
url='https://github.com/OfAaron3/irispreppy',
author='Aaron W. Peat',
author_email='[email protected]',
license='MIT',
packages=['irispreppy.psf', 'irispreppy.radcal', 'irispreppy', 'irispreppy.radcal.responses'],
install_requires=[
'numpy',
'tqdm',
'astropy<=4.2.0',
'scipy',
        'beautifulsoup4'
],
include_package_data=True,
zip_safe=False
)
|
py | 1a5469abeff5e8a256a350ee54eea6a25beaf52a | # -*- coding: utf-8 -*-
import json
import threading
from plexapi import log
class AlertListener(threading.Thread):
""" Creates a websocket connection to the PlexServer to optionally receive alert notifications.
These often include messages from Plex about media scans as well as updates to currently running
Transcode Sessions. This class implements threading.Thread, therefore to start monitoring
alerts you must call .start() on the object once it's created. When calling
`PlexServer.startAlertListener()`, the thread will be started for you.
Known `state`-values for timeline entries, with identifier=`com.plexapp.plugins.library`:
:0: The item was created
:1: Reporting progress on item processing
:2: Matching the item
:3: Downloading the metadata
:4: Processing downloaded metadata
:5: The item processed
:9: The item deleted
When metadata agent is not set for the library processing ends with state=1.
Parameters:
server (:class:`~plexapi.server.PlexServer`): PlexServer this listener is connected to.
callback (func): Callback function to call on received messages. The callback function
will be sent a single argument 'data' which will contain a dictionary of data
received from the server. :samp:`def my_callback(data): ...`
"""
key = '/:/websockets/notifications'
def __init__(self, server, callback=None):
super(AlertListener, self).__init__()
self.daemon = True
self._server = server
self._callback = callback
self._ws = None
def run(self):
try:
import websocket
except ImportError:
log.warning("Can't use the AlertListener without websocket")
return
# create the websocket connection
url = self._server.url(self.key, includeToken=True).replace('http', 'ws')
log.info('Starting AlertListener: %s', url)
self._ws = websocket.WebSocketApp(url, on_message=self._onMessage,
on_error=self._onError)
self._ws.run_forever()
def stop(self):
""" Stop the AlertListener thread. Once the notifier is stopped, it cannot be directly
started again. You must call :func:`plexapi.server.PlexServer.startAlertListener()`
from a PlexServer instance.
"""
log.info('Stopping AlertListener.')
self._ws.close()
def _onMessage(self, *args):
""" Called when websocket message is received.
In earlier releases, websocket-client returned a tuple of two parameters: a websocket.app.WebSocketApp
object and the message as a STR. Current releases appear to only return the message.
We are assuming the last argument in the tuple is the message.
This is to support compatibility with current and previous releases of websocket-client.
"""
message = args[-1]
try:
data = json.loads(message)['NotificationContainer']
log.debug('Alert: %s %s %s', *data)
if self._callback:
self._callback(data)
except Exception as err: # pragma: no cover
log.error('AlertListener Msg Error: %s', err)
def _onError(self, *args): # pragma: no cover
""" Called when websocket error is received.
In earlier releases, websocket-client returned a tuple of two parameters: a websocket.app.WebSocketApp
object and the error. Current releases appear to only return the error.
We are assuming the last argument in the tuple is the message.
This is to support compatibility with current and previous releases of websocket-client.
"""
err = args[-1]
log.error('AlertListener Error: %s' % err)
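# Hedged usage sketch (added; not in the original module). The baseurl and token are
# placeholders, and PlexServer.startAlertListener() is assumed to be the entry point
# described in the class docstring; constructing AlertListener(server, callback) and
# calling .start() directly is equivalent. The local import avoids a circular import.
def _example_listen(baseurl='http://localhost:32400', token='PLEX-TOKEN'):
    from plexapi.server import PlexServer

    def on_alert(data):
        log.info('Got alert of type %s', data.get('type'))

    plex = PlexServer(baseurl, token)
    listener = plex.startAlertListener(callback=on_alert)
    return listener  # a started AlertListener thread; call .stop() when done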
|
py | 1a546ac67d155654aaccadfd49f32166dd01d8ac | from skimage.feature import hog
from skimage.transform import pyramid_gaussian
from sklearn.externals import joblib
from skimage import color
from imutils.object_detection import non_max_suppression
import imutils
import numpy as np
import cv2
import os
import glob
#Define HOG Parameters
# change them if necessary to orientations = 8, pixels per cell = (16,16), cells per block to (1,1) for weaker HOG
orientations = 9
pixels_per_cell = (8, 8)
cells_per_block = (2, 2)
threshold = .3
# define the sliding window:
def sliding_window(image, stepSize, windowSize):  # image is the input, stepSize is the number of pixels to shift the window each step, and windowSize is the (width, height) of the window
# slide a window across the image
for y in range(0, image.shape[0], stepSize):# this line and the line below actually defines the sliding part and loops over the x and y coordinates
for x in range(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y: y + windowSize[1], x:x + windowSize[0]])
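# Small illustration (added): walk a dummy grayscale image with the generator above.
# Windows that touch the right or bottom border come back smaller than (winW, winH),
# which is why the detection loop below skips windows with the wrong shape.
def _example_window_count():
    dummy = np.zeros((128, 256), dtype=np.uint8)
    full_sized = sum(
        1 for _, _, win in sliding_window(dummy, stepSize=10, windowSize=(64, 128))
        if win.shape[:2] == (128, 64))
    return full_sized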
#%%
# Upload the saved svm model:
model = joblib.load('Insert\Path\of_the_trained\SVM-model\here')
# Test the trained classifier on an image below!
scale = 0
detections = []
# read the image you want to detect the object in:
img= cv2.imread("Insert\Path\of_the_image\here")
# Try it with image resized if the image is too big
img= cv2.resize(img,(300,200)) # keep the original size by commenting this line out, or put in a different size
# defining the size of the sliding window (has to be the same as the size of the images in the training data)
(winW, winH)= (64,128)
windowSize=(winW,winH)
downscale=1.5
# Apply sliding window:
for resized in pyramid_gaussian(img, downscale=1.5): # loop over each layer of the image that you take!
# loop over the sliding window for each layer of the pyramid
for (x,y,window) in sliding_window(resized, stepSize=10, windowSize=(winW,winH)):
# if the window does not meet our desired window size, ignore it!
if window.shape[0] != winH or window.shape[1] !=winW: # ensure the sliding window has met the minimum size requirement
continue
window=color.rgb2gray(window)
fds = hog(window, orientations, pixels_per_cell, cells_per_block, block_norm='L2') # extract HOG features from the window captured
fds = fds.reshape(1, -1) # re shape the image to make a silouhette of hog
pred = model.predict(fds) # use the SVM model to make a prediction on the HOG features extracted from the window
if pred == 1:
            if model.decision_function(fds) > 0.6: # set a threshold on the SVM decision score, i.e. only keep detections scoring above 0.6
print("Detection:: Location -> ({}, {})".format(x, y))
print("Scale -> {} | Confidence Score {} \n".format(scale,model.decision_function(fds)))
detections.append((int(x * (downscale**scale)), int(y * (downscale**scale)), model.decision_function(fds),
int(windowSize[0]*(downscale**scale)), # create a list of all the predictions found
int(windowSize[1]*(downscale**scale))))
scale+=1
clone = resized.copy()
for (x_tl, y_tl, _, w, h) in detections:
cv2.rectangle(img, (x_tl, y_tl), (x_tl + w, y_tl + h), (0, 0, 255), thickness = 2)
rects = np.array([[x, y, x + w, y + h] for (x, y, _, w, h) in detections]) # do nms on the detected bounding boxes
sc = [score[0] for (x, y, score, w, h) in detections]
print("detection confidence score: ", sc)
sc = np.array(sc)
pick = non_max_suppression(rects, probs = sc, overlapThresh = 0.3)
# the piece of code above creates the raw bounding boxes prior to applying NMS
# the code below draws the bounding boxes that survive NMS on the detections
# you can visualise whichever set you prefer: simply call cv2.imshow at the
# corresponding point in the script (it runs top to bottom).
for (xA, yA, xB, yB) in pick:
cv2.rectangle(img, (xA, yA), (xB, yB), (0,255,0), 2)
cv2.imshow("Raw Detections after NMS", img)
#### Save the images below
k = cv2.waitKey(0) & 0xFF
if k == 27: #wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite('Path\to_the_directory\of_saved_image.png',img)
cv2.destroyAllWindows()
|
py | 1a546b9dfa99b522735d3fab470f94cc6044fce4 | from django.contrib import admin
from . import models
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
list_display_links= (
'location',
'caption',
)
search_fields=(
'location',
'caption',
)
list_filter=(
'location',
'creator',
)
list_display = (
'creator',
'file',
'location',
'caption',
'created_at',
'updated_at',
)
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
list_display = (
'creator',
'image',
'created_at',
'updated_at',
)
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = (
'message',
'creator',
'image',
'created_at',
'updated_at',
)
|
py | 1a546c7da21e1e6457614f3393ebc93f61e89841 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint:disable=line-too-long
r"""Counts average audio length.
"""
# pylint:enable=line-too-long
import os
from typing import Any, Dict, Iterable, Optional, List, Tuple
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import numpy as np
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import data_prep_utils as utils
flags.DEFINE_string('output_file', None, 'Output file.')
flags.DEFINE_string('debug', None, 'If present, only count this dataset.')
flags.DEFINE_list(
'audio_keys', ['audio', 'processed/audio_samples', 'audio_waveform',
'WAVEFORM/feature/floats'],
'Possible audio keys in tf.Examples.')
flags.DEFINE_list(
'sr_keys', ['audio_sample_rate'],
'Possible sample rate keys in tf.Examples.')
flags.DEFINE_integer(
'batch_size', None, 'Number of examples to process at once.')
FLAGS = flags.FLAGS
def duration_from_tfex(k_v):
"""Duration from a tf.Example."""
k, ex = k_v
audio_vals = []
for possible_audio_key in FLAGS.audio_keys:
if possible_audio_key in ex.features.feature:
logging.info('Found audio key: %s', possible_audio_key)
audio_feats = ex.features.feature[possible_audio_key]
cur_audio_vals = (audio_feats.int64_list.value or
audio_feats.float_list.value)
assert cur_audio_vals
audio_vals.append(cur_audio_vals)
assert len(audio_vals) == 1, ex
audio_vals = audio_vals[0]
logging.info('%s audio: %s', k, audio_vals)
sr_vals = []
for possible_sr_key in FLAGS.sr_keys:
if possible_sr_key in ex.features.feature:
logging.info('Found sample rate key: %s', possible_sr_key)
cur_audio = ex.features.feature[possible_sr_key].int64_list.value[0]
sr_vals.append(cur_audio)
assert len(sr_vals) in [0, 1], ex
if len(sr_vals) == 1:
sr = sr_vals[0]
else:
logging.info('Used default sr.')
sr = 16000
return len(audio_vals) / float(sr)
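# Worked sketch (added for illustration): 32000 samples at 16 kHz is a 2.0 second
# clip. It relies on the default --audio_keys/--sr_keys flag values, so it can only be
# called after absl flag parsing (e.g. from main), not at import time.
def _example_duration():
  ex = tf.train.Example(features=tf.train.Features(feature={
      'audio': tf.train.Feature(
          float_list=tf.train.FloatList(value=[0.0] * 32000)),
      'audio_sample_rate': tf.train.Feature(
          int64_list=tf.train.Int64List(value=[16000])),
  }))
  return duration_from_tfex(('example_key', ex))  # -> 2.0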
def durations_from_tfexs(k_vs):
for k_v in k_vs:
yield duration_from_tfex(k_v)
def durations(root, ds_file, ds_name,
reader_type, suffix):
"""Beam pipeline for durations from a particular file or glob."""
logging.info('Reading from %s: (%s, %s)', reader_type, ds_name, ds_file)
input_examples = utils.reader_functions[reader_type](
root, ds_file, f'Read-{suffix}')
if FLAGS.batch_size:
input_examples = input_examples | f'Batch-{suffix}' >> beam.BatchElements(
min_batch_size=FLAGS.batch_size, max_batch_size=FLAGS.batch_size)
return input_examples | f'Lens-{suffix}' >> beam.FlatMap(
durations_from_tfexs)
else:
return input_examples | f'Lens-{suffix}' >> beam.Map(duration_from_tfex)
def duration_and_num_examples(
root, ds_files, ds_name,
reader_type):
"""Beam pipeline for durations from a list of files or globs."""
durations_l = []
for i, ds_file in enumerate(ds_files):
cur_dur = durations(
root, ds_file, ds_name, reader_type, suffix=f'{ds_name}_{i}')
durations_l.append(cur_dur)
def _mean_and_count(durs):
return np.mean(durs), len(durs)
return (durations_l
| f'Flatten-{ds_name}' >> beam.Flatten()
| f'ToList-{ds_name}' >> beam.combiners.ToList()
| f'Stats-{ds_name}' >> beam.Map(_mean_and_count))
def get_dataset_info_dict(
debug):
"""Get dictionary of dataset info."""
def _tfds_fns(ds_name):
fns = [
x # pylint:disable=g-complex-comprehension
for s in ('train', 'validation', 'test')
for x in utils.tfds_filenames(ds_name, s)] # pylint:disable=protected-access
fns = [fns] # TFRecords require a list.
return (fns, 'tfrecord')
dss = {
'crema_d':
_tfds_fns('crema_d'),
'savee':
_tfds_fns('savee'),
'speech_commands':
_tfds_fns('speech_commands'),
'voxceleb':
_tfds_fns('voxceleb'),
}
if debug:
dss = {debug: dss[debug]}
return dss
def main(unused_argv):
dss = get_dataset_info_dict(FLAGS.debug)
out_file = FLAGS.output_file
assert not tf.io.gfile.exists(out_file)
if not tf.io.gfile.exists(os.path.dirname(out_file)):
tf.io.gfile.makedirs(os.path.dirname(out_file))
pipeline_option = None
with beam.Pipeline(pipeline_option) as root:
stats = [] # (ds name, avg duration, num examples)
for ds_name, (ds_files, reader_type) in dss.items():
cur_stat = duration_and_num_examples(root, ds_files, ds_name, reader_type)
cur_stat = cur_stat | f'AddName-{ds_name}' >> beam.Map(
lambda x, name: (name, x[0], x[1]), name=ds_name)
stats.append(cur_stat)
# Write output.
_ = (
stats
| 'CombineDSes' >> beam.Flatten()
| 'ToStr' >> beam.Map(lambda x: ','.join([str(r) for r in x]))
| 'WriteOutput' >> beam.io.WriteToText(out_file, num_shards=1))
if __name__ == '__main__':
flags.mark_flag_as_required('output_file')
app.run(main)
|
py | 1a546cf3a1febecdee189e76e9e0862ca31b90ca | # -*- coding: utf-8 -*-
from . import controllers
from . import models
from . import wizard
from . import reports
|
py | 1a546dab07ea1e3fbd19621351ee321a1e0ffd92 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2020-2021 The Hive Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from datetime import datetime, timezone
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
import subprocess
from subprocess import CalledProcessError
import time
import socket
from contextlib import closing
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
##########################################################################################
# Assert functions
##########################################################################################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_contains(val, arr):
if not (val in arr):
raise AssertionError("val %s not in arr" % val)
def assert_does_not_contain(val, arr):
if val in arr:
raise AssertionError("val %s is in arr" % val)
def assert_contains_pair(key, val, dict_data):
if not (key in dict_data and val == dict_data[key]):
raise AssertionError("k/v pair (%s,%s) not in dict" % (key, val))
def assert_contains_key(key, dict_data):
if key not in dict_data:
raise AssertionError("key %s is not in dict" % key)
def assert_does_not_contain_key(key, dict_data):
if key in dict_data:
raise AssertionError("key %s is in dict" % key)
def assert_fee_amount(fee, tx_size, fee_per_kb):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kb / 1000
if fee < target_fee:
raise AssertionError("Fee of %s HVN too low! (Should be %s HVN)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kb / 1000:
raise AssertionError("Fee of %s HVN too high! (Should be %s HVN)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2, err_msg="Greater"):
if thing1 <= thing2:
raise AssertionError("%s ~~ %s <= %s" % (err_msg, str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
def assert_happening(date_str, within_secs=120):
""" Make sure date_str happened withing within_secs seconds of now.
Assumes date_str is in rpc results cust_format e.g. '2019-11-07 17:50:06' and assumed to represent UTC.
Using a big default to eliminate inaccurate wall clocks...
"""
cust_format = '%Y-%m-%d %H:%M:%S'
then = datetime.strptime(date_str, cust_format).replace(tzinfo=timezone.utc)
now = datetime.now(timezone.utc)
diff_secs = (now - then).total_seconds()
if abs(diff_secs) > within_secs:
raise AssertionError("More than expected %s second difference between %s and now(%s) (%ss)" % (within_secs, date_str, now, diff_secs))
##########################################################################################
# Utility functions
##########################################################################################
def check_json_precision():
"""Make sure json library being used does not lose precision converting HVN values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
x16r_hash_cmd = os.path.dirname(os.path.realpath(__file__)) + "/../../../src/test/test_hive_hash"
def x16_hash_block(hex_str, algorithm="2"):
"""
:param hex_str: Blockhash to convert
:param algorithm: Which algorithm ~~ "1" = x16r "2" = x16rv2
:return: Converted hash
"""
cmd = [x16r_hash_cmd, hex_str, algorithm]
blk_hash = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout.decode('ascii')
return blk_hash
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, err_msg, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt, err_msg + " ~~ Exceeded Attempts")
assert_greater_than(time.ctime(timeout), time.ctime(), err_msg + " ~~ Exceeded Timeout")
raise RuntimeError('Unreachable')
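# Illustrative sketch (added): wait_until polls a predicate, so the usual pattern in
# the functional tests is a lambda over node RPC state; 'node' and 'txid' here are
# placeholders supplied by the caller.
def _example_wait_for_mempool(node, txid, timeout=30):
    wait_until(lambda: txid in node.getrawmempool(),
               err_msg="tx %s never entered the mempool" % txid,
               timeout=timeout)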
##########################################################################################
# RPC/P2P connection constants and functions
##########################################################################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
# List to store P2P ports
p2p_ports = [-1, -1, -1, -1, -1, -1, -1, -1]
# List to store RPC ports
rpc_ports = [-1, -1, -1, -1, -1, -1, -1, -1]
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def find_free_port():
"""
Ask the system for a free port.
In case of error return error message.
:return: {Tuple}
"""
port = None
error = {}
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', 0))
sock_name = s.getsockname()
if type(sock_name) is tuple and len(sock_name) == 2:
port = sock_name[1]
except socket.error as e:
error = {'errno': e.errno, 'msg': str(e)}
return port, error
def get_rpc_proxy(url, node_number, timeout=None, coverage_dir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
timeout: time to wait
coverage_dir: directory to watch
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coverage_dir, node_number) if coverage_dir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    if p2p_ports[n] == -1:
# Port isn't in the list, find one that is available
p2p_ports[n] = find_free_port()[0]
return p2p_ports[n]
else:
return p2p_ports[n]
def rpc_port(n):
    if rpc_ports[n] == -1:
# Port isn't in the list, find one that is available
rpc_ports[n] = find_free_port()[0]
return rpc_ports[n]
else:
return rpc_ports[n]
def rpc_url(data_dir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(data_dir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
##########################################################################################
# Node functions
##########################################################################################
def initialize_data_dir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "hive.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "hive.conf")):
with open(os.path.join(datadir, "hive.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
user_pass = f.read()
split_user_pass = user_pass.split(':')
user = split_user_pass[0]
password = split_user_pass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, log_name):
return os.path.join(dirname, "node" + str(n_node), "regtest", log_name)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if not [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def disconnect_all_nodes(nodes):
for i in range(0, len(nodes)):
for j in range(i + 1, len(nodes)):
disconnect_nodes(nodes[i], j)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b, wait=False):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
if wait:
wait_for_block_sync(nodes, a, b)
def connect_all_nodes_bi(nodes, wait=False):
for i in range(0, len(nodes)):
for j in range(i + 1, len(nodes)):
connect_nodes_bi(nodes, i, j, wait)
def wait_for_block_sync(nodes, a, b, timeout=60):
# Wait for the nodes to connect and sync which caused some tests to randomly fail.
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
block_count_diff = abs(nodes[a].getblockcount() - nodes[b].getblockcount())
if block_count_diff == 0:
return
time.sleep(0.1)
cur_time = time.time()
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
max_height = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
tips = None
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(max_height, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == max_height for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format("".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(max_height, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
##########################################################################################
# Transaction/Block functions
##########################################################################################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert (confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return total_in, inputs
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return txid, signresult["hex"], fee
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while node.getmempoolinfo()['size'] > 0:
node.generate(1)
utxos = node.listunspent()
assert (len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for _ in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
|
py | 1a546dcd1212f4edb8e3e528de44eba9eb69d899 | # Import the class
from collections import OrderedDict
# Create an empty ordered dictionary
glossary = OrderedDict()
# Add key-value pairs to the ordered dictionary
glossary['print'] = '打印'
glossary['title'] = '首字母大写'
glossary['lower'] = '全部小写'
glossary['upper'] = '全部大写'
glossary['str'] = '字符串'
glossary['key'] = '键'
glossary['value'] = '值'
glossary['items'] = '项目'
glossary['sorted'] = '排序'
glossary['set'] = '集合'
# Loop through the dictionary and print each entry
for vocabulary, explanation in glossary.items():
print(f"{vocabulary.title()}'s explanation is {explanation.title()}") |
py | 1a546f4f5a74ec859a3c315d231c277c57a70291 | import ctypes
import sys,os,re
import numpy
import time
import arcscriptsdir
import sdnapy
#print "attach debugger"
#sys.stdin.readline()
serial_dll_path = os.environ["sdnadll"]
parallel_dll_path = re.sub("debug","parallel_debug",serial_dll_path,flags=re.IGNORECASE)
if os.path.getmtime(serial_dll_path)-os.path.getmtime(parallel_dll_path) > 3600*10:
print "dlls built more than 10 hours apart"
sys.exit(1)
#print "serial dll",serial_dll_path
#print "parallel dll",parallel_dll_path
parallel_dll = ctypes.windll.LoadLibrary(parallel_dll_path)
serial_dll = ctypes.windll.LoadLibrary(serial_dll_path)
current_net_arcids = None
# dummy progress bar callback func
CALLBACKFUNCTYPE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_long)
def set_progressor(x):
return 0
set_progressor_callback = CALLBACKFUNCTYPE(set_progressor)
WARNINGCALLBACKFUNCTYPE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
def warning(x):
#print x
return 0
warning_callback = WARNINGCALLBACKFUNCTYPE(warning)
def add_polyline(dll,net,arcid,points,elev):
global current_net_arcids
current_net_arcids += [arcid]
point_array_x = (ctypes.c_double*len(points))()
point_array_y = (ctypes.c_double*len(points))()
for i,(x,y) in enumerate(points):
point_array_x[i] = x
point_array_y[i] = y
dll.net_add_polyline(net,arcid,len(points),point_array_x,point_array_y)
dll.net_add_polyline_data(net,arcid,"arcid",ctypes.c_float(arcid))
dll.net_add_polyline_text_data(net,arcid,"zone",ctypes.c_char_p("%d"%(arcid%1)))
def add_random_kink_link(dll,net,my_id,squaresize,origin,destination):
point_list = [origin,destination]
num_segs = int(numpy.random.exponential(2.15))
for _ in range(num_segs):
kink_point = (origin+destination)/2+numpy.random.random(2)/2
point_list.insert(1,kink_point)
point_list = [point*squaresize for point in point_list]
add_polyline(dll,net,my_id,point_list,(0,0))
def grid_test(dll,net,net_size,desired_num_links):
numpy.random.seed(seed=13)
squares_per_side = int(pow(desired_num_links/2,0.5))
grid_square_size = net_size/squares_per_side
my_id = 0
for x in range(squares_per_side):
for y in range(squares_per_side):
add_random_kink_link(dll,net,my_id,grid_square_size,numpy.array((x,y)),numpy.array((x+1,y)))
my_id += 1
add_random_kink_link(dll,net,my_id,grid_square_size,numpy.array((x,y)),numpy.array((x,y+1)))
my_id += 1
def test_net(dll,net_definition,euclidean_radii,cont_space,prob_link):
global current_net_arcids
current_net_arcids = []
net = ctypes.c_void_p(dll.net_create())
config_string = "radii=%s;"%",".join(map(str,euclidean_radii))
config_string += "metric=euclidean_angular;outputskim;skimzone=zone;"
config_string += "cont=%s;"%cont_space
# can't set restype as c_void_p or it truncates 64 bit addresses
calculation = ctypes.c_void_p(dll.integral_calc_create(net,config_string,set_progressor_callback,warning_callback))
if not calculation:
print "calc create failed"
sys.exit(0)
net_definition(dll,net)
dll.calc_run.restype = ctypes.c_int
savestdout = os.dup(1)
os.close(1)
calcres = dll.calc_run(calculation)
os.dup(savestdout)
if not calcres:
print "calc run failed"
sys.exit(0)
dll.icalc_get_short_output_names.restype = ctypes.POINTER(ctypes.c_char_p)
dll.icalc_get_output_length.restype = ctypes.c_int
outlength = dll.icalc_get_output_length(calculation)
names = dll.icalc_get_short_output_names(calculation)
n=[]
for i in range(outlength):
n += [names[i]]
out_buffer_type = ctypes.c_float * outlength
out_buffer = out_buffer_type()
out_array = []
for i in current_net_arcids:
dll.icalc_get_all_outputs(calculation,out_buffer,i)
out_array += [list(out_buffer)]
dll.calc_get_num_geometry_outputs.restype = ctypes.c_long
numgeomouts = dll.calc_get_num_geometry_outputs(calculation)
for i in range(numgeomouts):
geom = sdnapy.GeometryLayer(ctypes.c_void_p(dll.calc_get_geometry_output(calculation,ctypes.c_long(i))))
if geom.name=="skim":
skimdata = [item.data for item in geom.get_items()]
dll.net_destroy(net)
dll.calc_destroy(calculation)
return n,out_array,skimdata
desired_num_links = 50
bound_grid_test = lambda d,n: grid_test(d,n,5000,desired_num_links)
start = time.time()
print "testing serial"
snames,sdata,sskim = test_net(serial_dll,bound_grid_test,[400,1000,"n"],True,1)
serial_end = time.time()
print serial_end-start,"secs"
print "testing parallel"
parallel_start = time.time()
pnames,pdata,pskim = test_net(parallel_dll,bound_grid_test,[400,1000,"n"],True,1)
parallel_end = time.time()
print parallel_end-parallel_start,"secs"
print "though most of this is io so times will be very similar"
assert(pnames==snames)
all_matches = True
for link in current_net_arcids:
for i,name in enumerate(pnames):
if str(pdata[link][i])!=str(sdata[link][i]):
print link,name,sdata[link][i],pdata[link][i]
all_matches = False
for sd,pd in zip(sskim,pskim):
if sd!=pd:
print "skim mismatch: "
print sd
print pd
all_matches = False
assert(all_matches)
print "Serial and parallel results match"
|
py | 1a546f771022f93efbf29dd1f3c5fc5e9113a861 | from courses.models import Course, Topic, Events, Books, Notification
from rest_framework import serializers
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('id', 'title', 'image_url', 'url', 'note')
class TopicSerializer(serializers.ModelSerializer):
class Meta:
model = Topic
fields = ('id', 'image_url', 'topic',)
class EventsSerializer(serializers.ModelSerializer):
class Meta:
model = Events
fields = ('id', 'event', 'title','note', 'url', 'image_url')
class BooksSerializer(serializers.ModelSerializer):
class Meta:
model = Books
fields = ('id', 'image_url', 'title','note', 'url')
class NotificationSerializer(serializers.ModelSerializer):
class Meta:
model = Notification
fields = ('id', 'notice', 'title',)
#[email protected]
|
py | 1a54701773078c16ef1a64a571b52450ee61cc9e | class Solution:
def floodFill(self, grid, sr, sc, newColor):
m, n = len(grid), len(grid[0])
self.target = grid[sr][sc]
def dfs(x, y):
grid[x][y] = newColor
for i, j in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
if (0 <= x + i < m and 0 <= y + j < n) and grid[x + i][y + j] == self.target:
dfs(x + i, y + j)
if self.target == newColor:
return grid
dfs(sr, sc)
return grid
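# Quick check added for illustration (LeetCode 733): starting from (1, 1) repaints the
# 4-connected region of 1s with the new color 2, leaving the isolated cells untouched.
if __name__ == "__main__":
    image = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
    assert Solution().floodFill(image, 1, 1, 2) == [[2, 2, 2], [2, 2, 0], [2, 0, 1]]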
|
py | 1a54727a6c2b23e4818954bc2caa12c9224db871 | '''
snpPriority.py - score SNPs based on their LD score and SE weighted effect sizes
===============================================================================
:Author: Mike Morgan
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. Score SNPs based on their LD score and SE weighted effect sizes from
association analysis.
Usage
-----
.. Example use case
Example::
python snpPriority.py
Type::
python snpPriority.py --help
for command line help.
Command line options
--------------------
'''
import sys
import CGAT.Experiment as E
import PipelineGWAS as gwas
import re
import pandas as pd
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--score-method", dest="method", type="choice",
choices=["PICS", "LDscore", "ABF", "R2_rank",
"get_eigen", "calc_prior", "credible_set",
"summarise"],
help="SNP scoring/prioritisation method to apply.")
parser.add_option("--database", dest="database", type="string",
help="SQL database containing LD information "
"in table format. Expects columns SNP_A, "
"SNP_B, R2, BP_A and BP_B (Plink --r2 output)")
parser.add_option("--ld-directory", dest="ld_dir", type="string",
help="directory containing tabix-index BGZIP "
"LD files. Assumes Plink used to calculate LD")
parser.add_option("--table-name", dest="table", type="string",
help="name of the SQL table containing the LD"
"values")
parser.add_option("--chromosome", dest="chromosome", type="string",
help="chromosome to subset the association results "
"file on")
parser.add_option("--ld-threshold", dest="ld_threshold", type="float",
help="the threshold of LD above which variants will "
"be taken forward.")
parser.add_option("--rank-threshold", dest="rank_threshold", type="float",
help="the threshold in terms of the top n% SNPs to "
"output based on the ranking metric. e.g. "
"--rank-threshold=0.01 is the top 1% SNPs")
parser.add_option("--credible-interval", dest="interval", type="float",
help="The credible set interval size to generate the "
"credible set of SNPs")
parser.add_option("--prior-variance", dest="prior_var", type="float",
help="the prior variance used to weight the SNP "
"variance")
parser.add_option("--fine-map-window", dest="map_window", type="int",
help="the region size to included around the index "
"SNP as the fine-mapping region.")
parser.add_option("--eigen-score-directory", dest="eigen_dir", type="string",
help="PATH to directory containing tabix indexed "
"eigen score files")
parser.add_option("--flat-prior", dest="flat_prior", action="store_true",
help="Ignore functional annotation information and "
"use an uninformative prior on each SNP")
parser.add_option("--snp-set", dest="snp_set", type="string",
help="Pre-defined SNP set as a list of SNP IDs."
"If used to calculate priors contains column of scores.")
parser.add_option("--distribution", dest="dist", type="choice",
choices=["normal", "t", "gamma", "lognormal",
"exponential"],
help="distribution from which to draw prior "
"probabilities")
parser.add_option("--distribution-parameters", dest="dist_params", type="string",
help="distribution parameters as a comma-separated list")
parser.add_option("--lead-snp-id", dest="lead_snp", type="int",
help="0-based item number in filename")
parser.add_option("--filename-separator", dest="separator", type="string",
help="filename separator to extract information")
parser.add_option("--snp-column", dest="snp_col", type="int",
help="0-based index of SNP ID column number")
parser.add_option("--probability-column", dest="prob_col", type="int",
help="0-based index of posterior probabilities column"
" number")
parser.set_defaults(ld_dir=None,
dist="normal",
dist_params=None,
snp_set=None,
prior_var=0.04,
interval=0.99,
eigen_dir=None,
map_window=100000,
ld_threshold=0.5,
database=None,
table=None,
flat_prior=False,
lead_snp=2,
separator="_",
snp_col=0,
prob_col=1,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
infile = argv[-1]
if len(infile.split(",")) > 1:
pass
else:
        peek = pd.read_table(infile, nrows=5, sep=r"\s*", header=0)
        try:
            if (peek["TEST"] != "ADD").any():
clean = False
else:
clean = True
except KeyError:
clean = True
if options.method == "LDscore":
snpscores = gwas.snpPriorityScore(gwas_results=infile,
database=options.database,
table_name=options.table,
chromosome=options.chromosome,
ld_dir=options.ld_dir,
clean=clean)
# take top 1%, all SNPs doesn't achieve anything useful
ranks = int(len(snpscores.index) * 0.01)
snpscores = snpscores.iloc[:ranks]
elif options.method == "PICS":
snp_list = {}
if options.snp_set and not options.flat_prior:
with IOTools.openFile(options.snp_set, "r") as sfile:
for line in sfile.readlines():
snp = line.split("\t")[0]
try:
score = float(line.split("\t")[-1].rstrip("\n"))
except ValueError:
score = 0
snp_list[snp] = float(score)
# get the parameter estimates for the distribution
# if they have not been provided
if not options.dist_params:
dist_params = gwas.estimateDistributionParameters(data=snp_list.values(),
distribution=options.dist)
else:
dist_params = tuple([float(fx) for fx in options.dist_params.split(",")])
E.info("Calculating priors on SNPs")
priors = gwas.calcPriorsOnSnps(snp_list=snp_list,
distribution=options.dist,
params=dist_params)
elif options.snp_set and options.flat_prior:
with IOTools.openFile(options.snp_set, "r") as sfile:
for line in sfile.readlines():
snp = line.split("\t")[0]
snp_list[snp] = 1.0
priors = snp_list
else:
# allow for no priors or scores to be set,
# use of priors will be ignored,
# i.e. when prior and likelihood are not from
# conjugate distributions
priors = None
# PICS scores expects the gwas results file to
# only contain the region of interest, which
# represents an independent association signal
snpscores = gwas.PICSscore(gwas_results=infile,
database=options.database,
table_name=options.table,
chromosome=options.chromosome,
priors=priors,
clean=clean,
ld_dir=options.ld_dir,
ld_threshold=options.ld_threshold)
snpscores.columns = ["SNP", "PICS"]
posterior_sum = 0
snpscores.sort_values(ascending=False,
inplace=True)
post_snps = []
for snp in snpscores.index:
if posterior_sum < 99.0:
posterior_sum += snpscores.loc[snp]
post_snps.append(snp)
else:
break
snpscores = snpscores.loc[post_snps]
snpscores.drop_duplicates(inplace=True)
elif options.method == "R2_rank":
# rank SNPs based on their LD with the lead
# SNP, take the top n% SNPs
snpscores = gwas.LdRank(gwas_results=infile,
database=options.database,
table_name=options.table,
ld_dir=options.ld_dir,
chromosome=options.chromosome,
ld_threshold=options.ld_threshold,
top_snps=options.rank_threshold,
clean=clean)
elif options.method == "ABF":
snpscores = gwas.ABFScore(gwas_results=infile,
region_size=options.map_window,
chromosome=options.chromosome,
prior_variance=options.prior_var,
clean=clean)
elif options.method == "get_eigen":
E.info("Fetching Eigen scores")
snpscores = gwas.getEigenScores(eigen_dir=options.eigen_dir,
bim_file=infile,
snp_file=options.snp_set)
snpscores = pd.DataFrame(snpscores).T
elif options.method == "credible_set":
E.info("Creating credible set")
snpscores = gwas.makeCredibleSet(probs_file=infile,
credible_set=options.interval,
lead_snp_indx=options.lead_snp,
filename_sep=options.separator,
snp_column=options.snp_col,
probs_column=options.prob_col)
elif options.method == "summarise":
E.info("Collating SNP prioritisation resuslts")
file_list = infile.split(",")
snpscores = gwas.summariseResults(file_list=file_list)
snpscores.to_csv(options.stdout, index_label="SNP",
sep="\t")
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py | 1a5473832dc64d9d7872c4005ef9d76075c31758 | """
track, part of glbase
"""
import pickle, sys, os, struct, math, sqlite3, zlib, time, csv, zlib
from operator import itemgetter
from .progress import progressbar
from .errors import AssertionError
from .location import location
from . import genelist as Genelist
from . import utils, config
from .data import positive_strand_labels, negative_strand_labels
from .draw import draw
import matplotlib.cm as cm
import matplotlib.pyplot as plot
import scipy.stats as stats
from scipy.stats import pearsonr
from .base_track import base_track
import numpy
from numpy import array, zeros, set_printoptions, int32, append, linspace, argmax, amax, delete
TRACK_CACHE_SIZE = 10 # number of track segments to cache.
class track(base_track):
"""
track definition, used for things like sequence reads across the genome
**Arguments**
name (string)
name for the track (defaults to filename)
filename (string)
directory location of the track file.
only respected if dir_name is set.
new (Optional, default=False)
        Use seqToTrk() in preference to this. But if you know what you are
doing then this will generate a new (empty) db.
norm_factor (Optional, default = 1.0)
An optional normalization factor. Data is multiplied by this number before display.
Can only be specified at creation time, cannot be modified later.
mem_cache (Optional, default=False)
Instead of doing the whole thing from disk, first cache the DB in memory for
super fast access. Just make sure you have enough memory!
pre_build (Optional, default=[0, 100, 200])
prebuild genome arrays for various read_extend parameters for fast lookups.
By default glbase builds indexes for 100 and 200 bp read_extends and unextended
(i.e. the complete frag size, useful for e.g. paired-end data). Raw frag sizes are returned
when read_extend and pre_build are 0
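    **Example (illustrative sketch; assumes a trk file already generated with seqToTrk())**
        trk = track(filename="sample.trk")
        reads = trk.get_reads("chr1:1000-2000")  # list of (left, right, strand) tuples
        density = trk.get("chr1:1000-2000", read_extend=200)  # numpy array of tag density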
"""
def __init__(self, name=None, new=False, filename=None, norm_factor=None, mem_cache=False, pre_build=[0, 100, 200], **kargs):
base_track.__init__(self, name, new, filename, norm_factor, mem_cache)
if new:
if norm_factor is None:
norm_factor = 1.0
self.meta_data['norm_factor'] = str(norm_factor)
self.pre_build = pre_build
self.meta_data['pre_build'] = pre_build
self.__setup_tables(filename)
config.log.info('Generating new track')
else:
assert not norm_factor, 'norm_factor can only be modified at creation time'
# if not new, get pre_build from the metadatum
try:
self.pre_build = self.meta_data['pre_build']
except KeyError:
raise AssertionError('meta data not found in trk file, this suggests the trk file is incomplete, please check your trk file and regenerate if required')
self.norm_factor = float(self.meta_data['norm_factor'])
if self.norm_factor != 1.0:
config.log.info('track: Using norm_factor=%.3f' % self.norm_factor)
#print norm_factor, self.norm_factor, str(norm_factor) != str(self.norm_factor)
#if str(norm_factor) != str(self.norm_factor):
# config.log.error('the norm_factor supplied here does not match the norm_factor used during the creation of the track!')
# raise AssertionError, 'norm_factor != norm_factor (%.2f != %.2f) stored in the trk file.' % (norm_factor, self.norm_factor)
self.__warned_about_zlib = False # To deprecate
def __repr__(self):
return("glbase.track")
# I use these ones here as tracks prefer numpy arrays
# LATER: port the flat_tracks to use numpy arrays
def _format_data(self, data):
"""
array('i', []) --> whatever it's stored as in db
"""
return(sqlite3.Binary(zlib.compress(data.dumps(), 1)))
def _unformat_data(self, data):
"""
whatever stored as in db --> array('i', [])
"""
#print "ret:",[d for d in data], ":"
try:
a = numpy.loads(zlib.decompress(data))
except UnicodeDecodeError:
print(data.decode('utf-8'))
'''
a = numpy.loads(zlib.decompress(data))
if not self.__warned_about_zlib:
        config.log.warning('Tracks are no longer Zlib compressed by default. Tracks will need to be regenerated')
config.log.warning('Benefit is that they are now faster!')
config.log.warning('In future versions of glbase this warning will be removed and an error will be produced')
self.__warned_about_zlib = True
'''
return(a)
def __setup_tables(self, filename):
"""
No pre-defined file - I want a new database track.
"""
# If i make it here, then a lot of grunt work already done in base_track
c = self._connection.cursor()
c.execute("CREATE TABLE main (chromosome TEXT PRIMARY KEY, seq_reads INT)")
c.execute("CREATE TABLE pre_build (chromosome TEXT, read_extend TEXT, array_blob BLOB)")
self._connection.commit()
c.close()
def finalise(self):
c = self._connection.cursor()
# Do the prebuilds:
list_of_all_chroms_in_db = self.get_chromosome_names()
for read_extend in self.pre_build:
for chrom in list_of_all_chroms_in_db:
a = self.get_array_chromosome(chrom, read_extend=read_extend, _silent=True) # Force a cache miss
c.execute('INSERT INTO pre_build VALUES (?, ?, ?)', (chrom, read_extend, self._format_data(a)))
config.log.info('Cached chrom=%s, read_extend=%s' % (chrom, str(read_extend)))
#print self.meta_data
base_track.finalise(self)
def __add_chromosome(self, chromosome):
"""
add a chromosome to the main table.
add a chromosome table.
        returns True if successfully created/already present.
"""
c = self._connection.cursor()
# check chromosome is not already present.
if self.__has_chromosome(chromosome):
return(True)
c.execute("INSERT INTO main VALUES (?, ?)", (chromosome, 0)) # add chr to master table.
        # make the new chromosome table:
table_name = "chr_%s" % str(chromosome)
c.execute("CREATE TABLE %s (left INT, right INT, strand TEXT)" % (table_name, ))
#c.execute("CREATE INDEX %s_com_idx ON %s(left, right)" % (table_name, table_name))
#c.execute("CREATE INDEX %s_lef_idx ON %s(left)" % (table_name, table_name))
#c.execute("CREATE INDEX %s_rig_idx ON %s(right)" % (table_name, table_name))
c.close()
return(True)
def __has_chromosome(self, chromosome):
"""
do we have that chromosome?
"""
c = self._connection.cursor()
c.execute("SELECT chromosome FROM main WHERE chromosome=?", (chromosome, ))
result = c.fetchone()
c.close()
return bool(result)
def add_location(self, loc, strand="+", increment=1):
"""
**Purpose**
Add a location to the track.
Increments the score by 'increment' from loc["left"] to
loc["right"]
**Arguments**
loc
strand
increment
**Returns**
            True if it completes successfully, otherwise an exception is raised.
"""
if not self.__has_chromosome(loc["chr"]):
self.__add_chromosome(loc["chr"])
# convert strand to +, -
if strand in positive_strand_labels:
strand = "+"
elif strand in negative_strand_labels:
strand = "-"
if not self._c:
self._c = self._connection.cursor()
# insert location into new array.
table_name = "chr_%s" % str(loc["chr"])
# get the old number of seq_reads
self._c.execute("SELECT seq_reads FROM main WHERE chromosome=?", (loc["chr"], ))
current_seq_reads = self._c.fetchone()[0] # always returns a tuple
self._c.execute("UPDATE main SET seq_reads=? WHERE chromosome=?", (current_seq_reads+1, loc["chr"]))
# add the location to the seq table:
self._c.execute("INSERT INTO %s VALUES (?, ?, ?)" % table_name, (loc["left"], loc["right"], strand))
#c.close()
def get(self, loc, resolution=1, read_extend=0, kde_smooth=False,
view_wid=0, strand=False, **kargs):
"""
**Purpose**
get the data between location 'loc' and return it formatted as
            an nbp resolution array
**Arguments**
loc (Required)
a valid location or string location.
resolution (Optional, default = 1bp)
nbp resolution required (you should probably send a float for accurate rendering)
read_extend (Optional, default = 0)
extend the read length to 'fill in the peak'
if the original reads are 36bp, then add ~70bp to give an
estimated size of the peak.
If the reads are end-based, then set this to the estimated
size of the DNA shear.
kde_smooth (Experimental)
                perform kde smoothing on the data, using the integer specified as an option.
In this case the read_extend acts as a tag shift instead of a read_extend
Hence set that to half of the expected shear size.
strand (Optional, default=False)
collect only reads on the specified strand. (track will use read strand
information intelligently, if present).
**Returns**
            a 'numpy.array([0, 1, 2 ... n])' contiguous array
or a tuple containing two arrays, one for each strand.
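        **Example (an illustrative sketch)**
            trk = track(filename="sample.trk")
            trk.get("chr1:1000-2000", resolution=10, read_extend=200)
            # returns a numpy array of roughly 100 bins covering the requested span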
"""
if not isinstance(loc, location):
loc = location(loc=loc)
extended_loc = loc.expand(read_extend)
result = self.get_reads(extended_loc, strand=strand)
if kde_smooth:
return(self.__kde_smooth(loc, result, resolution, 0, view_wid, read_extend))
loc_left = loc["left"]
loc_right = loc["right"]
# make a single array
a = [0] * int( (loc_right-loc_left+resolution)/resolution ) # Fast list allocation
# Python lists are much faster for this than numpy or array
len_a = len(a)
for r in result:
read_left, read_right, strand = r
if strand == "+":
read_right += (read_extend + 1) # coords are open
elif strand == "-" :
read_left -= read_extend
read_right += 1 # coords are open
rel_array_left = int((read_left - loc_left) // resolution)
rel_array_right = int((read_right - loc_left) // resolution)
rel_array_left = max(rel_array_left, 0)
rel_array_right = min(rel_array_right, len_a)
for array_relative_location in range(rel_array_left, rel_array_right, 1):
a[array_relative_location] += 1
#a[rel_array_left:rel_array_right] += 1 # Why is this slower than the for loop? # could be done with num_expr?
#[a[array_relative_location].__add__(1) for array_relative_location in xrange(rel_array_left, rel_array_right, 1)] # just returns the exact item, a is unaffected?
return(numpy.array(a)*self.norm_factor)
def __kde_smooth(self, loc, reads, resolution, bandwidth, view_wid, read_shift=100):
"""
Internal abstraction for kde smoothing
Returns a new array
"""
# Shift the reads
newr = []
for r in reads:
if r[2] in positive_strand_labels:
newr.append(float((r[0] + read_shift) - loc["left"])) # Must be floats for kde to work
elif r[2] in negative_strand_labels:
newr.append(float((r[1] - read_shift) - loc["left"]))
a = linspace(0, loc["right"] - loc["left"], view_wid)
# Hack gaussian_kde()
def covariance_factor(self):
return 0.02
kde = stats.gaussian_kde(newr)
setattr(kde, 'covariance_factor', covariance_factor.__get__(kde, type(kde)))
kde._compute_covariance()
        kk = kde.evaluate(a) * 1000000 # rescale to get into integer range.
res = array(kk)
return(res)
def get_all_reads_on_chromosome(self, chrom, strand=None):
"""
**Purpose**
Get all of the reads on chromosomes.
**Arguments**
chromosome (Required)
the name of the chromosome to pull from.
strand (Optional)
                select + or - strands and only collect those.
**Returns**
An iterator to collect the data from.
You must process the data in some sort of for loop:
for read in trk.get_all_reads_on_chromosome("1")
"""
assert chrom, "You must provide a chromosome"
        assert chrom in self.get_chromosome_names(), "chromosome '%s' not found in this track" % chrom
if not self._c:
self._c = self._connection.cursor()
if len(chrom) < 30: # small security check
table_name = "chr_%s" % chrom
if strand:
result = self._c.execute("SELECT * FROM %s WHERE strand=?" % table_name, strand)
else:
result = self._c.execute("SELECT * FROM %s" % table_name)
#reads = self._c.fetchall()
return(result)
def get_array_chromosome(self, chrom, read_extend=0, strand=None, resolution=1, _silent=False, **kargs):
"""
**Purpose**
get the entire array data for the chromosome
**Arguments**
chromosome (Required)
a number '1', string "1", or X, Y
strand (Optional, default = None, ie. collect and merge both strands)
strand, but only valid for stranded tracks
if "+" return only that strand, if '-' return only the negative
                strand (will recognise several forms of strand, e.g. F/R, +/-)
NOT SUPPORTED AT THIS TIME
resolution (default = 1bp)
nbp resolution required (you should probably send a float for accurate rendering)
read_extend (Optional, default = 0)
extend the read length to 'fill in the peak'
if the original reads are 36bp, then add ~70bp to give an
estimated size of the peak.
If the reads are end-based, then set this to the estimated
size of the DNA shear.
Use a read_extend of 0 to return the actual frags.
**Returns**
            a 'numpy.array([0, 1, 2 ... n], dtype=integer)' contiguous array of integers
or a tuple containing two arrays, one for each strand.
"""
if strand:
raise NotImplementedError("Eh... strand not supported yet...")
c = self._connection.cursor()
# Find out if we already have this array:
c.execute("SELECT chromosome FROM pre_build WHERE (chromosome=? AND read_extend=?)", (chrom, read_extend))
result = c.fetchone()
if not result:
if not _silent: # It purposely misses the cache when building the track
config.log.warning('Cache miss on chromosome=%s, read_extend=%s' % (chrom, read_extend))
return(self.__cache_miss_get_array_chromosome(chromosome=chrom, read_extend=read_extend)) # Don't have... :(
# The above is already * self.norm_factor
# have a changed copy:
c.execute("SELECT array_blob FROM pre_build WHERE (chromosome=? AND read_extend=?)", (chrom, read_extend))
return(self._unformat_data(c.fetchone()[0])) # DO NOT multiply the below result by norm_factor
# The prebuilt causes a cache _miss and a return above. norm_factor is applied at the end of
# __cache_miss_get_array_chromosome() and the result is stored in the array_blob
def __cache_miss_get_array_chromosome(self, chromosome, strand=None, resolution=1, read_extend=0, **kargs):
# Generate the chromosome array for a cache miss
if not self._c:
self._c = self._connection.cursor()
table_name = "chr_%s" % chromosome
self._c.execute("SELECT * FROM %s" % table_name)
reads = sorted(self._c.fetchall(), key=itemgetter(0)) # shouldn't this be 1?
# I need to find the right most read to estimate the size of the track array.
right_most = reads[-1][1]+read_extend+1000 # Add a large enough pad onto the end, particularly for weird data with ultra long reads
# make an array.
a = [0] * int( (right_most+resolution)/resolution ) # Fast list allocation
# Python lists are much faster for this than numpy or array
len_a = len(a) # == right_most+resolution
for r in reads:
read_left, read_right, strand = r
if read_extend > 0: # if == 0 then use the total frag size
if strand == "+":
read_right += (read_extend + 1) # coords are open
elif strand == "-" :
read_left -= read_extend
read_right += 1 # coords are open
rel_array_left = read_left
rel_array_right = read_right
if resolution != 1:
rel_array_left = int(read_left // resolution)
rel_array_right = int(read_right // resolution)
if rel_array_left <= 0:
rel_array_left = 0
if rel_array_right > len_a: # This should never happen?
rel_array_right = len_a
# fold up to 1 liner
# This one liner does not work for some reason.
#[a[array_relative_location] + 1 for array_relative_location in xrange(rel_array_left, rel_array_right, 1)]
for array_relative_location in range(rel_array_left, rel_array_right, 1):
a[array_relative_location] += 1
#print "array_len", len(a)
return(numpy.array(a)*self.norm_factor) # NORMFACTOR (If any) is done HERE!
def get_reads(self, loc, strand=None):
"""
**Purpose**
get all of the sequence reads between location 'loc' and return
it formatted as a list of tuples: (left, right, strand), seq reads.
**Arguments**
loc (Required)
a valid location or string location.
**Returns**
a list containing all of the reads between loc.
"""
if not isinstance(loc, location):
loc = location(loc=loc)
if self.gl_mem_cache: # Use the mem cache if available
# work out which of the buckets is required:
left_buck = int((loc["left"]-1-delta)/config.bucket_size)*config.bucket_size
right_buck = int((loc["right"]+delta)/config.bucket_size)*config.bucket_size
buckets_reqd = list(range(left_buck, right_buck+config.bucket_size, config.bucket_size)) # make sure to get the right spanning and left spanning sites
# get the ids reqd.
loc_ids = set()
if buckets_reqd:
for buck in buckets_reqd:
if buck in self.gl_mem_cache.buckets[loc["chr"]]:
loc_ids.update(self.gl_mem_cache.buckets[loc["chr"]][buck]) # set = unique ids
# loc_ids is a set, and has no order.
#print loc_ids
for index in loc_ids:
#print loc.qcollide(self.linearData[index]["loc"]), loc, self.linearData[index]["loc"]
if loc.qcollide(self.linearData[index]["loc"]):
# result expected in form :read_left, read_right, strand
result.append((self.linearData[index]["loc"]['left'], self.linearData[index]["loc"]['right'], self.linearData[index]["strand"]))
#if len(loc["chr"]) < 30: # small security measure.
table_name = "chr_%s" % loc["chr"]
#result = self._connection.execute("SELECT * FROM %s WHERE (?>=left AND ?<=right) OR (?>=left AND ?<=right) OR (left<=? AND right>=?) OR (?<=left AND ?>=right)" % table_name,
# (loc["left"], loc["left"], loc["right"], loc["right"], loc["left"], loc["right"], loc["left"], loc["right"]))
# This is the code used in location.collide():
#self["right"] >= loc["left"] and self["left"] <= loc["right"]
result = self._connection.execute("SELECT left, right, strand FROM %s WHERE (right >= ? AND left <= ?)" % table_name,
(loc["left"], loc["right"]))
#result = None
result = result.fetchall() # safer for empty lists and reusing the cursor
if result and strand: # sort out only this strand
if strand in positive_strand_labels:
strand_to_get = positive_strand_labels
elif strand in negative_strand_labels:
strand_to_get = negative_strand_labels
newl = [r for r in result if r[2] in strand_to_get]
result = newl
return(result)
def get_read_count(self, loc):
"""
**Purpose**
get the number of reads within the location specified
**Arguments**
loc (Required)
a valid location or string location.
**Returns**
an float (or 0.0) containing the number of reads falling within
the location string.
"""
if not self._c:
self._c = self._connection.cursor()
if not isinstance(loc, location):
loc = location(loc=loc)
table_name = "chr_%s" % loc["chr"]
self._c.execute("SELECT left, right, strand FROM %s WHERE (right >= ? AND left <= ?)" % table_name,
(loc["left"], loc["right"]))
return(len(self._c.fetchall())*self.norm_factor)
def get_chromosome_names(self):
"""
**Purpose**
Return a list of all the valid chromosome names in the database
**Arguments**
None
**Returns**
A list of strings containing all of the chromosome names in the track
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome FROM main")
r = [i[0] for i in self._c.fetchall()]
return(set(r))
def get_numreads_on_chromosome(self, name):
"""
**Purpose**
            Return the number of reads on chromosome 'name'
**Arguments**
name (Required)
                get the number of reads on chromosome 'name'
**Returns**
An integer containing the number of reads
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome, seq_reads FROM main WHERE chromosome=?", (str(name), ))
r = self._c.fetchone()
return(r[1])
def get_total_num_reads(self):
"""
**Purpose**
            Return the total number of reads for this track.
**Arguments**
None
**Returns**
An integer containing the number of reads
"""
if not self._c:
self._c = self._connection.cursor()
self._c.execute("SELECT chromosome, seq_reads FROM main")
r = [int(i[1]) for i in self._c.fetchall()]
return(sum(r))
def _debug__print_all_tables(self):
c = self._connection.cursor()
c.execute("SELECT * FROM main")
result = c.fetchall()
print("Main:")
for item in result:
print(item)
print("Chr_Tables:")
for item in result:
table_name = "chr_%s" % str(item[0])[0] # stop injections.
print(" Table", table_name)
c.execute("SELECT * FROM %s" % table_name)
chr_table_res = c.fetchall()
for i in chr_table_res:
print(" ", i)
c.close()
def saveBedGraph(self, filename, bin_size=100, read_extend=0):
'''
**Purpose**
Save the track as a BedGraph
Will take into account the norm_factor if available
**Arguments**
filename (Required)
filename to save BG to
bin_size (Optional, default=100)
the size for each bin (resolution) of the BedGraph
read_extend (Optional, default=0)
extend the reads on the 3' end by this many base pairs.
set to 0 if your reads are the total fragments (e.g. paired-end data).
**Returns**
None
'''
assert filename, 'You must provide a filename'
with open(filename, 'w') as oh:
min_position = 0 # Could guess, but the below code will trim the padding zeros
for chrom in sorted(self.get_chromosome_names()):
this_chrom = self.get_array_chromosome(chrom, read_extend=read_extend)
config.log.info("Doing Chromosome '%s'" % chrom)
max_position = len(this_chrom)
for l in range(min_position, max_position, bin_size):
value = numpy.mean(this_chrom[l:l+bin_size])
if value > 0.0: # If zero then it is okay to skip the loc.
                        oh.write('chr%s\t%s\t%s\t%s\n' % (chrom, l, l+bin_size, value)) # data is already norm_factor corrected
config.log.info("saveBedGraph: Saved '%s'" % filename)
return(None)
def pileup(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, stranded=False, respect_strand=True, raw_tag_filename=None,
norm_by_read_count=False, pointify=True,
**kargs):
"""
**Purpose**
draw cumulative 'pileups' of the tag density in the track based on a genelist
containing a "loc" tag
**Arguments**
genelist (Required)
A genelist-like object containing a "loc"-like key tag
key (Optional, default="loc")
the key to use for the locations. Must contain valid location data:
chr1:1000-1011 etc. draw_pileup() will use the centre of the location if it is a
span.
filename (Required)
the name of the image file to save the pileup graph to.
normalize (Optional, default=True)
IMPORTANT
If you are using the norm_factor system, you MUST set this to False!
bin_size (Optional, default=500)
bin_size to use for the heatmap
pointify (Optional, default=True)
                convert the genomic locations in 'genelist' to a single point
(Usually used in combination with window_size).
window_size (Optional, default=5000)
The window size +- around the centre of the peak to build the tag density map
from
read_extend (Optional, default=200)
extend the read x bp either 5' or 3' depending upon the strand of the read.
If use_kde is true then this will be the 'tag shift' value instead.
use_kde (Optional)
Use KDE versions of the tracks instead (Experimental)
simple_cleanup (False)
remove rows from the pileup that have < simple_cleanup tag counts
stranded (Optional, default=False)
build a stranded pileup, with + reads in blue and - reads in red
respect_strand (Optional, default=True)
If available, respect the orientation of the strand from the genelist.
This is useful if you are, say, using the TSS's and want to maintain the
orientation with respect to the transcription direction.
norm_by_read_count (Optional, default=False)
If you are not using a norm_factor for this library then you probably want to set this to True.
It will divide the resulting number of reads by the total number of reads,
i.e. it will account for differences in library sizes.
**Returns**
            If successful, returns a list of lists containing a single entry for each
entry in the original genelist (in the same order as the genelist).
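        **Example (an illustrative sketch; 'peaks' is any genelist with a "loc" key)**
            trk.pileup(genelist=peaks, filename="pileup.png", window_size=5000, read_extend=200)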
"""
assert filename, "you must specify a filename"
assert genelist, "you must specify a genelist"
assert key in genelist.linearData[0], "the genelist appears to be lacking a loc key"
if stranded:
return(self.__draw_pileup_stranded(genelist, filename, window_size, **kargs))
else:
return(self.__draw_pileup_normal(genelist=genelist, key=key, filename=filename,
heatmap_filename=heatmap_filename,
bin_size=bin_size, window_size=window_size, read_extend=read_extend, use_kde=use_kde,
simple_cleanup=simple_cleanup, pointify=pointify,
norm_by_read_count=norm_by_read_count,
only_do=only_do, raw_tag_filename=raw_tag_filename, respect_strand=respect_strand,
**kargs))
'''
def __draw_pileup_normal(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, respect_strand=True, raw_tag_filename=None, mask_zero=False,
norm_by_read_count=False, normalize=False,
**kargs):
'''
def __draw_pileup_stranded(self, genelist=None, filename=None, bandwidth=300, **kargs):
"""
**Purpose**
Build a histogram density plot of the paired reads.
This will estimate the approximate observed shear size in your chip-seq data.
It pairs the data together for all pairs of all reads within a specified bandwidth
then outputs a plot of the resulting histogram.
**Arguments**
filename
the filename to save the image(s) to.
genelist (Required)
some sort of genelistlike object with a 'loc' key containing genomic coordinates
bandwidth (Optional, default=300)
area around the centre of the peak to build the cumulative distributions.
**Returns**
None
and some image files in <base_filename>.<draw_mode>
"""
# step along each chromosome. Quit if there are no reads in the window
if not self._c:
self._c = self._connection.cursor()
hist = {"+": zeros(bandwidth+1), "-": zeros(bandwidth+1)}
# get a sorted list of all the locs I am going to use.
all_locs = genelist['loc']
all_locs.sort()
p = progressbar(len(genelist))
for n, read in enumerate(all_locs):
loc = read.pointify().expand(bandwidth//2)
for s in self.get_reads(loc): # get reads returns all overlapping reads. I need to trim
# the edges to stop negative array positions
loc_left = s[0] - loc["left"]
loc_right = s[1] - loc["left"]
if s[2] == "+" and loc_left > 0:
loc_left = s[0] - loc["left"]
hist["+"][loc_left] += 1
elif s[2] == "-" and loc_right < bandwidth-1:
loc_right = s[1] - loc["left"]
hist["-"][loc_right] += 1
p.update(n)
if not self._draw:
self._draw = draw(self)
# now plot:
fig = self._draw.getfigure()
ax = fig.add_subplot(111)
ax.plot(hist["+"], color="blue")
ax.plot(hist["-"], color="red")
max_left = argmax(hist["+"])
max_right = argmax(hist["-"])
realfilename = self._draw._saveFigure(fig, filename)
config.log.info("Saved shear_size_pileup '%s'" % realfilename)
return(hist)
def __draw_pileup_normal(self, genelist=None, key="loc", filename=None, heatmap_filename=None,
bin_size=500, window_size=5000, read_extend=200, use_kde=False, simple_cleanup=False,
only_do=False, respect_strand=True, raw_tag_filename=None, mask_zero=False,
norm_by_read_count=False, pointify=True,
**kargs):
"""
The normal pileup views
"""
        # See if there is a proper strand key in there somewhere:
if respect_strand and (not "strand" in genelist.linearData[0]):
config.log.warning("I could not find the 'strand' key, setting respect_strand to False")
respect_strand = False
n = 0
h = 0
pileup = None
binned_data = None
setup_bins = False
number_of_tags_in_library = 1.0 # For code laziness
if norm_by_read_count:
            number_of_tags_in_library = self.get_total_num_reads()/float(1e6) # div 1e6 for number niceness.
# get a sorted list of all the locs I am going to use.
# I need to do this:
gl_sorted = genelist.deepcopy()
gl_sorted.sort(key)
all_locs = gl_sorted[key]
strands = ['+'] * len(all_locs)
if respect_strand:
strands = gl_sorted['strand']
curr_cached_chrom = None
cached_chrom = None
p = progressbar(len(genelist))
for i, read in enumerate(zip(all_locs, strands)):
l = read[0]
if pointify:
l = l.pointify()
if window_size:
l = l.expand(window_size)
# I can dispose and free memory as the locations are now sorted:
# See if the read_extend is already in the cache:
if l['chr'] != curr_cached_chrom:
curr_cached_chrom = l['chr']
                cached_chrom = self.get_array_chromosome(l['chr'], read_extend) # auto-deals with cache issues
# UseKDE is now unimplemented:
'''
if not use_kde:
a = self.get(l, read_extend=read_extend) # read_extend=shear size
else:
a = self.get(l, read_extend=read_extend, kde_smooth=True, view_wid=window_size) # read_extend=tag shift
'''
assert l['left'] >= 0, 'left genome coordinate is less than zero "%s"' % l
if l['right'] > cached_chrom.shape[0]:
# Trouble, need to fill in the part of the array with zeros
# Possible error here it l['left'] is also off the array?
expected_width = l['right'] - l['left']
actual_width = cached_chrom.shape[0] - l['left']
a = cached_chrom[l['left']:cached_chrom.shape[0]] # stop wrap around
a = numpy.pad(a, (0,expected_width-actual_width), mode='constant')
#print a, a.shape
                config.log.warning('Asked for part of the chromosome outside of the array "%s", skipping this loc' % str(l))
continue
else:
a = cached_chrom[l['left']:l['right']]
#print read
if respect_strand:
# positive strand is always correct, so I leave as is.
# For the reverse strand all I have to do is flip the array.
if read[1] in negative_strand_labels:
a = a[::-1]
# It is possible to ask for an array longer than the length of the array
# NOFIX?
#print l, a.shape
if pileup is None: # numpy __nonzero__ retardedness
pileup = a
binned_data = array([utils.bin_data(a, bin_size)])
setup_bins = True
else:
if sum(a) > simple_cleanup: # Only keep if tag count is > simple_cleanup
pileup = pileup + a
if heatmap_filename or raw_tag_filename:
if setup_bins:
#print binned_data, [utils.bin_data(a, bin_size)]
binned_data = append(binned_data, [utils.bin_data(a, bin_size)], axis=0)
if only_do and n > only_do:
#print only_do, n
break
p.update(i)
if not self._draw:
self._draw = draw()
if pileup is None: # numpy iszero testing:
raise AssertionError('no data found, either the bed is empty, has no regions or the trk file is empty')
if norm_by_read_count:
config.log.info('Normalized by read count')
pileup /= float(number_of_tags_in_library) # This one should not be used if norm_factor is also used
# This one SHOULD be used, even if norm_factor is True
pileup /= float(len(genelist)) # convert it back to a relative tag density.
# matplotlib pileup graph
fig = self._draw.getfigure(**kargs)
ax = fig.add_subplot(111)
ax.plot(pileup)
real_filename = self._draw.savefigure(fig, filename)
config.log.info("Saved pileup tag density to '%s'" % filename)
other_args = {}
if "vmax" in kargs:
other_args["vmax"] = kargs["vmax"]
if "vmin" in kargs:
other_args["vmin"] = kargs["vmin"]
# spin this off into a .heatmap() method?
if heatmap_filename or raw_tag_filename:
binned_data = numpy.delete(binned_data, numpy.s_[-1:], 1) # kill the rightmost empty col.
if raw_tag_filename:
binned_data = numpy.delete(binned_data, numpy.s_[-1:], 1)
oh = open(raw_tag_filename, "w")
writer = csv.writer(oh, dialect=csv.excel_tab)
writer.writerows(binned_data)
oh.close()
config.log.info("saved raw_tag_file to '%s'" % raw_tag_filename)
if heatmap_filename:
if other_args:
real_filename = self._draw._simple_heatmap(data=binned_data, filename=heatmap_filename,
dpi=150, figsize=(6, 24), aspect="long", **other_args)
else:
real_filename = self._draw._simple_heatmap(data=binned_data, filename=heatmap_filename,
dpi=150, figsize=(6, 24), aspect="long")
config.log.info("saved heatmap to '%s'" % real_filename)
ret = {"pileup": pileup}
if heatmap_filename or raw_tag_filename: #if binned_data:
# __nonzero__ not set in numpy arrays, so assume binned_data is valid
# if doing heatmap
ret["binned_data"] = binned_data
return(ret)
def measure_frip(self, genelist=None, sample=None, delta=None, pointify=False):
"""
**Purpose**
Measure the FRiP 'Fraction of Reads in Peaks' as defined by Landt et al., 2012;
Gen Res, 22:1813-1831.
Essentially the fraction of reads inside a list of peaks (from the genelist).
**Arguments**
genelist (Required)
A list of peaks, must have a "loc" key containing the location data.
Ideally this is the peak spans reported by your peak discovery tool,
but you can provide delta=xxx bp argument to expand the locations.
sample (Optional)
sample only the first n peaks. By default, all peaks are used.
delta (Optional, default=None)
a value to expand the locations by + and - delta.
pointify (Optional, default=False)
'pointify' (Convert a span of base pairs to the centre point).
Executed before 'delta'
**Returns**
The FRiP score, the total number of reads and the number of reads inside the peaks
"""
assert genelist, "You must provide a genelist"
assert "loc" in genelist.linearData[0], "The genelist does not appear to have a 'loc' key"
if sample:
            gl = genelist[sample]
else:
gl = genelist.deepcopy() # get a copy as I may mess with it.
if pointify:
gl = gl.pointify("loc")
if delta:
gl = gl.expand("loc", delta)
# work out the total number of reads in this library
chr_names = self.get_chromosome_names()
total_reads = 0
for chrom in chr_names:
total_reads += self.get_numreads_on_chromosome(chrom)
num_reads = 0
p = progressbar(len(gl))
for idx, item in enumerate(gl):
num_reads += self.get_read_count(item["loc"])
p.update(idx)
return({"FRiP": num_reads/float(total_reads), "Total number of reads": total_reads, "Number of reads in peaks": num_reads})
def qc_encode_idr(self, chrom_sizes=None, filename=None, max_shift=400, **kargs):
"""
**Purpose**
Perform QC for ChIP-seq libraries, as explained
https://sites.google.com/site/anshulkundaje/projects/idr
and in Landt et al., 2012, Gen Res, 22:1813-1831.
**Arguments**
chromosome_sizes (Required)
You must provide a dict, containing the chromosome names and the
number of base pairs.
For mm9 this data is available as part of glbase and can be specified:
trk.qc_encode_idr(chromosome_sizes=gldata.chromsizes["mm9"])
Potentially, hg18, hg19, mm8 and mm9 will be available too. maybe.
filename (Required)
filename to save the plot to
**Returns**
NSC and RSC values. (See Landt et al., 2012; Gen Res, 22:1813-1831) for
details.
"""
assert chrom_sizes, "You must provide chromosome sizes"
assert filename, "You must provide a filename"
if not self._draw:
self._draw = draw()
# I only need to generate the + strand once.
plus_strands = {}
minu_strands = {}
# constructing a numpy array is excessively large. I only need to store pairs of reads
all_chroms = set(self.get_chromosome_names()) & set([i.replace("chr", "") for i in list(chrom_sizes.keys())]) # only iterate ones in both list
all_p = numpy.array([])
all_m = numpy.array([])
res = []
pears = numpy.zeros([max_shift, len(all_chroms)])
for idx, chrom in enumerate(all_chroms):
this_p = numpy.array([r[0] for r in self.get_all_reads_on_chromosome(chrom, "+")])
this_m = numpy.array([r[1] for r in self.get_all_reads_on_chromosome(chrom, "-")])
p = progressbar(max_shift)
for n in range(max_shift):
this_m = this_m - 1
union = numpy.union1d(this_p, this_m) # only ones I will have to look at
#union = numpy.intersect1d(this_p, this_m)
#xor_union = numpy.setxor1d(this_p, this_m)
#union = numpy.append(union, xor_union)
pair_p = numpy.bincount(this_p, minlength=max(this_p.max(), this_m.max())+1)[union]
pair_m = numpy.bincount(this_m, minlength=max(this_p.max(), this_m.max())+1)[union]
pears[n, idx] = pearsonr(pair_p, pair_m)[0]
p.update(n)
"""
fig = self._draw.getfigure()
ax = fig.add_subplot(111)
ax.plot(pair_p, pair_m, 'o', mec="none", alpha=0.2)
ax.set_title("Pearson: %.3f" % pears[n, idx])
fig.savefig("plots/test_%s_%s.png"% (chrom, n))
"""
print("Done chromosome '%s'" % chrom)
print(pears)
for row in pears:
res.append(numpy.average(row))
print(res)
fig = self._draw.getfigure(**kargs)
ax = fig.add_subplot(111)
ax.plot(numpy.arange(len(res)), res)
self._draw.savefigure(fig, filename)
ret = {"NSC": 0.0, "RSC": 0.0}
return(ret)
if __name__ == "__main__":
"""
Current 15 s
"""
import random, time
from .location import location
from .genelist import genelist
s = time.time()
print("Building...")
t = track(filename="testold.trk2", name="test", new=True)
for _ in range(10000):
l = random.randint(0, 100000)
t.add_location(location(chr="1", left=l, right=l+35), strand="+")
t.finalise()
e = time.time()
print(e-s, "s")
#t.finalise()
print(t.get_reads('chr1:100-200'))
s = time.time()
print("Fake bed...")
# fake a bed
newb = []
for _ in range(1000):
l = random.randint(1000, 100000) # 1000 is for the window size. -ve locs are real bad.
newb.append({"loc": location(chr="1", left=l, right=l+200), "strand": "+"})
bed = genelist()
bed.load_list(newb)
e = time.time()
print(e-s, "s")
t = track(filename="testold.trk2")
print("Pileup...")
import cProfile, pstats
cProfile.run("t.pileup(genelist=bed, filename='test.png', bin_size=10, window_size=1000)", "profile.pro")
p = pstats.Stats("profile.pro")
p.strip_dirs().sort_stats("time").print_stats()
print(t.pileup(genelist=bed, filename='/tmp/test2.png', respect_strand=True))
print(t.pileup(genelist=bed, filename='/tmp/test2.png', pointify=False, respect_strand=True))
print(bed.all())
print(t.pileup(genelist=bed, filename='/tmp/test2.png', pointify=False, window_size=0, respect_strand=True))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000, log=None))
print(t.heatmap(genelist=bed, raw_heatmap_filename="/tmp/test.tsv", filename='/tmp/test.png', bin_size=10, window_size=1000, log=None, respect_strand=True))
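    # Additional illustrative calls (sketch): a FRiP score and a BedGraph export,
    # using only methods defined above; output paths are arbitrary.
    print(t.measure_frip(genelist=bed, pointify=True, delta=500))
    t.saveBedGraph('/tmp/test.bedgraph', bin_size=100, read_extend=200)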
|
py | 1a5473ca415333813a1e2f70b9c92f89d0880416 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'estocator.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a5473f61bafff610870ad07b0a227dbfaf9d2b5 | from __future__ import unicode_literals
import datetime
import decimal
import itertools
from wtforms import widgets
from wtforms.compat import text_type, izip
from wtforms.i18n import DummyTranslations
from wtforms.validators import StopValidation
from wtforms.utils import unset_value
__all__ = (
'BooleanField', 'DecimalField', 'DateField', 'DateTimeField', 'FieldList',
'FloatField', 'FormField', 'IntegerField', 'RadioField', 'SelectField',
'SelectMultipleField', 'StringField',
)
class Field(object):
"""
Field base class
"""
errors = tuple()
process_errors = tuple()
raw_data = None
validators = tuple()
widget = None
_formfield = True
_translations = DummyTranslations()
do_not_call_in_templates = True # Allow Django 1.4 traversal
def __new__(cls, *args, **kwargs):
if '_form' in kwargs and '_name' in kwargs:
return super(Field, cls).__new__(cls)
else:
return UnboundField(cls, *args, **kwargs)
def __init__(self, label=None, validators=None, filters=tuple(),
description='', id=None, default=None, widget=None,
_form=None, _name=None, _prefix='', _translations=None,
_meta=None):
"""
Construct a new field.
:param label:
The label of the field.
:param validators:
A sequence of validators to call when `validate` is called.
:param filters:
A sequence of filters which are run on input data by `process`.
:param description:
A description for the field, typically used for help text.
:param id:
An id to use for the field. A reasonable default is set by the form,
and you shouldn't need to set this manually.
:param default:
The default value to assign to the field, if no form or object
input is provided. May be a callable.
:param widget:
If provided, overrides the widget used to render the field.
:param _form:
The form holding this field. It is passed by the form itself during
construction. You should never pass this value yourself.
:param _name:
The name of this field, passed by the enclosing form during its
construction. You should never pass this value yourself.
:param _prefix:
The prefix to prepend to the form name of this field, passed by
the enclosing form during construction.
:param _translations:
A translations object providing message translations. Usually
passed by the enclosing form during construction. See
:doc:`I18n docs <i18n>` for information on message translations.
:param _meta:
If provided, this is the 'meta' instance from the form. You usually
don't pass this yourself.
If `_form` and `_name` isn't provided, an :class:`UnboundField` will be
returned instead. Call its :func:`bind` method with a form instance and
a name to construct the field.
"""
if _translations is not None:
self._translations = _translations
if _meta is not None:
self.meta = _meta
elif _form is not None:
self.meta = _form.meta
else:
raise TypeError("Must provide one of _form or _meta")
self.default = default
self.description = description
self.filters = filters
self.flags = Flags()
self.name = _prefix + _name
self.short_name = _name
self.type = type(self).__name__
self.validators = validators or list(self.validators)
self.id = id or self.name
self.label = Label(self.id, label if label is not None else self.gettext(_name.replace('_', ' ').title()))
if widget is not None:
self.widget = widget
for v in self.validators:
flags = getattr(v, 'field_flags', ())
for f in flags:
setattr(self.flags, f, True)
def __unicode__(self):
"""
        Returns an HTML representation of the field. For more powerful rendering,
see the `__call__` method.
"""
return self()
def __str__(self):
"""
        Returns an HTML representation of the field. For more powerful rendering,
see the `__call__` method.
"""
return self()
def __html__(self):
"""
        Returns an HTML representation of the field. For more powerful rendering,
see the :meth:`__call__` method.
"""
return self()
def __call__(self, **kwargs):
"""
Render this field as HTML, using keyword args as additional attributes.
This delegates rendering to
:meth:`meta.render_field <wtforms.meta.DefaultMeta.render_field>`
whose default behavior is to call the field's widget, passing any
keyword arguments from this call along to the widget.
In all of the WTForms HTML widgets, keyword arguments are turned to
HTML attributes, though in theory a widget is free to do anything it
wants with the supplied keyword arguments, and widgets don't have to
even do anything related to HTML.
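        For example (illustrative; exact markup depends on the widget)::
            form.name(class_='highlight', placeholder='Your name')
            # renders roughly: <input class="highlight" id="name" name="name" placeholder="Your name" type="text" value="">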
"""
return self.meta.render_field(self, kwargs)
def gettext(self, string):
"""
Get a translation for the given message.
This proxies for the internal translations object.
:param string: A unicode string to be translated.
:return: A unicode string which is the translated output.
"""
return self._translations.gettext(string)
def ngettext(self, singular, plural, n):
"""
Get a translation for a message which can be pluralized.
:param str singular: The singular form of the message.
:param str plural: The plural form of the message.
:param int n: The number of elements this message is referring to
"""
return self._translations.ngettext(singular, plural, n)
def validate(self, form, extra_validators=tuple()):
"""
Validates the field and returns True or False. `self.errors` will
contain any errors raised during validation. This is usually only
called by `Form.validate`.
Subfields shouldn't override this, but rather override either
`pre_validate`, `post_validate` or both, depending on needs.
:param form: The form the field belongs to.
:param extra_validators: A sequence of extra validators to run.
"""
self.errors = list(self.process_errors)
stop_validation = False
# Call pre_validate
try:
self.pre_validate(form)
except StopValidation as e:
if e.args and e.args[0]:
self.errors.append(e.args[0])
stop_validation = True
except ValueError as e:
self.errors.append(e.args[0])
# Run validators
if not stop_validation:
chain = itertools.chain(self.validators, extra_validators)
stop_validation = self._run_validation_chain(form, chain)
# Call post_validate
try:
self.post_validate(form, stop_validation)
except ValueError as e:
self.errors.append(e.args[0])
return len(self.errors) == 0
def _run_validation_chain(self, form, validators):
"""
Run a validation chain, stopping if any validator raises StopValidation.
        :param form: The Form instance this field belongs to.
:param validators: a sequence or iterable of validator callables.
:return: True if validation was stopped, False otherwise.
"""
for validator in validators:
try:
validator(form, self)
except StopValidation as e:
if e.args and e.args[0]:
self.errors.append(e.args[0])
return True
except ValueError as e:
self.errors.append(e.args[0])
return False
def pre_validate(self, form):
"""
Override if you need field-level validation. Runs before any other
validators.
:param form: The form the field belongs to.
"""
pass
def post_validate(self, form, validation_stopped):
"""
Override if you need to run any field-level validation tasks after
normal validation. This shouldn't be needed in most cases.
:param form: The form the field belongs to.
:param validation_stopped:
`True` if any validator raised StopValidation.
"""
pass
def process(self, formdata, data=unset_value):
"""
Process incoming data, calling process_data, process_formdata as needed,
and run filters.
If `data` is not provided, process_data will be called on the field's
default.
Field subclasses usually won't override this, instead overriding the
process_formdata and process_data methods. Only override this for
special advanced processing, such as when a field encapsulates many
inputs.
"""
self.process_errors = []
if data is unset_value:
try:
data = self.default()
except TypeError:
data = self.default
self.object_data = data
try:
self.process_data(data)
except ValueError as e:
self.process_errors.append(e.args[0])
if formdata:
try:
if self.name in formdata:
self.raw_data = formdata.getlist(self.name)
else:
self.raw_data = []
self.process_formdata(self.raw_data)
except ValueError as e:
self.process_errors.append(e.args[0])
try:
for filter in self.filters:
self.data = filter(self.data)
except ValueError as e:
self.process_errors.append(e.args[0])
def process_data(self, value):
"""
Process the Python data applied to this field and store the result.
This will be called during form construction by the form's `kwargs` or
`obj` argument.
:param value: The python object containing the value to process.
"""
self.data = value
def process_formdata(self, valuelist):
"""
Process data received over the wire from a form.
This will be called during form construction with data supplied
through the `formdata` argument.
:param valuelist: A list of strings to process.
"""
if valuelist:
self.data = valuelist[0]
def populate_obj(self, obj, name):
"""
Populates `obj.<name>` with the field's data.
:note: This is a destructive operation. If `obj.<name>` already exists,
it will be overridden. Use with caution.
"""
setattr(obj, name, self.data)
class UnboundField(object):
_formfield = True
creation_counter = 0
def __init__(self, field_class, *args, **kwargs):
UnboundField.creation_counter += 1
self.field_class = field_class
self.args = args
self.kwargs = kwargs
self.creation_counter = UnboundField.creation_counter
def bind(self, form, name, prefix='', translations=None, **kwargs):
kw = dict(
self.kwargs,
_form=form,
_prefix=prefix,
_name=name,
_translations=translations,
**kwargs
)
return self.field_class(*self.args, **kw)
def __repr__(self):
return '<UnboundField(%s, %r, %r)>' % (self.field_class.__name__, self.args, self.kwargs)
class Flags(object):
"""
Holds a set of boolean flags as attributes.
Accessing a non-existing attribute returns False for its value.
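    Example (illustrative)::
        flags = Flags()
        flags.required = True
        assert 'required' in flags
        assert flags.missing is False  # unknown attributes read as False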
"""
def __getattr__(self, name):
if name.startswith('_'):
return super(Flags, self).__getattr__(name)
return False
def __contains__(self, name):
return getattr(self, name)
def __repr__(self):
flags = (name for name in dir(self) if not name.startswith('_'))
return '<wtforms.fields.Flags: {%s}>' % ', '.join(flags)
class Label(object):
"""
An HTML form label.
"""
def __init__(self, field_id, text):
self.field_id = field_id
self.text = text
def __str__(self):
return self()
def __unicode__(self):
return self()
def __html__(self):
return self()
def __call__(self, text=None, **kwargs):
if 'for_' in kwargs:
kwargs['for'] = kwargs.pop('for_')
else:
kwargs.setdefault('for', self.field_id)
attributes = widgets.html_params(**kwargs)
return widgets.HTMLString('<label %s>%s</label>' % (attributes, text or self.text))
def __repr__(self):
return 'Label(%r, %r)' % (self.field_id, self.text)
class SelectFieldBase(Field):
option_widget = widgets.Option()
"""
Base class for fields which can be iterated to produce options.
This isn't a field, but an abstract base class for fields which want to
provide this functionality.
"""
def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
super(SelectFieldBase, self).__init__(label, validators, **kwargs)
if option_widget is not None:
self.option_widget = option_widget
def iter_choices(self):
"""
Provides data for choice widget rendering. Must return a sequence or
iterable of (value, label, selected) tuples.
"""
raise NotImplementedError()
def __iter__(self):
opts = dict(widget=self.option_widget, _name=self.name, _form=None, _meta=self.meta)
for i, (value, label, checked) in enumerate(self.iter_choices()):
opt = self._Option(label=label, id='%s-%d' % (self.id, i), **opts)
opt.process(None, value)
opt.checked = checked
yield opt
class _Option(Field):
checked = False
def _value(self):
return text_type(self.data)
class SelectField(SelectFieldBase):
widget = widgets.Select()
def __init__(self, label=None, validators=None, coerce=text_type, choices=None, **kwargs):
super(SelectField, self).__init__(label, validators, **kwargs)
self.coerce = coerce
self.choices = choices
def iter_choices(self):
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext('Invalid Choice: could not coerce'))
def pre_validate(self, form):
for v, _ in self.choices:
if self.data == v:
break
else:
raise ValueError(self.gettext('Not a valid choice'))
class SelectMultipleField(SelectField):
"""
No different from a normal select field, except this one can take (and
validate) multiple choices. You'll need to specify the HTML `size`
attribute to the select field when rendering.
"""
widget = widgets.Select(multiple=True)
def iter_choices(self):
for value, label in self.choices:
selected = self.data is not None and self.coerce(value) in self.data
yield (value, label, selected)
def process_data(self, value):
try:
self.data = list(self.coerce(v) for v in value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
try:
self.data = list(self.coerce(x) for x in valuelist)
except ValueError:
raise ValueError(self.gettext('Invalid choice(s): one or more data inputs could not be coerced'))
def pre_validate(self, form):
if self.data:
values = list(c[0] for c in self.choices)
for d in self.data:
if d not in values:
raise ValueError(self.gettext("'%(value)s' is not a valid choice for this field") % dict(value=d))
class RadioField(SelectField):
"""
Like a SelectField, except displays a list of radio buttons.
Iterating the field will produce subfields (each containing a label as
well) in order to allow custom rendering of the individual radio fields.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.RadioInput()
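# Illustrative sketch (not part of the original module): iterating a bound
# RadioField yields one option subfield per choice, each with its own label, so a
# template can lay out the individual radio buttons however it likes. The field is
# assumed to be bound to a hypothetical form named ``form``.
#
# >>> for subfield in form.colour:   # colour = RadioField(choices=[('r', 'Red'), ('b', 'Blue')])
# ...     print('%s %s' % (subfield(), subfield.label()))
# ... # each line renders an <input type="radio"> followed by its matching <label>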
class StringField(Field):
"""
This field is the base for most of the more complicated fields, and
represents an ``<input type="text">``.
"""
widget = widgets.TextInput()
def process_formdata(self, valuelist):
if valuelist:
self.data = valuelist[0]
else:
self.data = ''
def _value(self):
return text_type(self.data) if self.data is not None else ''
class LocaleAwareNumberField(Field):
"""
Base class for implementing locale-aware number parsing.
Locale-aware numbers require the 'babel' package to be present.
"""
def __init__(self, label=None, validators=None, use_locale=False, number_format=None, **kwargs):
super(LocaleAwareNumberField, self).__init__(label, validators, **kwargs)
self.use_locale = use_locale
if use_locale:
self.number_format = number_format
self.locale = kwargs['_form'].meta.locales[0]
self._init_babel()
def _init_babel(self):
try:
from babel import numbers
self.babel_numbers = numbers
except ImportError:
raise ImportError('Using locale-aware decimals requires the babel library.')
def _parse_decimal(self, value):
return self.babel_numbers.parse_decimal(value, self.locale)
def _format_decimal(self, value):
return self.babel_numbers.format_decimal(value, self.number_format, self.locale)
class IntegerField(Field):
"""
A text field, except all input is coerced to an integer. Erroneous input
is ignored and will not be accepted as a value.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(IntegerField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = int(valuelist[0])
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid integer value'))
class DecimalField(LocaleAwareNumberField):
"""
A text field which displays and coerces data of the `decimal.Decimal` type.
:param places:
How many decimal places to quantize the value to for display on form.
If None, does not quantize value.
:param rounding:
How to round the value during quantize, for example
`decimal.ROUND_UP`. If unset, uses the rounding value from the
current thread's context.
:param use_locale:
If True, use locale-based number formatting. Locale-based number
formatting requires the 'babel' package.
:param number_format:
Optional number format for locale. If omitted, use the default decimal
format for the locale.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, places=unset_value, rounding=None, **kwargs):
super(DecimalField, self).__init__(label, validators, **kwargs)
if self.use_locale and (places is not unset_value or rounding is not None):
raise TypeError("When using locale-aware numbers, 'places' and 'rounding' are ignored.")
if places is unset_value:
places = 2
self.places = places
self.rounding = rounding
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
if self.use_locale:
return text_type(self._format_decimal(self.data))
elif self.places is not None:
if hasattr(self.data, 'quantize'):
exp = decimal.Decimal('.1') ** self.places
if self.rounding is None:
quantized = self.data.quantize(exp)
else:
quantized = self.data.quantize(exp, rounding=self.rounding)
return text_type(quantized)
else:
# If for some reason, data is a float or int, then format
# as we would for floats using string formatting.
format = '%%0.%df' % self.places
return format % self.data
else:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
if self.use_locale:
self.data = self._parse_decimal(valuelist[0])
else:
self.data = decimal.Decimal(valuelist[0])
except (decimal.InvalidOperation, ValueError):
self.data = None
raise ValueError(self.gettext('Not a valid decimal value'))
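# Illustrative sketch (not part of the original module): ``places`` only affects how
# the value is rendered back into the text input, not how submitted strings are
# parsed. A worked example of the quantization performed in ``_value`` above:
#
# >>> import decimal
# >>> decimal.Decimal('3.14159').quantize(decimal.Decimal('.1') ** 2)
# Decimal('3.14')
# >>> decimal.Decimal('3.14159').quantize(decimal.Decimal('.1') ** 2, rounding=decimal.ROUND_UP)
# Decimal('3.15')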
class FloatField(Field):
"""
    A text field, except all input is coerced to a float. Erroneous input
is ignored and will not be accepted as a value.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, **kwargs):
super(FloatField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data is not None:
return text_type(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = float(valuelist[0])
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid float value'))
class BooleanField(Field):
"""
    Represents an ``<input type="checkbox">``. Set the ``checked`` status via the
    ``default`` option: any truthy value for ``default``, e.g. ``default="checked"``,
    puts ``checked`` into the HTML element and sets ``data`` to ``True``.
:param false_values:
If provided, a sequence of strings each of which is an exact match
string of what is considered a "false" value. Defaults to the tuple
``('false', '')``
"""
widget = widgets.CheckboxInput()
false_values = ('false', '')
def __init__(self, label=None, validators=None, false_values=None, **kwargs):
super(BooleanField, self).__init__(label, validators, **kwargs)
if false_values is not None:
self.false_values = false_values
def process_data(self, value):
self.data = bool(value)
def process_formdata(self, valuelist):
if not valuelist or valuelist[0] in self.false_values:
self.data = False
else:
self.data = True
def _value(self):
if self.raw_data:
return text_type(self.raw_data[0])
else:
return 'y'
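# Illustrative doctest-style sketch (not part of the original module): anything
# submitted that is not listed in ``false_values`` counts as True, which matters
# when a widget posts e.g. 'off' instead of omitting the checkbox entirely. As
# above, ``DummyPostData`` is a stand-in for MultiDict-style formdata.
#
# >>> class ConsentForm(Form):
# ...     agree = BooleanField('Agree', false_values=('false', 'off', ''))
# >>> ConsentForm(DummyPostData(agree='off')).agree.data
# False
# >>> ConsentForm(DummyPostData(agree='y')).agree.data
# True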
class DateTimeField(Field):
"""
A text field which stores a `datetime.datetime` matching a format.
"""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, format='%Y-%m-%d %H:%M:%S', **kwargs):
super(DateTimeField, self).__init__(label, validators, **kwargs)
self.format = format
def _value(self):
if self.raw_data:
return ' '.join(self.raw_data)
else:
return self.data and self.data.strftime(self.format) or ''
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
"""
Same as DateTimeField, except stores a `datetime.date`.
"""
def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
super(DateField, self).__init__(label, validators, format, **kwargs)
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
self.data = datetime.datetime.strptime(date_str, self.format).date()
except ValueError:
self.data = None
raise ValueError(self.gettext('Not a valid date value'))
class FormField(Field):
"""
Encapsulate a form as a field in another form.
:param form_class:
A subclass of Form that will be encapsulated.
:param separator:
A string which will be suffixed to this field's name to create the
prefix to enclosed fields. The default is fine for most uses.
"""
widget = widgets.TableWidget()
def __init__(self, form_class, label=None, validators=None, separator='-', **kwargs):
super(FormField, self).__init__(label, validators, **kwargs)
self.form_class = form_class
self.separator = separator
self._obj = None
if self.filters:
raise TypeError('FormField cannot take filters, as the encapsulated data is not mutable.')
if validators:
raise TypeError('FormField does not accept any validators. Instead, define them on the enclosed form.')
def process(self, formdata, data=unset_value):
if data is unset_value:
try:
data = self.default()
except TypeError:
data = self.default
self._obj = data
self.object_data = data
prefix = self.name + self.separator
if isinstance(data, dict):
self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
else:
self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)
def validate(self, form, extra_validators=tuple()):
if extra_validators:
raise TypeError('FormField does not accept in-line validators, as it gets errors from the enclosed form.')
return self.form.validate()
def populate_obj(self, obj, name):
candidate = getattr(obj, name, None)
if candidate is None:
if self._obj is None:
raise TypeError('populate_obj: cannot find a value to populate from the provided obj or input data/defaults')
candidate = self._obj
setattr(obj, name, candidate)
self.form.populate_obj(candidate)
def __iter__(self):
return iter(self.form)
def __getitem__(self, name):
return self.form[name]
def __getattr__(self, name):
return getattr(self.form, name)
@property
def data(self):
return self.form.data
@property
def errors(self):
return self.form.errors
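# Illustrative doctest-style sketch (not part of the original module): the enclosed
# form's fields are namespaced with the outer field name plus ``separator``, so the
# inputs below are submitted as 'address-street' and 'address-city'. The form
# classes are hypothetical.
#
# >>> class AddressForm(Form):
# ...     street = StringField('Street')
# ...     city = StringField('City')
# >>> class UserForm(Form):
# ...     address = FormField(AddressForm)
# >>> UserForm().address.form.street.name
# 'address-street'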
class FieldList(Field):
"""
Encapsulate an ordered list of multiple instances of the same field type,
keeping data as a list.
>>> authors = FieldList(StringField('Name', [validators.required()]))
:param unbound_field:
A partially-instantiated field definition, just like that would be
defined on a form directly.
:param min_entries:
if provided, always have at least this many entries on the field,
creating blank ones if the provided input does not specify a sufficient
amount.
:param max_entries:
accept no more than this many entries as input, even if more exist in
formdata.
"""
widget = widgets.ListWidget()
def __init__(self, unbound_field, label=None, validators=None, min_entries=0,
max_entries=None, default=tuple(), **kwargs):
super(FieldList, self).__init__(label, validators, default=default, **kwargs)
if self.filters:
raise TypeError('FieldList does not accept any filters. Instead, define them on the enclosed field.')
assert isinstance(unbound_field, UnboundField), 'Field must be unbound, not a field class'
self.unbound_field = unbound_field
self.min_entries = min_entries
self.max_entries = max_entries
self.last_index = -1
self._prefix = kwargs.get('_prefix', '')
def process(self, formdata, data=unset_value):
self.entries = []
if data is unset_value or not data:
try:
data = self.default()
except TypeError:
data = self.default
self.object_data = data
if formdata:
indices = sorted(set(self._extract_indices(self.name, formdata)))
if self.max_entries:
indices = indices[:self.max_entries]
idata = iter(data)
for index in indices:
try:
obj_data = next(idata)
except StopIteration:
obj_data = unset_value
self._add_entry(formdata, obj_data, index=index)
else:
for obj_data in data:
self._add_entry(formdata, obj_data)
while len(self.entries) < self.min_entries:
self._add_entry(formdata)
def _extract_indices(self, prefix, formdata):
"""
Yield indices of any keys with given prefix.
formdata must be an object which will produce keys when iterated. For
example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
        the numbers 0 and 1 will be yielded, but not necessarily in order.
"""
offset = len(prefix) + 1
for k in formdata:
if k.startswith(prefix):
k = k[offset:].split('-', 1)[0]
if k.isdigit():
yield int(k)
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in izip(self.entries, candidates):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
def _add_entry(self, formdata=None, data=unset_value, index=None):
assert not self.max_entries or len(self.entries) < self.max_entries, \
'You cannot have more than max_entries entries in this FieldList'
if index is None:
index = self.last_index + 1
self.last_index = index
name = '%s-%d' % (self.short_name, index)
id = '%s-%d' % (self.id, index)
field = self.unbound_field.bind(form=None, name=name, prefix=self._prefix, id=id, _meta=self.meta,
translations=self._translations)
field.process(formdata, data)
self.entries.append(field)
return field
def append_entry(self, data=unset_value):
"""
Create a new entry with optional default data.
Entries added in this way will *not* receive formdata however, and can
only receive object data.
"""
return self._add_entry(data=data)
def pop_entry(self):
""" Removes the last entry from the list and returns it. """
entry = self.entries.pop()
self.last_index -= 1
return entry
def __iter__(self):
return iter(self.entries)
def __len__(self):
return len(self.entries)
def __getitem__(self, index):
return self.entries[index]
@property
def data(self):
return [f.data for f in self.entries]
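# Illustrative doctest-style sketch (not part of the original module): entries are
# keyed by a zero-based index appended to the field name, so formdata keys like
# 'authors-0' and 'authors-1' repopulate the list in order, and ``min_entries``
# pads it with blank entries. The field is hypothetical and ``DummyPostData`` again
# stands in for MultiDict-style formdata.
#
# >>> class BookForm(Form):
# ...     authors = FieldList(StringField('Name'), min_entries=2)
# >>> form = BookForm(DummyPostData({'authors-0': 'Ada', 'authors-1': 'Grace'}))
# >>> form.authors.data
# ['Ada', 'Grace']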
|
py | 1a54743d675224e52ce6e812ccf2b5d8d608706f | from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans

## Script parameters
max_clusters = 10

# X is assumed to be the feature matrix prepared earlier in the script.
for n_clusters in range(2, max_clusters + 1):  # silhouette_score requires at least 2 clusters
    labels = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(X)
    print(n_clusters, silhouette_score(X, labels))
|
py | 1a54760dd490c74a47be971fea915723bd01b242 | import os
import sys
from datetime import datetime
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
from models.Conv3D import r2plus1d_18
from dataset_sign_clip import Sign_Isolated
from train import train_epoch
from validation_clip import val_epoch
from collections import OrderedDict
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self):
super(LabelSmoothingCrossEntropy, self).__init__()
def forward(self, x, target, smoothing=0.1):
confidence = 1. - smoothing
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + smoothing * smooth_loss
return loss.mean()
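# Added sketch (not in the original script): with smoothing epsilon, the loss above is
#   loss = (1 - eps) * NLL(target) + eps * mean_over_classes(-log p),
# i.e. a blend of the usual cross entropy and a uniform-target term. Minimal usage check:
#
# criterion_check = LabelSmoothingCrossEntropy()
# logits = torch.randn(4, 226)             # (batch, num_classes)
# targets = torch.randint(0, 226, (4,))    # class indices
# print(criterion_check(logits, targets))  # scalar tensor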
# Path setting
exp_name = 'depth_hha_final_finetune'
data_path = "../data/train_val_hha_2_mask"
data_path2 = "../data/test_hha_2_mask"
label_train_path = "data/train_val_labels.csv"
label_val_path = "data/test_labels_pseudo.csv"
model_path = "checkpoint/{}".format(exp_name)
if not os.path.exists(model_path):
os.mkdir(model_path)
if not os.path.exists(os.path.join('results', exp_name)):
os.mkdir(os.path.join('results', exp_name))
log_path = "log/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}.log".format(exp_name, datetime.now())
sum_path = "runs/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}".format(exp_name, datetime.now())
phase = 'Train'
# Log to file & tensorboard writer
logging.basicConfig(level=logging.INFO, format='%(message)s', handlers=[logging.FileHandler(log_path), logging.StreamHandler()])
logger = logging.getLogger('SLR')
logger.info('Logging to file...')
writer = SummaryWriter(sum_path)
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3"
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparams
num_classes = 226 #100
epochs = 100
# batch_size = 16
batch_size = 24
learning_rate = 1e-4  # 1e-3 when training from scratch, 1e-4 when fine-tuning
weight_decay = 1e-4
log_interval = 80
sample_size = 128
sample_duration = 32
attention = False
drop_p = 0.0
hidden1, hidden2 = 512, 256
num_workers = 24
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
# Train with 3DCNN
if __name__ == '__main__':
# Load data
transform = transforms.Compose([transforms.Resize([sample_size, sample_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
train_set = Sign_Isolated(data_path=data_path, label_path=label_train_path, frames=sample_duration,
num_classes=num_classes, train=True, transform=transform)
val_set = Sign_Isolated(data_path=data_path2, label_path=label_val_path, frames=sample_duration,
num_classes=num_classes, train=False, transform=transform)
logger.info("Dataset samples: {}".format(len(train_set)+len(val_set)))
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
# Create model
model = r2plus1d_18(pretrained=False, num_classes=226)
# load pretrained
checkpoint = torch.load('final_models/val_depth_hha_final.pth')
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k[7:] # remove 'module.'
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
# if phase == 'Train':
# model.fc1 = nn.Linear(model.fc1.in_features, num_classes)
print(model)
model = model.to(device)
# Run the model parallelly
if torch.cuda.device_count() > 1:
logger.info("Using {} GPUs".format(torch.cuda.device_count()))
model = nn.DataParallel(model)
# Create loss criterion & optimizer
# criterion = nn.CrossEntropyLoss()
criterion = LabelSmoothingCrossEntropy()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, threshold=0.0001)
# Start training
if phase == 'Train':
logger.info("Training Started".center(60, '#'))
for epoch in range(epochs):
print('lr: ', get_lr(optimizer))
# Train the model
train_epoch(model, criterion, optimizer, train_loader, device, epoch, logger, log_interval, writer)
# Validate the model
val_loss = val_epoch(model, criterion, val_loader, device, epoch, logger, writer)
scheduler.step(val_loss)
# Save model
torch.save(model.state_dict(), os.path.join(model_path, "sign_resnet2d+1_epoch{:03d}.pth".format(epoch+1)))
logger.info("Epoch {} Model Saved".format(epoch+1).center(60, '#'))
elif phase == 'Test':
logger.info("Testing Started".center(60, '#'))
val_loss = val_epoch(model, criterion, val_loader, device, 0, logger, writer, phase=phase, exp_name=exp_name)
logger.info("Finished".center(60, '#'))
|
py | 1a547657fc746ae3d72cf23152dfa1ff9c776269 | import torch
from gaussed.distribution.base import Distribution
from gaussed.utils.lin_alg_solvers import DefaultSolver
class GP(Distribution):
def __init__(self, mean, kernel, solver=DefaultSolver()):
self.mean = mean
self.kernel = kernel
self.solver = solver
self.dim = self.kernel.dim
self.t_dim = self.kernel.dim
self.func_dist = True # distribution over functions
self.func_dim = self.kernel.dim
def condition_x(self, x, X, Y, solver=None):
"""Computes the posterior mean and covariance at locations x, conditioned upon output observations Y, observed at input locations X
Args:
x ([torch.Tensor]): [Locations to evaluate posterior mean and covariance amtrix]
X ([torch.Tensor]): [Tensor of input locations]
Y ([type]): [Tensor of output observations]
solver ([Solver], optional): [Solver]. Defaults to None and so to self.solver.
Returns:
[torch.Tensor, torch.Tensor]: [Posterior mean and covariance matrix]
"""
if solver is None:
solver = self.solver
K_XX = self.kernel.eval(X, X)
K_Xx = self.kernel.eval(X, x)
K_xx = self.kernel.eval(x, x)
inverse = solver.inverse(K_XX)
solved_y = solver.solve(inverse, Y.unsqueeze(1))
solved_gram = solver.solve(inverse, K_Xx)
mean = torch.matmul(K_Xx.T, solved_y)
covariance_matrix = K_xx - torch.matmul(K_Xx.T, solved_gram)
return mean, covariance_matrix
def get_prior(self, x):
"""Returns prior mean and covariance at locations x.
Args:
x ([torch.Tensor]): [Locations to evaluate prior mean and prior covariance]
Returns:
[torch.Tensor, torch.Tensor]: [Prior mean and covariance matrix]
"""
K_xx = self.kernel.eval(x, x)
prior_mean = self.mean.eval(x)
return prior_mean, K_xx
def sample(self, mean, covariance, n):
"""Sample from the GP with given mean and covariance, n number of times.
Args:
mean ([torch.Tensor]): [Mean Tensor]
covariance ([torch.Tensor]): [Covariance matrix Tensor]
n ([int]): [Number of samples]
Returns:
[torch.Tensor]: [GP samples]
"""
m = mean.size(0)
U, S, V = torch.svd(covariance)
s_cov = torch.matmul(U, torch.diag(torch.sqrt(S)))
sn = torch.distributions.MultivariateNormal(torch.zeros(m), torch.eye(m))
sn_samples = sn.sample(torch.Size([n])).T
mn_samples = torch.add(torch.matmul(s_cov, sn_samples).T, mean).T
return mn_samples
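# Illustrative sketch (not part of the original module): the posterior computed in
# ``condition_x`` is the standard GP conditional,
#   mean = K(x, X) K(X, X)^{-1} Y,   cov = K(x, x) - K(x, X) K(X, X)^{-1} K(X, x),
# and a typical round trip looks like the following. ``ZeroMean`` and
# ``SquaredExponential`` are placeholder names for whatever mean/kernel classes the
# package provides.
#
# gp = GP(ZeroMean(), SquaredExponential(dim=1))
# X = torch.linspace(0, 1, 10).unsqueeze(1)
# Y = torch.sin(6 * X.squeeze(1))
# x = torch.linspace(0, 1, 50).unsqueeze(1)
# mean, cov = gp.condition_x(x, X, Y)
# samples = gp.sample(mean.squeeze(1), cov, n=5)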
|
py | 1a54796dbeec3f0df5dafbce616525527890718b | import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.core.api import Float64Index
def test_get():
# GH 6383
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
)
)
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
),
index=Float64Index(
[
25.0,
36.0,
49.0,
64.0,
81.0,
100.0,
121.0,
144.0,
169.0,
196.0,
1225.0,
1296.0,
1369.0,
1444.0,
1521.0,
1600.0,
1681.0,
1764.0,
1849.0,
1936.0,
]
),
)
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default="Missing")
assert result == "Missing"
vc = df.b.value_counts()
result = vc.get(False, default="Missing")
assert result == 3
result = vc.get(True, default="Missing")
assert result == "Missing"
def test_get_nan():
# GH 8569
s = Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default="Missing") == "Missing"
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = Float64Index(range(10)).to_series()
idx = [2, 30]
assert s.get(idx) is None
idx = [2, np.nan]
assert s.get(idx) is None
# GH 17295 - all missing keys
idx = [20, 30]
assert s.get(idx) is None
idx = [np.nan, np.nan]
assert s.get(idx) is None
def test_get_with_default():
# GH#7725
d0 = ["a", "b", "c", "d"]
d1 = np.arange(4, dtype="int64")
others = ["e", 10]
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
@pytest.mark.parametrize(
"arr",
[np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
)
def test_get_with_ea(arr):
# GH#21260
ser = Series(arr, index=[2 * i for i in range(len(arr))])
assert ser.get(4) == ser.iloc[2]
result = ser.get([4, 6])
expected = ser.iloc[[2, 3]]
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match="label-based"):
result = ser.get(slice(2))
expected = ser.iloc[[0, 1]]
tm.assert_series_equal(result, expected)
assert ser.get(-1) is None
assert ser.get(ser.index.max() + 1) is None
ser = Series(arr[:6], index=list("abcdef"))
assert ser.get("c") == ser.iloc[2]
result = ser.get(slice("b", "d"))
expected = ser.iloc[[1, 2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get("Z")
assert result is None
assert ser.get(4) == ser.iloc[4]
assert ser.get(-1) == ser.iloc[-1]
assert ser.get(len(ser)) is None
# GH#21257
ser = Series(arr)
ser2 = ser[::2]
assert ser2.get(1) is None
def test_getitem_get(string_series, object_series):
for obj in [string_series, object_series]:
idx = obj.index[5]
assert obj[idx] == obj.get(idx)
assert obj[idx] == obj[5]
assert string_series.get(-1) == string_series.get(string_series.index[-1])
assert string_series[5] == string_series.get(string_series.index[5])
def test_get_none():
# GH#5652
s1 = Series(dtype=object)
s2 = Series(dtype=object, index=list("abc"))
for s in [s1, s2]:
result = s.get(None)
assert result is None
|