metadata (dict) | text (string, 60 to 3.49M characters)
---|---|
{
"source": "920232796/NLP_flask",
"score": 3
} |
#### File: bert_seq2seq/model/nezha_model.py
```python
import math
import os
import re
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
def swish(x):
return x * torch.sigmoid(x)
def gelu(x):
"""
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def mish(x):
return x * torch.tanh(nn.functional.softplus(x))
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "mish": mish}
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class BertConfig:
r"""
This is the configuration class to store the configuration of an :class:`~transformers.AlbertModel`.
It is used to instantiate an ALBERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the ALBERT `xxlarge <https://huggingface.co/albert-xxlarge-v2>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30000):
Vocabulary size of the ALBERT model. Defines the different tokens that
can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.AlbertModel`.
embedding_size (:obj:`int`, optional, defaults to 128):
Dimensionality of vocabulary embeddings.
hidden_size (:obj:`int`, optional, defaults to 4096):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_hidden_groups (:obj:`int`, optional, defaults to 1):
Number of groups for the hidden layers, parameters in the same group are shared.
num_attention_heads (:obj:`int`, optional, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 16384):
The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
inner_group_num (:obj:`int`, optional, defaults to 1):
The number of inner repetition of attention and ffn.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu_new"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something
large (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.AlbertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
classifier_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for attached classifiers.
Example::
from transformers import AlbertConfig, AlbertModel
# Initializing an ALBERT-xxlarge style configuration
albert_xxlarge_configuration = AlbertConfig()
# Initializing an ALBERT-base style configuration
albert_base_configuration = AlbertConfig(
hidden_size=768,
num_attention_heads=12,
intermediate_size=3072,
)
# Initializing a model from the ALBERT-base style configuration
model = AlbertModel(albert_xxlarge_configuration)
# Accessing the model configuration
configuration = model.config
Attributes:
pretrained_config_archive_map (Dict[str, str]):
A dictionary containing all the available pre-trained checkpoints.
"""
pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "nezha"
def __init__(
self,
vocab_size=21128,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
max_relative_position=64,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
use_relative_position=True,
):
super().__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_relative_position = max_relative_position
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
        self.use_relative_position = use_relative_position
class BertLayerNorm(nn.Module):
"""LayerNorm层, 见Transformer(一), 讲编码器(encoder)的第3部分"""
def __init__(self, hidden_size, eps=1e-12, conditional=False):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
self.conditional = conditional
        if conditional:
            # conditional LayerNorm: the scale and bias are additionally modulated by a condition vector
            self.weight_dense = nn.Linear(2 * hidden_size, hidden_size, bias=False)
            self.weight_dense.weight.data.uniform_(0, 0)  # zero-init so it starts as a plain LayerNorm
            self.bias_dense = nn.Linear(2 * hidden_size, hidden_size, bias=False)
            self.bias_dense.weight.data.uniform_(0, 0)
def forward(self, x):
        if not self.conditional:
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
else:
inputs = x[0]
cond = x[1]
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(dim=1)
weight = self.weight + self.weight_dense(cond)
bias = self.bias + self.bias_dense(cond)
u = inputs.mean(-1, keepdim=True)
s = (inputs - u).pow(2).mean(-1, keepdim=True)
x = (inputs - u) / torch.sqrt(s + self.variance_epsilon)
return weight * x + bias
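def _conditional_layer_norm_demo():
    """Illustrative sketch, not part of the original file: with conditional=True
    the layer expects a pair [inputs, cond] and uses cond (of size 2 * hidden)
    to modulate the learned scale and bias."""
    ln = BertLayerNorm(hidden_size=4, conditional=True)
    inputs = torch.randn(2, 3, 4)   # [batch, seq_len, hidden]
    cond = torch.randn(2, 8)        # [batch, 2 * hidden]
    out = ln([inputs, cond])        # cond is broadcast over the sequence dimension
    print(out.shape)                # torch.Size([2, 3, 4])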
class NeZhaEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.use_relative_position = config.use_relative_position
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class RelativePositionsEncoding(nn.Module):
def __init__(self, length, depth, max_relative_position=127):
super(RelativePositionsEncoding, self).__init__()
vocab_size = max_relative_position * 2 + 1
range_vec = torch.arange(length)
range_mat = range_vec.repeat(length).view(length, length)
distance_mat = range_mat - torch.t(range_mat)
distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
final_mat = distance_mat_clipped + max_relative_position
embeddings_table = torch.zeros(vocab_size, depth)
position = torch.arange(0, vocab_size, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
embeddings_table[:, 0::2] = torch.sin(position * div_term)
embeddings_table[:, 1::2] = torch.cos(position * div_term)
embeddings_table = embeddings_table.unsqueeze(0).transpose(0, 1).squeeze(1)
flat_relative_positions_matrix = final_mat.view(-1)
one_hot_relative_positions_matrix = torch.nn.functional.one_hot(flat_relative_positions_matrix,
num_classes=vocab_size).float()
positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
my_shape = list(final_mat.size())
my_shape.append(depth)
positions_encoding = positions_encoding.view(my_shape)
self.register_buffer('positions_encoding', positions_encoding)
def forward(self, length):
return self.positions_encoding[:length, :length, :]
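def _relative_positions_encoding_demo():
    """Illustrative sketch, not part of the original file: the registered buffer
    holds a sinusoidal embedding of the clipped relative distance j - i for every
    pair of positions (i, j), so forward(length) returns a [length, length, depth] tensor."""
    rpe = RelativePositionsEncoding(length=6, depth=4, max_relative_position=2)
    print(rpe(3).shape)   # torch.Size([3, 3, 4])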
class NeZhaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.relative_positions_encoding = RelativePositionsEncoding(length=config.max_position_embeddings,
depth=self.attention_head_size,
max_relative_position=config.max_relative_position)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
relations_keys = self.relative_positions_encoding(to_seq_length)
query_layer_t = query_layer.permute(2, 0, 1, 3)
query_layer_r = query_layer_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
self.attention_head_size)
key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
key_position_scores_r = key_position_scores.view(from_seq_length, batch_size,
num_attention_heads, from_seq_length)
key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
attention_scores = attention_scores + key_position_scores_r_t
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
relations_values = self.relative_positions_encoding(to_seq_length)
attention_probs_t = attention_probs.permute(2, 0, 1, 3)
attentions_probs_r = attention_probs_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
to_seq_length)
value_position_scores = torch.matmul(attentions_probs_r, relations_values)
value_position_scores_r = value_position_scores.view(from_seq_length, batch_size,
num_attention_heads, self.attention_head_size)
value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
context_layer = context_layer + value_position_scores_r_t
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class NeZhaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = NeZhaSelfAttention(config)
self.pruned_heads = set()
self.output = BertSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = ACT2FN[config.hidden_act]  # e.g. gelu by default
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class NeZhaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = NeZhaAttention(config)
# self.is_decoder = config.is_decoder
# if self.is_decoder:
# self.crossattention = NeZhaAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# if self.is_decoder and encoder_hidden_states is not None:
# cross_attention_outputs = self.crossattention(
# attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
# )
# attention_output = cross_attention_outputs[0]
# outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
class NeZhaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([NeZhaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_all_encoded_layers=True,
):
all_hidden_states = ()
for i, layer_module in enumerate(self.layer):
if output_all_encoded_layers:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, None, encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
# Add last layer
outputs = all_hidden_states + (hidden_states,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act]
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertModel(nn.Module):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
    To behave as a decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__()
self.config = config
self.embeddings = NeZhaEmbeddings(config)
self.encoder = NeZhaEncoder(config)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
        :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
head_mask=None,
position_ids=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_all_encoded_layers=True,
):
device = input_ids.device
input_shape = input_ids.shape
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = (input_ids > 0).float()
        # attention mask: [batch_size, 1, 1, seq_length]
extended_attention_mask = extended_attention_mask.unsqueeze(1).unsqueeze(2)
if attention_mask is not None:
            # if an attention mask was passed in, multiply it with the original padding mask
            # (extended_attention_mask zeroes out the pad positions so padding does not affect attention)
extended_attention_mask = attention_mask * extended_attention_mask
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids=input_ids, token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
output_all_encoded_layers=output_all_encoded_layers,
# encoder_attention_mask=encoder_extended_attention_mask,
)
return encoder_outputs, None
```
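A minimal usage sketch for the file above (not part of the original repository; it assumes `BertConfig` and `BertModel` defined above are in scope). It builds a small NeZha encoder and runs a forward pass on random token ids; the two-layer setting is only for illustration.
```python
import torch

config = BertConfig(num_hidden_layers=2)                   # small model just for the sketch
model = BertModel(config)
model.eval()

input_ids = torch.randint(1, config.vocab_size, (2, 16))   # [batch, seq_len]; 0 is treated as padding
with torch.no_grad():
    encoder_outputs, _ = model(input_ids)                  # tuple of hidden states (all layers + final)
print(encoder_outputs[-1].shape)                           # torch.Size([2, 16, 768])
```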
#### File: nlp_api/test/ner_test.py
```python
import torch
def viterbi_decode(nodes, trans):
"""
维特比算法 解码
nodes: (seq_len, target_size)
trans: (target_size, target_size)
"""
with torch.no_grad():
scores = nodes[0]
        scores[1:] -= 100000  # the first label must be "O"
target_size = nodes.shape[1]
seq_len = nodes.shape[0]
labels = torch.arange(0, target_size).view(1, -1)
path = labels
for l in range(1, seq_len):
scores = scores.view(-1, 1)
M = scores + trans + nodes[l].view(1, -1)
scores, ids = M.max(0)
path = torch.cat((path[:, ids], labels), dim=0)
# print(scores)
# print(scores)
return path[:, scores.argmax()]
def ner_print(model, test_data, word2idx, tokenizer, target, device="cpu"):
model.eval()
idxtword = {v: k for k, v in word2idx.items()}
trans = model.state_dict()["crf_layer.trans"]
decode = []
text_encode, text_ids = tokenizer.encode(test_data)
text_tensor = torch.tensor(text_encode, device=device).view(1, -1)
    out = model(text_tensor).squeeze(0)  # the per-token emission scores (nodes)
labels = viterbi_decode(out, trans)
starting = False
for l in labels:
if l > 0:
label = target[l.item()]
decode.append(label)
else :
decode.append("other")
flag = 0
res = {}
decode_text = [idxtword[i] for i in text_encode]
for index, each_entity in enumerate(decode):
if each_entity != "other":
if flag != each_entity:
cur_text = decode_text[index]
if each_entity in res.keys():
res[each_entity].append(cur_text)
else :
res[each_entity] = [cur_text]
flag = each_entity
elif flag == each_entity:
res[each_entity][-1] += decode_text[index]
else :
flag = 0
return res
```
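A minimal sketch of `viterbi_decode` on synthetic scores (shapes and values are made up for illustration; it assumes the function above is in scope):
```python
import torch

seq_len, target_size = 5, 4
nodes = torch.randn(seq_len, target_size)       # per-token emission scores
trans = torch.randn(target_size, target_size)   # tag-to-tag transition scores
path = viterbi_decode(nodes, trans)
print(path.shape)                               # torch.Size([5]): one tag index per token
```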
#### File: webs/utils/utils.py
```python
from bert_seq2seq import load_bert, load_gpt
import torch
import redis
class RedisCache:
def __init__(self, host, port, decode_responses=True):
        self.pool = redis.ConnectionPool(host=host, port=port, decode_responses=decode_responses)  # redis connection pool
        print("Redis connection pool created successfully")
def get_cache(self, text, prefix):
r = redis.Redis(connection_pool=self.pool)
cache_content = r.get(text + "##" + prefix)
print(f"获取到缓存数据{cache_content}")
if cache_content is None :
return ""
return cache_content
def set_cache(self, text, prefix, res):
r = redis.Redis(connection_pool=self.pool)
k = text + "##" + prefix
        r.set(k, res)  # store the key-value pair in the redis cache
        print(f"Set cache k: {k}, v: {res}")
def keys(self):
r = redis.Redis(connection_pool=self.pool)
return r.keys()
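def _redis_cache_demo():
    """Illustrative sketch, not part of the original file: cache keys are built as
    "<text>##<prefix>", so the same text can be cached separately per task.
    The host/port values are assumptions and a running Redis server is required."""
    cache = RedisCache(host="127.0.0.1", port=6379)
    cache.set_cache("some input text", "ner", "{}")
    print(cache.get_cache("some input text", "ner"))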
def load_model(word2idx, model_path, model_name=None, model_class=None,
target_size=None, is_gpt=False, is_all_params=True, device=torch.device("cpu")):
if is_gpt:
model = load_gpt(word2idx)
model.eval()
model.set_device(device)
if is_all_params:
model.load_all_params(model_path, device=device)
else :
model.load_pretrain_params(model_path)
return model
if target_size is not None:
model = load_bert(word2idx, model_name=model_name, model_class=model_class, target_size=target_size)
model.set_device(device)
model.eval()
if is_all_params:
model.load_all_params(model_path, device=device)
else :
model.load_pretrain_params(model_path)
else :
model = load_bert(word2idx, model_name=model_name, model_class=model_class)
model.set_device(device)
model.eval()
if is_all_params:
model.load_all_params(model_path, device=device)
else :
model.load_pretrain_params(model_path)
return model
``` |
{
"source": "920671233/SpiderItems",
"score": 3
} |
#### File: SpiderItems/scripts/jandan_crawler.py
```python
import re
import urllib.request
from urllib.error import URLError
def crawl():
html = urllib.request.urlopen('http://jandan.net/page/1').read()
html = str(html)
pattern1 = '<div id="content">.*<div class="post f" style="padding-left:210px;">'
result1 = re.compile(pattern1).findall(html)
result1 = result1[0]
    pattern2 = r'<img src="//(.+?\.jpg)!custom" width="175" height="98" />'
imglist = re.compile(pattern2).findall(result1)
    # add request header information
opener = urllib.request.build_opener()
opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36")]
urllib.request.install_opener(opener)
x = 1
for imgurl in imglist:
imgname = "E:/img/" + str(x) + ".jpg"
imgurl = "http://" + imgurl
try:
urllib.request.urlretrieve(imgurl, imgname, )
except URLError as e:
print(e)
finally:
x += 1
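def _img_pattern_demo():
    """Illustrative sketch, not part of the original file: how the thumbnail regex
    above extracts the protocol-relative jpg URL from an image tag."""
    sample = '<img src="//example.com/pic/a1b2.jpg!custom" width="175" height="98" />'
    print(re.findall(r'<img src="//(.+?\.jpg)!custom" width="175" height="98" />', sample))
    # ['example.com/pic/a1b2.jpg']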
if __name__ == '__main__':
crawl()
``` |
{
"source": "920911love/DINK",
"score": 3
} |
#### File: tf_server/src/tf_server_E.py
```python
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from detector import Detector
import utils
from tf_server.srv import TF_detect
class PeopleObjectDetectionNode(object):
"""docstring for PeopleObjectDetectionNode."""
def __init__(self):
super(PeopleObjectDetectionNode, self).__init__()
# init the node
rospy.init_node('tf_server', anonymous=False)
# Get the parameters
(model_name, num_of_classes, label_file, camera_namespace, video_name,
num_workers) \
= self.get_parameters()
# Create Detector
self._detector = Detector(model_name, num_of_classes, label_file,
num_workers)
self._bridge = CvBridge()
# Advertise the result of Object Detector
# self.pub_detections_data = rospy.Publisher('/detections', DetectedObjectArray, queue_size=1)
# self.pub_detections_data = rospy.Publisher('/detections', DetectionArray, queue_size=1)
# Advertise the result of Object Detector
# self.pub_detections_image = rospy.Publisher( '/result_ripe', Image, queue_size=1)
# self.pub_detections_image = rospy.Publisher( '/object_detection/detections_image', Image, queue_size=1)
SS = rospy.Service('tf_detect_request', TF_detect, self.process_tf_detect)
# self.sub_rgb = rospy.Subscriber('usb_cam/image_raw',Image, self.rgb_callback, queue_size=1, buff_size=2**24)
# spin
rospy.spin()
def get_parameters(self):
"""
Gets the necessary parameters from parameter server
Args:
Returns:
(tuple) (model name, num_of_classes, label_file)
"""
model_name = rospy.get_param("~model_name")
num_of_classes = rospy.get_param("~num_of_classes")
label_file = rospy.get_param("~label_file")
camera_namespace = 'usb_cam/image_raw'
# camera_namespace = rospy.get_param("~camera_namespace")
video_name = rospy.get_param("~video_name")
num_workers = rospy.get_param("~num_workers")
return (model_name, num_of_classes, label_file, \
camera_namespace, video_name, num_workers)
def process_tf_detect(self, req):
print("==========================================")
cv_image = self._bridge.imgmsg_to_cv2(req.image_input, "bgr8")
# Detect
(output_dict, category_index) = \
self._detector.detect(cv_image)
        msg, KK = utils.create_detection_aw_msg(req.image_input, output_dict, category_index, self._bridge)
return KK
# return 1
def rgb_callback(self, data):
"""
Callback for RGB images
"""
try:
# .publish(self._cv_bridge.cv2_to_imgmsg(image_np, "bgr8"))
# Convert image to numpy array
cv_image = self._bridge.imgmsg_to_cv2(data, "bgr8")
# Detect
(output_dict, category_index) = \
self._detector.detect(cv_image)
# Create the message
msg= \
utils.create_detection_aw_msg(\
data, output_dict, category_index, self._bridge)
# Draw bounding boxes
image_np = \
self._detector.visualize(cv_image, output_dict)
# Publish the messages
self.pub_detections_data.publish(msg)
self.pub_detections_image.publish(self._bridge.cv2_to_imgmsg(image_np, "bgr8"))
except CvBridgeError as e:
print(e)
def main():
""" main function
"""
node = PeopleObjectDetectionNode()
if __name__ == '__main__':
main()
``` |
{
"source": "921957877/meiduo_mall",
"score": 2
} |
#### File: meiduo_admin/views/sku_views.py
```python
from rest_framework.generics import ListAPIView, DestroyAPIView, CreateAPIView, RetrieveAPIView
from rest_framework.viewsets import ModelViewSet
from goods.models import SKU, GoodsCategory, Goods, GoodsSpecification
from meiduo_admin.pages import MyPage
from meiduo_admin.serializers.sku_serializers import SKUSerializer, GoodsCategorySerializer, SPUSerializer, \
SPUSpecSerializer
# class SKUView(ListAPIView, DestroyAPIView, CreateAPIView, RetrieveAPIView):
class SKUView(ModelViewSet):
queryset = SKU.objects.all()
serializer_class = SKUSerializer
pagination_class = MyPage
    # Override get_queryset to return different query results depending on whether the front end passes a keyword
def get_queryset(self):
keyword = self.request.query_params.get('keyword')
if keyword:
return self.queryset.filter(name__contains=keyword)
return self.queryset.all()
class GoodsCategoryView(ListAPIView):
    # parent_id greater than 37 corresponds to third-level category entries
queryset = GoodsCategory.objects.filter(parent_id__gt=37)
serializer_class = GoodsCategorySerializer
class SPUView(ListAPIView):
queryset = Goods.objects.all()
serializer_class = SPUSerializer
class SPUSpecView(ListAPIView):
serializer_class = SPUSpecSerializer
    # Given the SPU id (main table), query the SPU's specifications (related table)
def get_queryset(self):
goods_id = self.kwargs.get('pk')
return GoodsSpecification.objects.filter(goods_id=goods_id)
```
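Because `SKUView` is a `ModelViewSet`, it is typically registered with a DRF router; a minimal sketch (the `skus` prefix and basename are assumptions, not taken from the project's URL configuration):
```python
from rest_framework.routers import DefaultRouter
from meiduo_admin.views.sku_views import SKUView

router = DefaultRouter()
router.register('skus', SKUView, basename='skus')  # the list endpoint then supports ?keyword=... filtering
urlpatterns = router.urls
```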
#### File: apps/oauth/utils.py
```python
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer, BadData
def generate_access_token(openid):
"""将openid加密为access_token"""
# 创建对象,第一个参数秘钥,第二个参数有效期(秒)
serializer = TimedJSONWebSignatureSerializer(settings.SECRET_KEY, expires_in=300)
data = {'openid': openid}
    # call the serializer's dumps method with a dict to encrypt it; returns bytes
    access_token = serializer.dumps(data)
    # decode to str and return
return access_token.decode()
def check_access_token(access_token):
"""将access_token解密为openid"""
# 创建对象,第一个参数秘钥,第二个参数有效期(秒)
serializer = TimedJSONWebSignatureSerializer(settings.SECRET_KEY, expires_in=300)
try:
        # call the serializer's loads method with the access_token to decrypt it; returns a dict
data = serializer.loads(access_token)
    # if decryption fails, the access_token has been tampered with or corrupted
except BadData:
return None
    # if decryption succeeds, extract and return the openid
else:
openid = data.get('openid')
return openid
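def _access_token_demo():
    """Illustrative sketch, not part of the original file: round trip between an
    openid and its access_token. Assumes Django settings (SECRET_KEY) are loaded
    and an itsdangerous version that still provides TimedJSONWebSignatureSerializer."""
    token = generate_access_token("fake-openid")
    assert check_access_token(token) == "fake-openid"
    assert check_access_token("tampered-token") is None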
``` |
{
"source": "921974496/AD-DL",
"score": 2
} |
#### File: clinicadl/patch_level/utils.py
```python
import torch
import pandas as pd
import numpy as np
import os
from torch.utils.data import Dataset
from time import time
__author__ = "<NAME>"
__copyright__ = "Copyright 2018 The Aramis Lab Team"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
#################################
# AutoEncoder train / test
#################################
def stacked_ae_learning(model, train_loader, valid_loader, criterion, writer_train, writer_valid, options, fi):
"""
    Train the stacked autoencoders jointly.
:param model:
:param train_loader:
:param valid_loader:
:param criterion:
:param writer_train:
:param writer_valid:
:param options:
:return:
Return both the pretrained CNN for future use and also the stacked AEs
"""
from os import path
from ..tools.deep_learning.models import AutoEncoder
from ..tools.deep_learning import save_checkpoint, load_model
from copy import deepcopy
    # if the model is not already an autoencoder, convert the CNN into one
ae = AutoEncoder(model)
ae_finetuning(ae, train_loader, valid_loader, criterion, writer_train, writer_valid, options, fi)
# Updating and setting weights of the convolutional layers
checkpoint_dir = path.join(options.output_dir, 'best_model_dir', "fold_" + str(fi), 'ConvAutoencoder',
'AutoEncoder', 'best_loss')
best_autodecoder, best_epoch = load_model(ae, checkpoint_dir, options.gpu, filename='model_best.pth.tar')
del ae
# save the encoder part of the AEs, the best AEs has been saved in the ae_finetuning part
model.features = deepcopy(best_autodecoder.encoder)
save_checkpoint({'model': model.state_dict(),
'epoch': best_epoch},
False, False,
os.path.join(options.output_dir, 'best_model_dir', "fold_" + str(fi), 'ConvAutoencoder', 'Encoder'),
filename='model_best_encoder.pth.tar')
del best_epoch
return model, best_autodecoder
def ae_finetuning(auto_encoder_all, train_loader, valid_loader, criterion, writer_train_ft, writer_valid_ft, options,
fi, global_step=0):
"""
After training the AEs in a layer-wise way, we fine-tune the whole AEs
    :param auto_encoder_all:
:param train_loader:
:param valid_loader:
:param criterion:
:param gpu:
:param results_path:
:param options:
:return:
"""
from ..tools.deep_learning import save_checkpoint
auto_encoder_all.train()
optimizer = eval("torch.optim." + options.optimizer)(filter(lambda x: x.requires_grad, auto_encoder_all.parameters()),
options.learning_rate)
if options.gpu:
auto_encoder_all.cuda()
# Initialize variables
best_loss_valid = np.inf
print("Beginning fine-tuning")
tend = time()
total_time = 0
for epoch in range(options.epochs):
print("Fine-tuning at %d-th epoch." % epoch)
auto_encoder_all.zero_grad()
for i, data in enumerate(train_loader):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs = data['image'].cuda()
else:
imgs = data['image']
train_output = auto_encoder_all(imgs)
loss = criterion(train_output, imgs)
loss.backward()
# monitor the training loss for each batch using tensorboardX
writer_train_ft.add_scalar('loss', loss, i + epoch * len(train_loader))
# update the global steps
global_step = i + epoch * len(train_loader)
del imgs, train_output, loss
optimizer.step()
optimizer.zero_grad()
torch.cuda.empty_cache()
tend = time()
print('Mean time per batch (train):', total_time / len(train_loader))
# Always test the results and save them once at the end of the epoch
loss_valid = test_ae(auto_encoder_all, valid_loader, options.gpu, criterion)
mean_loss_valid = loss_valid / (len(valid_loader))
writer_valid_ft.add_scalar('loss', mean_loss_valid, global_step)
print("Mean validation loss is %f for the -th batch %d" % (mean_loss_valid, global_step))
# reset the model to train mode after evaluation
auto_encoder_all.train()
is_best_loss = loss_valid < best_loss_valid
# Save best based on smallest loss
best_loss_valid = min(loss_valid, best_loss_valid)
save_checkpoint({'model': auto_encoder_all.state_dict(),
'iteration': i,
'epoch': epoch,
'best_loss': best_loss_valid},
False, is_best_loss,
os.path.join(options.output_dir, "best_model_dir", "fold_" + str(fi), "ConvAutoencoder",
"AutoEncoder"))
del optimizer, auto_encoder_all
def test_ae(model, dataloader, gpu, criterion):
"""
Computes the loss of the model, either the loss of the layer-wise AE or all the AEs in a big graph one time.
:param model: the network (subclass of nn.Module)
:param dataloader: a DataLoader wrapping a dataset
:param gpu: (bool) if True a gpu is used
:param criterion:
:return: loss of the model (float)
"""
model.eval()
total_loss = 0
for i, data in enumerate(dataloader, 0):
if gpu:
inputs = data['image'].cuda()
else:
inputs = data['image']
hidden = inputs
outputs = model(hidden)
hidden_requires_grad_no = hidden.detach()
hidden_requires_grad_no.requires_grad = False
loss = criterion(outputs, hidden_requires_grad_no)
total_loss += loss.item()
torch.cuda.empty_cache()
del inputs, outputs, loss
return total_loss
def visualize_ae(ae, data, results_path):
"""
To reconstruct one example batch and save it in nifti format for visualization
:param ae:
:param data: tensor, shape [1, 1, height, width, length]
:param results_path:
:return:
"""
import nibabel as nib
import os
if not os.path.exists(results_path):
os.makedirs(results_path)
# set the model to be eval
ae.eval()
output = ae(data)
reconstructed_nii = nib.Nifti1Image(output[0][0].cpu().detach().numpy(), np.eye(4))
input_nii = nib.Nifti1Image(data[0][0].cpu().detach().numpy(), np.eye(4))
nib.save(reconstructed_nii, os.path.join(results_path, 'example_patch_reconstructed.nii.gz'))
nib.save(input_nii, os.path.join(results_path, 'example_patch_original.nii.gz'))
#################################
# Transfer learning
#################################
def load_model_after_ae(model, checkpoint_dir, filename='checkpoint.pth.tar'):
"""
Load and copy the weights and biases of a previously trained Encoder part of an autoencoder.
:param model: (nn.Module) the object in which the weights and biases are copied.
:param checkpoint_dir: (str) path to the directory in which the pretrained Autoencoder is saved.
:param filename: (str) name of the file in which the pretrained Autoencoder is saved.
:return:
- model_updated (nn.Module) model initialized with the pretrained CNN
- best_epoch (int) the number of the epoch at which the pretrained CNN corresponds
"""
from copy import deepcopy
model_after_ae = deepcopy(model)
model_dict = model_after_ae.state_dict()
param_dict = torch.load(os.path.join(checkpoint_dir, filename))
ae_pretrained_dict = param_dict['model']
ae_pretrained_dict_copy = deepcopy(ae_pretrained_dict)
# remove the classifier's weight, only take the convolutional part.
for k in ae_pretrained_dict.keys():
if 'classifier' not in k:
pass
else:
del ae_pretrained_dict_copy[k]
model_dict.update(ae_pretrained_dict_copy)
model_after_ae.load_state_dict(model_dict)
return model_after_ae, param_dict['epoch']
def load_model_after_cnn(model, checkpoint_dir, filename='checkpoint.pth.tar'):
"""
Load and copy the weights and biases of a previously trained CNN.
:param model: (nn.Module) the object in which the weights and biases are copied.
:param checkpoint_dir: (str) path to the directory in which the pretrained CNN is saved.
:param filename: (str) name of the file in which the pretrained CNN is saved.
:return:
- model_updated (nn.Module) model initialized with the pretrained CNN
- best_epoch (int) the number of the epoch at which the pretrained CNN corresponds
"""
from copy import deepcopy
model.eval()
model_updated = deepcopy(model)
param_dict = torch.load(os.path.join(checkpoint_dir, filename))
model_updated.load_state_dict(param_dict['model'])
return model_updated, param_dict['epoch']
#################################
# CNN train / test
#################################
def train(model, data_loader, use_cuda, loss_func, optimizer, writer, epoch, model_mode="train",
selection_threshold=None):
"""
This is the function to train, validate or test the model, depending on the model_mode parameter.
:param model:
:param data_loader:
:param use_cuda:
:param loss_func:
:param optimizer:
:param writer:
:param epoch:
:return:
"""
global_step = None
softmax = torch.nn.Softmax(dim=1)
if model_mode == "train":
columns = ['participant_id', 'session_id', 'patch_id', 'true_label', 'predicted_label', 'proba0', 'proba1']
results_batch_df = pd.DataFrame(columns=columns)
total_loss = 0.0
model.train() # set the model to training mode
for i, data in enumerate(data_loader):
# update the global steps
global_step = i + epoch * len(data_loader)
if use_cuda:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
            ground_truth_list = labels.data.cpu().numpy().tolist()
output = model(imgs)
normalized_output = softmax(output)
_, predicted = torch.max(output.data, 1)
predict_list = predicted.data.cpu().numpy().tolist()
batch_loss = loss_func(output, labels)
total_loss += batch_loss.item()
# calculate the batch balanced accuracy and loss
            batch_metrics = evaluate_prediction(ground_truth_list, predict_list)
batch_accuracy = batch_metrics['balanced_accuracy']
writer.add_scalar('classification accuracy', batch_accuracy, global_step)
writer.add_scalar('loss', batch_loss.item(), global_step)
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
row = [sub, data['session_id'][idx], data['patch_id'][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_batch_df = pd.concat([results_batch_df, row_df])
# delete the temporary variables taking the GPU memory
del imgs, labels, output, predicted, batch_loss, batch_accuracy
torch.cuda.empty_cache()
results_batch_df.reset_index(inplace=True, drop=True)
epoch_metrics = evaluate_prediction(results_batch_df.true_label.values.astype(int),
results_batch_df.predicted_label.values.astype(int))
accuracy_batch_mean = epoch_metrics['balanced_accuracy']
loss_batch_mean = total_loss / len(data_loader)
torch.cuda.empty_cache()
elif model_mode == "valid":
results_batch_df, metrics_batch = test(model, data_loader, use_cuda, loss_func)
# calculate the balanced accuracy
_, metrics_subject = soft_voting(results_batch_df, results_batch_df, selection_threshold=selection_threshold)
accuracy_batch_mean = metrics_subject['balanced_accuracy']
total_loss = metrics_batch['total_loss']
loss_batch_mean = total_loss / len(data_loader)
writer.add_scalar('classification accuracy', accuracy_batch_mean, epoch)
writer.add_scalar('loss', loss_batch_mean, epoch)
torch.cuda.empty_cache()
else:
raise ValueError('This mode %s was not implemented. Please choose between train and valid' % model_mode)
return results_batch_df, accuracy_batch_mean, loss_batch_mean, global_step
def test(model, dataloader, use_cuda, criterion):
"""
Computes the balanced accuracy of the model
:param model: the network (subclass of nn.Module)
:param dataloader: a DataLoader wrapping a dataset
:param use_cuda: if True a gpu is used
:param criterion: (loss) function to calculate the loss
:return:
(DataFrame) results of each session
(dict) ensemble of metrics + total loss
"""
softmax = torch.nn.Softmax(dim=1)
columns = ['participant_id', 'session_id', 'patch_id', 'true_label', 'predicted_label', 'proba0', 'proba1']
results_df = pd.DataFrame(columns=columns)
total_loss = 0
if use_cuda:
model.cuda()
model.eval() # set the model to evaluation mode
torch.cuda.empty_cache()
with torch.no_grad():
for i, data in enumerate(dataloader):
if use_cuda:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
output = model(imgs)
normalized_output = softmax(output)
loss = criterion(output, labels)
total_loss += loss.item()
_, predicted = torch.max(output.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
row = [sub, data['session_id'][idx], data['patch_id'][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]
row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
results_df = pd.concat([results_df, row_df])
del imgs, labels, output
torch.cuda.empty_cache()
# calculate the balanced accuracy
results = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
results_df.reset_index(inplace=True, drop=True)
results['total_loss'] = total_loss
torch.cuda.empty_cache()
return results_df, results
def evaluate_prediction(y, y_hat):
"""
This is a function to calculate the different metrics based on the list of true label and predicted label
:param y: list of labels
:param y_hat: list of predictions
:return: (dict) ensemble of metrics
"""
true_positive = 0.0
true_negative = 0.0
false_positive = 0.0
false_negative = 0.0
tp = []
tn = []
fp = []
fn = []
for i in range(len(y)):
if y[i] == 1:
if y_hat[i] == 1:
true_positive += 1
tp.append(i)
else:
false_negative += 1
fn.append(i)
        else:  # y[i] == 0
if y_hat[i] == 0:
true_negative += 1
tn.append(i)
else:
false_positive += 1
fp.append(i)
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
'confusion_matrix': {'tp': len(tp), 'tn': len(tn), 'fp': len(fp), 'fn': len(fn)}
}
return results
#################################
# Voting systems
#################################
def patch_level_to_tsvs(output_dir, results_df, results, fold, selection, dataset='train', cnn_index=None):
"""
Save the outputs of the test function to tsv files.
:param output_dir: (str) path to the output directory.
:param results_df: (DataFrame) the individual results per patch.
:param results: (dict) the performances obtained on a series of metrics.
:param fold: (int) the fold for which the performances were obtained.
:param selection: (str) the metrics on which the model was selected (best_acc, best_loss)
:param dataset: (str) the dataset on which the evaluation was performed.
:param cnn_index: (int) provide the cnn_index only for a multi-cnn framework.
:return:
"""
if cnn_index is None:
performance_dir = os.path.join(output_dir, 'performances', 'fold_' + str(fold), selection)
else:
performance_dir = os.path.join(output_dir, 'performances', 'fold_' + str(fold), 'cnn-' + str(cnn_index),
selection)
if not os.path.exists(performance_dir):
os.makedirs(performance_dir)
results_df.to_csv(os.path.join(performance_dir, dataset + '_patch_level_result-patch_index.tsv'), index=False,
sep='\t')
del results['confusion_matrix']
pd.DataFrame(results, index=[0]).to_csv(os.path.join(performance_dir, dataset + '_patch_level_metrics.tsv'),
index=False, sep='\t')
def retrieve_patch_level_results(output_dir, fold, selection, dataset, num_cnn):
"""Retrieve performance_df for single or multi-CNN framework."""
if num_cnn is None:
result_tsv = os.path.join(output_dir, 'performances', 'fold_%i' % fold, selection,
dataset + '_patch_level_result-patch_index.tsv')
performance_df = pd.read_csv(result_tsv, sep='\t')
else:
performance_df = pd.DataFrame()
for cnn in range(num_cnn):
tsv_path = os.path.join(output_dir, 'performances', 'fold_%i' % fold, 'cnn-%i' % cnn, selection,
dataset + '_patch_level_result-patch_index.tsv')
cnn_df = pd.read_csv(tsv_path, sep='\t')
performance_df = pd.concat([performance_df, cnn_df])
performance_df.reset_index(drop=True, inplace=True)
return performance_df
def soft_voting_to_tsvs(output_dir, fold, selection, dataset='test', num_cnn=None, selection_threshold=None):
"""
Save soft voting results to tsv files.
:param output_dir: (str) path to the output directory.
:param fold: (int) Fold number of the cross-validation.
:param selection: (str) criterion on which the model is selected (either best_loss or best_acc)
:param dataset: (str) name of the dataset for which the soft-voting is performed. If different from training or
validation, the weights of soft voting will be computed on validation accuracies.
:param num_cnn: (int) if given load the patch level results of a multi-CNN framework.
:param selection_threshold: (float) all patches for which the classification accuracy is below the
threshold is removed.
"""
# Choose which dataset is used to compute the weights of soft voting.
if dataset in ['train', 'validation']:
validation_dataset = dataset
else:
validation_dataset = 'validation'
test_df = retrieve_patch_level_results(output_dir, fold, selection, dataset, num_cnn)
validation_df = retrieve_patch_level_results(output_dir, fold, selection, validation_dataset, num_cnn)
performance_path = os.path.join(output_dir, 'performances', 'fold_%i' % fold, selection)
if not os.path.exists(performance_path):
os.makedirs(performance_path)
df_final, metrics = soft_voting(test_df, validation_df, selection_threshold=selection_threshold)
df_final.to_csv(os.path.join(os.path.join(performance_path, dataset + '_subject_level_result_soft_vote.tsv')),
index=False, sep='\t')
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(output_dir, 'performances', 'fold_%i' % fold, selection,
dataset + '_subject_level_metrics_soft_vote.tsv'),
index=False, sep='\t')
def soft_voting(performance_df, validation_df, selection_threshold=None):
"""
Computes soft voting based on the probabilities in performance_df. Weights are computed based on the accuracies
of validation_df.
ref: <NAME>. Python Machine Learning., 2015
:param performance_df: (DataFrame) results on patch level of the set on which the combination is made.
:param validation_df: (DataFrame) results on patch level of the set used to compute the weights.
:param selection_threshold: (float) if given, all patches for which the classification accuracy is below the
threshold is removed.
:return:
- df_final (DataFrame) the results on the subject level
- results (dict) the metrics on the subject level
"""
# Compute the patch accuracies on the validation set:
right_classified_df = validation_df[validation_df['true_label'] == validation_df['predicted_label']]
n_valid = len(validation_df.groupby(['participant_id', 'session_id']).nunique())
patch_accuracies = right_classified_df['patch_id'].value_counts() / n_valid
if selection_threshold is not None:
patch_accuracies[patch_accuracies < selection_threshold] = 0
weight_series = patch_accuracies / patch_accuracies.sum()
# Add the weights to performance_df
for idx in performance_df.index.values:
patch_id = performance_df.loc[idx, 'patch_id']
weight = weight_series.loc[patch_id]
performance_df.loc[idx, 'weight'] = weight
# do soft majority vote
columns = ['participant_id', 'session_id', 'true_label', 'predicted_label']
df_final = pd.DataFrame(columns=columns)
for subject_session, subject_df in performance_df.groupby(['participant_id', 'session_id']):
subject, session = subject_session
num_patch = len(subject_df.predicted_label)
p0_all = 0
p1_all = 0
# reindex the subject_df.probability
proba0_series_reindex = subject_df.proba0.reset_index()
proba1_series_reindex = subject_df.proba1.reset_index()
weight_series_reindex = subject_df.weight.reset_index()
y_series_reindex = subject_df.true_label.reset_index()
y = y_series_reindex.true_label[0]
for i in range(num_patch):
p0 = weight_series_reindex.weight[i] * float(proba0_series_reindex.proba0[i])
p1 = weight_series_reindex.weight[i] * float(proba1_series_reindex.proba1[i])
p0_all += p0
p1_all += p1
proba_list = [p0_all, p1_all]
y_hat = proba_list.index(max(proba_list))
row_array = np.array(list([subject, session, y, y_hat])).reshape(1, 4)
row_df = pd.DataFrame(row_array, columns=columns)
        df_final = pd.concat([df_final, row_df])
results = evaluate_prediction(df_final.true_label.values.astype(int),
df_final.predicted_label.values.astype(int))
del results['confusion_matrix']
return df_final, results
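def _soft_voting_demo():
    """Illustrative sketch, not part of the original file: two patches of one
    synthetic subject are combined into a single subject-level prediction."""
    df = pd.DataFrame({
        'participant_id': ['sub-01', 'sub-01'],
        'session_id': ['ses-M00', 'ses-M00'],
        'patch_id': [0, 1],
        'true_label': [1, 1],
        'predicted_label': [1, 1],
        'proba0': [0.3, 0.4],
        'proba1': [0.7, 0.6],
    })
    df_final, metrics = soft_voting(df, df)
    print(df_final)              # one subject-level row for sub-01 / ses-M00
    print(metrics['accuracy'])   # 1.0 on this toy example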
#################################
# Datasets
#################################
class MRIDataset_patch(Dataset):
def __init__(self, caps_directory, data_file, patch_size, stride_size, transformations=None, prepare_dl=False,
patch_index=None):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string): File name of the train/test split file.
transformations (callable, optional): Optional transformations to be applied on a sample.
"""
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1}
self.patch_size = patch_size
self.stride_size = stride_size
self.prepare_dl = prepare_dl
self.patch_index = patch_index
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
('participant_id' not in list(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include ['participant_id', 'session_id', 'diagnosis']")
self.patchs_per_patient = self.num_patches_per_session()
def __len__(self):
return len(self.df) * self.patchs_per_patient
def __getitem__(self, idx):
sub_idx = idx // self.patchs_per_patient
img_name = self.df.loc[sub_idx, 'participant_id']
sess_name = self.df.loc[sub_idx, 'session_id']
img_label = self.df.loc[sub_idx, 'diagnosis']
label = self.diagnosis_code[img_label]
if self.patch_index is None:
patch_idx = idx % self.patchs_per_patient
else:
patch_idx = self.patch_index
if self.prepare_dl:
patch_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1_patchsize-' + str(self.patch_size)
+ '_stride-' + str(self.stride_size) + '_patch-' + str(patch_idx) + '.pt')
patch = torch.load(patch_path)
else:
image_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
image = torch.load(image_path)
patch = extract_patch_from_mri(image, patch_idx, self.patch_size, self.stride_size)
# check if the patch has NaN value
if torch.isnan(patch).any():
print("Double check, this patch has NaN value: %s" % str(img_name + '_' + sess_name + str(patch_idx)))
patch[torch.isnan(patch)] = 0
if self.transformations:
patch = self.transformations(patch)
sample = {'image_id': img_name + '_' + sess_name + '_patch' + str(patch_idx), 'image': patch, 'label': label,
'participant_id': img_name, 'session_id': sess_name, 'patch_id': patch_idx}
return sample
def num_patches_per_session(self):
if self.patch_index is not None:
return 1
img_name = self.df.loc[0, 'participant_id']
sess_name = self.df.loc[0, 'session_id']
image_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
image = torch.load(image_path)
patches_tensor = image.unfold(1, self.patch_size, self.stride_size
).unfold(2, self.patch_size, self.stride_size
).unfold(3, self.patch_size, self.stride_size).contiguous()
patches_tensor = patches_tensor.view(-1, self.patch_size, self.patch_size, self.patch_size)
num_patches = patches_tensor.shape[0]
return num_patches
class MRIDataset_patch_hippocampus(Dataset):
def __init__(self, caps_directory, data_file, transformations=None):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string): File name of the train/test split file.
transformations (callable, optional): Optional transformations to be applied on a sample.
"""
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1}
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
('participant_id' not in list(self.df.columns.values)):
            raise Exception("The data file is not in the correct format. "
                            "Columns should include ['participant_id', 'session_id', 'diagnosis']")
self.patchs_per_patient = 2
def __len__(self):
return len(self.df) * self.patchs_per_patient
def __getitem__(self, idx):
sub_idx = idx // self.patchs_per_patient
img_name = self.df.loc[sub_idx, 'participant_id']
sess_name = self.df.loc[sub_idx, 'session_id']
img_label = self.df.loc[sub_idx, 'diagnosis']
label = self.diagnosis_code[img_label]
# 1 is left hippocampus, 0 is right
left_is_odd = idx % self.patchs_per_patient
if left_is_odd == 1:
patch_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1',
'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1_hippocampus_hemi-left.pt')
else:
patch_path = os.path.join(self.caps_directory, 'subjects', img_name, sess_name, 't1',
'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1_hippocampus_hemi-right.pt')
patch = torch.load(patch_path)
# check if the patch has NaN value
if torch.isnan(patch).any():
            print("Warning: patch %s contains NaN values, replacing them with 0" % str(img_name + '_' + sess_name + str(left_is_odd)))
patch[torch.isnan(patch)] = 0
if self.transformations:
patch = self.transformations(patch)
sample = {'image_id': img_name + '_' + sess_name + '_patch' + str(left_is_odd), 'image': patch, 'label': label,
'participant_id': img_name, 'session_id': sess_name, 'patch_id': left_is_odd}
return sample
def extract_patch_from_mri(image_tensor, index_patch, patch_size, stride_size):
    # use torch.Tensor.unfold to crop the patch.
patches_tensor = image_tensor.unfold(1, patch_size, stride_size
).unfold(2, patch_size, stride_size
).unfold(3, patch_size, stride_size).contiguous()
patches_tensor = patches_tensor.view(-1, patch_size, patch_size, patch_size)
extracted_patch = patches_tensor[index_patch, ...].unsqueeze_(0).clone()
return extracted_patch
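# Illustrative sanity check for extract_patch_from_mri (added sketch, not part of the
# original module; the 169x208x179 volume size and the 50/50 patch/stride values are
# assumptions made only for this example).
# image.unfold(dim, size, step) produces floor((length - size) / step) + 1 windows per
# spatial dimension, so a (1, 169, 208, 179) volume with patch_size=50 and
# stride_size=50 yields 3 * 4 * 3 = 36 patches, indexed 0..35.
if __name__ == "__main__":
    _demo_image = torch.rand(1, 169, 208, 179)
    _demo_patch = extract_patch_from_mri(_demo_image, 0, patch_size=50, stride_size=50)
    assert _demo_patch.shape == (1, 50, 50, 50)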
```
#### File: preprocessing/model/squezenet_qc.py
```python
import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from torchvision import models
from torch.nn.parameter import Parameter
# based on https://github.com/pytorch/vision/blob/master/torchvision/models/squeezenet.py
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNetQC(nn.Module):
def __init__(self, version=1.0, num_classes=2, use_ref=False):
super(SqueezeNetQC, self).__init__()
self.use_ref = use_ref
self.feat = 3
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(2 if use_ref else 1, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(2 if use_ref else 1, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512*self.feat, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AvgPool2d(13, stride=1)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
                    init.normal_(m.weight.data, mean=0.0, std=0.01)
else:
                    init.kaiming_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
# split feats into batches, so each view is passed separately
        x = x.view(-1, 2 if self.use_ref else 1, 224, 224)
        x = self.features(x)
        # reshape input to take into account 3 views
        x = x.view(-1, 512 * self.feat, 13, 13)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def load_from_std(self, std_model):
        # import weights from the standard SqueezeNet model
# TODO: finish
# first load all standard items
own_state = self.state_dict()
for name, param in std_model.state_dict().items():
if name == 'features.0.weight':
if isinstance(param, Parameter):
param = param.data
                # convert to mono weight:
                # collapse parameters along the channel dimension, emulating a grayscale feature
                mono_param = param.sum(1, keepdim=True)
                if self.use_ref:
                    own_state[name].copy_(torch.cat((mono_param, mono_param), 1))
                else:
                    own_state[name].copy_(mono_param)
pass
elif name == 'classifier.1.weight' or name == 'classifier.1.bias':
# don't use at all
pass
elif name in own_state:
if isinstance(param, Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
def squeezenet_qc(pretrained=False, **kwargs):
"""Constructs a SqueezeNet 1.1 model
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNetQC(version=1.1, **kwargs)
if pretrained:
        # load the standard SqueezeNet model pretrained on ImageNet
model_ft = models.squeezenet1_1(pretrained=True)
model.load_from_std(model_ft)
return model
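# Illustrative usage sketch (added; not part of the original file). The shapes follow
# SqueezeNetQC.forward: each subject contributes 3 views of size 224x224 with a single
# channel (two channels when use_ref=True), stacked along dimension 1.
if __name__ == "__main__":
    _model = squeezenet_qc(pretrained=False, num_classes=2, use_ref=False)
    _views = torch.rand(4, 3, 224, 224)   # batch of 4 subjects, 3 views each
    print(_model(_views).shape)           # expected: torch.Size([4, 2])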
```
#### File: clinicadl/preprocessing/T1_postprocessing.py
```python
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2018-2020 The Aramis Lab Team"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE.txt file"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def postprocessing_t1w(caps_directory,
tsv,
patch_size,
stride_size,
working_directory=None,
extract_method='slice',
slice_direction=0,
slice_mode='original'):
"""
    This is a postprocessing pipeline to prepare the slice-level and
    patch-level data from the entire MRI and save them on disk. This
    facilitates the training process:
        - For the slice-level CNN, all slices are extracted from the entire
          MRI along three different axes. The first and last 15 slices are
          discarded due to the lack of information.
        - For the patch-level CNN, 3D patches (with a specific patch size)
          are extracted with a sliding 3D window.
Parameters
----------
caps_directory: str
CAPS directory where stores the output of preprocessing.
tsv: str
        TSV file with the subject list (participant_id and session_id).
patch_size: int
Size for extracted 3D patches.
stride_size: int
        Stride of the sliding window used for patch extraction.
working_directory: str
Folder containing a temporary space to save intermediate results.
Returns
-------
    wf: nipype.pipeline.engine.workflows.Workflow
        A nipype Workflow object to control, set up and execute the process
        as a nipype pipeline.
"""
# test.py
print(__name__)
# try:
# # Trying to find module in the parent package
# import preprocessing.T1_postprocessing_utils
# print(T1_postprocessing_utils.debug)
# del T1_postprocessing_utils
# except ModuleNotFoundError:
# print('Relative import failed')
#
# try:
# # Trying to find module on sys.path
# import T1_postprocessing_utils
# print(T1_postprocessing_utils.debug)
# except ModuleNotFoundError:
# print('Absolute import failed')
#
import nipype.interfaces.io as nio
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
import tempfile
from .T1_postprocessing_utils import (get_caps_t1,
extract_slices, extract_patches, save_as_pt)
if working_directory is None:
working_directory = tempfile.mkdtemp()
inputnode = npe.Node(
nutil.IdentityInterface(
fields=['caps_directory', 'tsv', 'patch_size', 'stride_size']),
name='inputnode'
)
inputnode.inputs.caps_directory = caps_directory
inputnode.inputs.tsv = tsv
inputnode.inputs.patch_size = patch_size
inputnode.inputs.stride_size = stride_size
get_subject_session_list = npe.Node(
name='get_subject_session_list',
interface=nutil.Function(
function=get_caps_t1,
input_names=['caps_directory', 'tsv'],
output_names=['preprocessed_T1']
)
)
    ## save nii.gz into pytorch .pt format.
save_as_pt = npe.MapNode(
name='save_as_pt',
iterfield=['input_img'],
interface=nutil.Function(
function=save_as_pt,
input_names=['input_img'],
output_names=['output_file']
)
)
## extract the slices from 3 directions.
extract_slices = npe.MapNode(
name='extract_slices',
iterfield=['preprocessed_T1'],
interface=nutil.Function(
function=extract_slices,
input_names=[
'preprocessed_T1', 'slice_direction',
'slice_mode'
],
output_names=['preprocessed_T1']
)
)
extract_slices.inputs.slice_direction = slice_direction
extract_slices.inputs.slice_mode = slice_mode
## extract the patches.
extract_patches = npe.MapNode(
name='extract_patches',
iterfield=['preprocessed_T1'],
interface=nutil.Function(
function=extract_patches,
input_names=['preprocessed_T1', 'patch_size', 'stride_size'],
output_names=['preprocessed_T1']
)
)
outputnode = npe.Node(
nutil.IdentityInterface(
fields=['preprocessed_T1']),
name='outputnode'
)
wf = npe.Workflow(name='t1w_postprocessing_dl')
wf.base_dir = working_directory
if extract_method == 'slice':
wf.connect(
[
(inputnode, get_subject_session_list, [('tsv', 'tsv')]),
(inputnode, get_subject_session_list, [
('caps_directory', 'caps_directory')
]),
(get_subject_session_list, save_as_pt, [
('preprocessed_T1', 'input_img')
]),
(save_as_pt, extract_slices, [
('output_file', 'preprocessed_T1')
]),
(extract_slices, outputnode, [
('preprocessed_T1', 'preprocessed_T1')
]),
]
)
elif extract_method == 'patch':
wf.connect([
(inputnode, get_subject_session_list, [('tsv', 'tsv')]),
(inputnode, get_subject_session_list, [('caps_directory', 'caps_directory')]),
(get_subject_session_list, save_as_pt, [('preprocessed_T1', 'input_img')]),
(save_as_pt, extract_patches, [('output_file', 'preprocessed_T1')]),
(inputnode, extract_patches, [('patch_size', 'patch_size')]),
(inputnode, extract_patches, [('stride_size', 'stride_size')]),
])
else:
wf.connect([
(inputnode, get_subject_session_list, [('tsv', 'tsv')]),
(inputnode, get_subject_session_list, [('caps_directory', 'caps_directory')]),
(get_subject_session_list, save_as_pt, [('preprocessed_T1', 'input_img')]),
(save_as_pt, extract_slices, [('output_file', 'preprocessed_T1')]),
(get_subject_session_list, save_as_pt, [('preprocessed_T1', 'input_img')]),
(save_as_pt, extract_patches, [('output_file', 'preprocessed_T1')]),
(inputnode, extract_patches, [('patch_size', 'patch_size')]),
(inputnode, extract_patches, [('stride_size', 'stride_size')]),
(extract_slices, outputnode, [('preprocessed_T1', 'preprocessed_T1')]),
])
return wf
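# Illustrative usage sketch (added; the CAPS and TSV paths are placeholders). The
# returned object is a regular nipype Workflow, so it is executed with run():
#     wf = postprocessing_t1w('/path/to/CAPS', '/path/to/subjects_sessions.tsv',
#                             patch_size=50, stride_size=50, extract_method='patch')
#     wf.run()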
```
#### File: clinicadl/slice_level/train_CNN.py
```python
import argparse
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
import copy
import torch
import os
import numpy as np
from time import time
from .utils import MRIDataset_slice, train, test, slice_level_to_tsvs, soft_voting_to_tsvs
from ..tools.deep_learning import EarlyStopping, save_checkpoint, commandline_to_json, create_model, load_model
from ..tools.deep_learning.data import load_data, MinMaxNormalization
__author__ = "<NAME>"
__copyright__ = "Copyright 2018-2020 The Aramis Lab Team"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>, <NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
# Train 2D CNN - Slice level network
# The input MRI's dimension is 169*208*179 after cropping
def train_slice(params):
# Initialize the model
    print('Performing transfer learning with a model pretrained on ImageNet!\n')
print('The chosen network is %s !' % params.model)
model = create_model(params.model, params.gpu)
    trg_size = (224, 224)  # most ImageNet-pretrained models expect this input size
# All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB
# images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded in
# to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
transformations = transforms.Compose([MinMaxNormalization(),
transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor()])
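    # Note (added): the ImageNet mean/std normalization described in the comment above
    # would correspond to an extra step such as
    #     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # appended after ToTensor(); it is not part of this Compose.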
    # measure the total computation time
total_time = time()
init_state = copy.deepcopy(model.state_dict())
if params.split is None:
fold_iterator = range(params.n_splits)
else:
fold_iterator = [params.split]
for fi in fold_iterator:
training_tsv, valid_tsv = load_data(params.tsv_path, params.diagnoses, fi,
n_splits=params.n_splits, baseline=params.baseline)
print("Running for the %d-th fold" % fi)
data_train = MRIDataset_slice(params.input_dir, training_tsv, transformations=transformations,
mri_plane=params.mri_plane, prepare_dl=params.prepare_dl)
data_valid = MRIDataset_slice(params.input_dir, valid_tsv, transformations=transformations,
mri_plane=params.mri_plane, prepare_dl=params.prepare_dl)
# Use argument load to distinguish training and testing
train_loader = DataLoader(data_train,
batch_size=params.batch_size,
shuffle=True,
num_workers=params.num_workers,
pin_memory=True)
valid_loader = DataLoader(data_valid,
batch_size=params.batch_size,
shuffle=False,
num_workers=params.num_workers,
pin_memory=True)
# chosen optimizer for back-propagation
optimizer = eval("torch.optim." + params.optimizer)(filter(lambda x: x.requires_grad, model.parameters()),
params.learning_rate, weight_decay=params.weight_decay)
model.load_state_dict(init_state)
        # Cross-entropy loss for the two-class problem
loss = torch.nn.CrossEntropyLoss()
# parameters used in training
best_accuracy = 0.0
best_loss_valid = np.inf
writer_train_batch = SummaryWriter(log_dir=(os.path.join(params.output_dir, "log_dir", "fold_%i" % fi,
"train_batch")))
writer_train_all_data = SummaryWriter(log_dir=(os.path.join(params.output_dir, "log_dir", "fold_%i" % fi,
"train_all_data")))
writer_valid = SummaryWriter(log_dir=(os.path.join(params.output_dir, "log_dir", "fold_%i" % fi, "valid")))
# initialize the early stopping instance
early_stopping = EarlyStopping('min', min_delta=params.tolerance, patience=params.patience)
for epoch in range(params.epochs):
print("At %i-th epoch." % epoch)
# train the model
train_df, acc_mean_train, loss_batch_mean_train, global_step \
= train(model, train_loader, params.gpu, loss, optimizer, writer_train_batch, epoch,
model_mode='train', selection_threshold=params.selection_threshold)
# calculate the accuracy with the whole training data for subject level balanced accuracy
train_all_df, acc_mean_train_all, loss_batch_mean_train_all, _\
= train(model, train_loader, params.gpu, loss, optimizer, writer_train_all_data, epoch,
model_mode='valid', selection_threshold=params.selection_threshold)
print("For training, subject level balanced accuracy is %f at the end of epoch %d" % (acc_mean_train_all, epoch))
            # at the end of each epoch, validate the model once on the validation data
valid_df, acc_mean_valid, loss_batch_mean_valid, _ =\
train(model, valid_loader, params.gpu, loss, optimizer, writer_valid, epoch,
model_mode='valid', selection_threshold=params.selection_threshold)
print("For validation, subject level balanced accuracy is %f at the end of epoch %d" % (acc_mean_valid, epoch))
# save the best model based on the best loss and accuracy
acc_is_best = acc_mean_valid > best_accuracy
best_accuracy = max(best_accuracy, acc_mean_valid)
loss_is_best = loss_batch_mean_valid < best_loss_valid
best_loss_valid = min(loss_batch_mean_valid, best_loss_valid)
save_checkpoint({
'epoch': epoch + 1,
'model': model.state_dict(),
'loss': loss_batch_mean_valid,
'accuracy': acc_mean_valid,
'optimizer': optimizer.state_dict(),
'global_step': global_step},
acc_is_best, loss_is_best,
os.path.join(params.output_dir, "best_model_dir", "fold_" + str(fi), "CNN"))
# try early stopping criterion
if early_stopping.step(loss_batch_mean_valid) or epoch == params.epochs - 1:
                print("Training stopped at epoch %d (early stopping triggered or last epoch reached)" % epoch)
break
# Final evaluation for all criteria
for selection in ['best_loss', 'best_acc']:
model, best_epoch = load_model(model, os.path.join(params.output_dir, 'best_model_dir', 'fold_%i' % fi,
'CNN', str(selection)),
gpu=params.gpu, filename='model_best.pth.tar')
train_df, metrics_train = test(model, train_loader, params.gpu, loss)
valid_df, metrics_valid = test(model, valid_loader, params.gpu, loss)
# write the information of subjects and performances into tsv files.
slice_level_to_tsvs(params.output_dir, train_df, metrics_train, fi,
dataset='train', selection=selection)
slice_level_to_tsvs(params.output_dir, valid_df, metrics_valid, fi,
dataset='validation', selection=selection)
soft_voting_to_tsvs(params.output_dir, fi, dataset='train', selection=selection,
selection_threshold=params.selection_threshold)
soft_voting_to_tsvs(params.output_dir, fi, dataset='validation', selection=selection,
selection_threshold=params.selection_threshold)
del optimizer
torch.cuda.empty_cache()
total_time = time() - total_time
print("Total time of computation: %d s" % total_time)
```
#### File: clinicadl/subject_level/train_autoencoder.py
```python
from __future__ import print_function
import argparse
from os import path
from time import time
import sys
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from .utils import ae_finetuning
from ..tools.deep_learning.iotools import Parameters
from ..tools.deep_learning.data import MinMaxNormalization, MRIDataset, load_data
from ..tools.deep_learning import create_autoencoder, commandline_to_json
def train_autoencoder(params):
""" Parameters
    params: class from the iotools module containing all the parameters for
        training an autoencoder.
"""
if params.evaluation_steps % params.accumulation_steps != 0 and params.evaluation_steps != 1:
raise Exception('Evaluation steps %d must be a multiple of accumulation steps %d' %
(params.evaluation_steps, params.accumulation_steps))
if params.minmaxnormalization:
transformations = MinMaxNormalization()
else:
transformations = None
total_time = time()
criterion = torch.nn.MSELoss()
training_tsv, valid_tsv = load_data(params.tsv_path, params.diagnoses,
params.split, params.n_splits,
params.baseline)
data_train = MRIDataset(params.input_dir, training_tsv,
params.preprocessing, transformations)
data_valid = MRIDataset(params.input_dir, valid_tsv,
params.preprocessing, transformations)
# Use argument load to distinguish training and testing
train_loader = DataLoader(data_train,
params.batch_size,
shuffle=True,
num_workers=params.num_workers,
drop_last=True
)
valid_loader = DataLoader(data_valid,
batch_size=params.batch_size,
shuffle=False,
num_workers=params.num_workers,
drop_last=False
)
text_file = open(path.join(params.output_dir, 'python_version.txt'), 'w')
text_file.write('Version of python: %s \n' % sys.version)
text_file.write('Version of pytorch: %s \n' % torch.__version__)
text_file.close()
decoder = create_autoencoder(params.model, params.pretrained_path,
difference=params.pretrained_difference)
optimizer = eval("torch.optim." + params.optimizer)(filter(lambda x: x.requires_grad, decoder.parameters()), params.learning_rate, weight_decay=params.weight_decay)
if params.add_sigmoid:
if isinstance(decoder.decoder[-1], nn.ReLU):
decoder.decoder = nn.Sequential(*list(decoder.decoder)[:-1])
decoder.decoder.add_module("sigmoid", nn.Sigmoid())
ae_finetuning(decoder, train_loader, valid_loader, criterion, optimizer, False, params)
total_time = time() - total_time
print('Total time', total_time)
#if __name__ == "__main__":
# commandline = parser.parse_known_args()
# commandline_to_json(commandline, 'ConvAutoencoder')
# options = commandline[0]
# if commandline[1]:
# print("unknown arguments: %s" % parser.parse_known_args()[1])
# train_params_autoencoder = Parameters(tsv_path, output_dir, input_dir, model)
# train_params_autoencoder.write(options)
# train_autoencoder(train_parameters_autoencoder)
```
#### File: deep_learning/models/iotools.py
```python
def save_checkpoint(state, accuracy_is_best, loss_is_best, checkpoint_dir, filename='checkpoint.pth.tar',
best_accuracy='best_acc', best_loss='best_loss'):
import torch
import os
import shutil
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
torch.save(state, os.path.join(checkpoint_dir, filename))
if accuracy_is_best:
best_accuracy_path = os.path.join(checkpoint_dir, best_accuracy)
if not os.path.exists(best_accuracy_path):
os.makedirs(best_accuracy_path)
shutil.copyfile(os.path.join(checkpoint_dir, filename), os.path.join(best_accuracy_path, 'model_best.pth.tar'))
if loss_is_best:
best_loss_path = os.path.join(checkpoint_dir, best_loss)
if not os.path.exists(best_loss_path):
os.makedirs(best_loss_path)
shutil.copyfile(os.path.join(checkpoint_dir, filename), os.path.join(best_loss_path, 'model_best.pth.tar'))
def load_model(model, checkpoint_dir, gpu, filename='model_best.pth.tar'):
"""
Load the weights written in checkpoint_dir in the model object.
:param model: (Module) CNN in which the weights will be loaded.
    :param checkpoint_dir: (str) path to the folder containing the parameters to be loaded.
    :param gpu: (bool) if True a gpu is used.
    :param filename: (str) name of the file containing the parameters to be loaded.
    :return: (Module) the updated model.
"""
from copy import deepcopy
import torch
import os
best_model = deepcopy(model)
param_dict = torch.load(os.path.join(checkpoint_dir, filename), map_location="cpu")
best_model.load_state_dict(param_dict['model'])
if gpu:
best_model = best_model.cuda()
return best_model, param_dict['epoch']
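# Illustrative round-trip sketch for save_checkpoint/load_model (added; the model,
# optimizer and directory below are placeholders):
#     state = {'model': cnn.state_dict(), 'optimizer': optimizer.state_dict(),
#              'epoch': 3, 'loss': 0.42, 'accuracy': 0.81, 'name': 'Adam'}
#     save_checkpoint(state, accuracy_is_best=True, loss_is_best=False,
#                     checkpoint_dir='/tmp/fold_0/CNN')
#     cnn, best_epoch = load_model(cnn, '/tmp/fold_0/CNN/best_acc', gpu=False)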
def load_optimizer(optimizer_path, model):
"""
Creates and load the state of an optimizer.
:param optimizer_path: (str) path to the optimizer.
:param model: (Module) model whom parameters will be optimized by the created optimizer.
:return: optimizer initialized with specific state and linked to model parameters.
"""
from os import path
import torch
if not path.exists(optimizer_path):
raise ValueError('The optimizer was not found at path %s' % optimizer_path)
print('Loading optimizer')
optimizer_dict = torch.load(optimizer_path)
name = optimizer_dict["name"]
optimizer = eval("torch.optim." + name)(filter(lambda x: x.requires_grad, model.parameters()))
optimizer.load_state_dict(optimizer_dict["optimizer"])
``` |
{
"source": "921kiyo/3d-dl",
"score": 2
} |
#### File: 921kiyo/3d-dl/main.py
```python
from kerasmodels import retrain
from src.rendering import render_pipeline
import os
"This script generates artefacts which are saved in folder render_workspace"
def main():
############################################################################
################################ PARAMETERS ################################
############################################################################
############################### INPUT PATHS ###############################
# test and validation data
validation_dir = os.path.join(os.getcwd(), "demo_images", "test")
test_dir = os.path.join(os.getcwd(), "demo_images", "validation")
# path to blender executable
bl_path = "PATH/TO/BLENDER/INSTALLATION"
# path to render workspace folder
workspace = os.path.join(os.getcwd(), "render_workspace")
# path to folder containing a set of .model files
obj_set = os.path.join(workspace, "object_files", "two_set") # obj files
model_filename = "model.h5"
############################################################################
############################## NEURAL NETWORK ##############################
# Neural Network Parameters
dense_layers = 1
dense_dim = 1024
dropout = 0
# if true, some of the inceptionV3 layers will be trained for 5 epochs at the end of training
fine_tune = False
# if True, it adds SP noise
add_salt_pepper_noise = False
# 0 = no augmentation, 1 = rotation only, 2 = rotation & zoom
augmentation_mode = 0
epochs = 10
input_dim = 224
############################################################################
################################ BACKGROUND ################################
# Choose background type: 'SUN', 'random', 'white', 'indoor', 'outdoor'
background_type = "indoor"
# Choose whether to adjust background brightness to product brightness
adjust_brightness = False
############################################################################
################################ RENDERING ################################
# choose how many images to render per class
renders_per_class = 10
# Rendering Parameters
blender_attributes = {
"attribute_distribution_params": [
# number of lamps is a DISCRETE UNIFORM DISTRIBUTION over NON_NEGATIVE INTEGERS,
# params l and r are lower and upper bounds of distributions, need to be positive integers
["num_lamps", "mid", 6],
["num_lamps", "scale", 0.3],
# lamp energy is a TRUNCATED NORMAL DISTRIBUTION, param descriptions same as above
["lamp_energy", "mu", 5000.0],
["lamp_energy", "sigmu", 0.3],
# camera location is a COMPOSITE SHELL RING DISTRIBUTION
# param normals define which rings to use, based on their normals, permitted values are 'X','Y','Z' and a combination of the three
# phi sigma needs to be non-negative, and defines the spread of the ring in terms of degrees
            # phi sigma of roughly 30.0 corresponds to a uniform sphere
["camera_loc", "phi_sigma", 10.0],
# camera radius is a Truncated Normal Distribution
["camera_radius", "mu", 6.0],
["camera_radius", "sigmu", 0.3],
],
"attribute_distribution": [],
}
############################################################################
############################################################################
################################ EXECUTION ################################
############################################################################
# Set backround image database path
background_database = os.path.join(workspace, "bg_database", background_type)
# determine whether to generate random backgrounds
generate_background = False
    if background_type == "random":
generate_background = True
# construct rendering parameters
arguments = {
"obj_set": obj_set,
"blender_path": bl_path,
"renders_per_class": renders_per_class,
"work_dir": workspace,
"generate_background": generate_background,
"background_database": background_database,
"blender_attributes": blender_attributes,
}
# run blender pipeline and produce a zip with all rendered images
path_of_zip = render_pipeline.full_run(**arguments)
# load train images from the zip file
unzipped_dir = retrain.unzip_and_return_path_to_folder(path_of_zip)
train_dir = unzipped_dir + "/images"
# get path for classes.txt
main_dir, filename = os.path.split(path_of_zip)
# default batch size = 64 but choose lower batch size if few images rendered
batch_size = min(renders_per_class // 2, 64)
# initialize & train model
model = retrain.KerasInception(
input_dim=input_dim,
batch_size=batch_size,
dense_layers=dense_layers,
dropout=dropout,
dense_dim=dense_dim,
)
# train the network
history = model.train(
train_dir=train_dir,
validation_dir=validation_dir,
fine_tune=fine_tune,
epochs=epochs,
salt_pepper=add_salt_pepper_noise,
augmentation_params=retrain.get_augmentation_params(augmentation_mode),
# classes_txt_dir=main_dir,
save_model=True,
steps_per_epoch=renders_per_class,
)
loss, acc = model.evaluate(test_dir)
print("test accuracy of the model is: ", acc)
print("Model is being saved in ", os.path.join(os.getcwd(), model_filename))
model.save_model(os.path.join(os.getcwd(), model_filename))
if os.path.exists(path_of_zip):
os.remove(path_of_zip)
if __name__ == "__main__":
main()
``` |
{
"source": "921kiyo/symbolic-rl",
"score": 3
} |
#### File: 921kiyo/symbolic-rl/analysis.py
```python
from lib import plotting
import gym
import gym_vgdl
import os.path
base_dir = os.path.dirname(os.path.abspath(__file__))
def experiment_learning(exp_no):
pkl_dir = os.path.join(base_dir, "result_pkl/experiment{}_x".format(str(exp_no)))
pkl_dir_q = os.path.join(base_dir, "result_pkl/experiment{}_q".format(str(exp_no)))
# pkl_dir = os.path.join(base_dir, "result_pkl/experiment1_delete")
# pkl_dir_q = os.path.join(base_dir, "result_pkl/experiment1_q_del")
# make average score for training
plotting.average_score(base_dir, pkl_dir, "exp{}_v".format(str(exp_no)), 100, 30)
# plotting.average_score(base_dir, pkl_dir_q, "exp{}_v".format(str(exp_no)), 100, 30)
# make average score for test
plotting.average_score(base_dir, pkl_dir, "exp{}_test_v".format(str(exp_no)), 100, 30)
# plotting.average_score(base_dir, pkl_dir_q, "exp{}_test_v".format(str(exp_no)), 100, 30)
runtime, total = plotting.average_ILASP(base_dir, pkl_dir, "exp{}_ilasp_v".format(str(exp_no)), 100, 250, 30)
# Load the pkl files
stats = plotting.load_stats(pkl_dir, "exp{}_v_average".format(str(exp_no)))
stats_q = plotting.load_stats(pkl_dir_q, "exp{}_v_average".format(str(exp_no)))
stats_test = plotting.load_stats(pkl_dir, "exp{}_test_v_average".format(str(exp_no)))
stats_q_test = plotting.load_stats(pkl_dir_q, "exp{}_test_v_average".format(str(exp_no)))
stats_ilasp = plotting.load_stats(pkl_dir, "exp{}_ilasp_v_average".format(str(exp_no)))
# plotting.plot_episode_stats_learning(stats, stats_q)
plotting.plot_episode_stats_learning(stats_test, stats_q_test)
plotting.plot_episode_stats_runtime(stats, stats_q)
plotting.plot_ILASP_progress(stats_ilasp)
# plotting.plot_episode_stats_runtime(stats, stats_q)
# def ilasp_runtime(exp_no):
# pkl_dir = os.path.join(base_dir, "result_pkl/experiment{}".format(str(exp_no)))
# pkl_dir_q = os.path.join(base_dir, "result_pkl/experiment{}_q".format(str(exp_no)))
# # pkl_dir = os.path.join(base_dir, "result_pkl/experiment1_delete")
# # pkl_dir_q = os.path.join(base_dir, "result_pkl/experiment1_q_del")
# # make average score for training
# plotting.average_ILASP(base_dir, pkl_dir, "exp{}_v".format(str(exp_no)), 100, 30)
# plotting.average_ILASP(base_dir, pkl_dir_q, "exp{}_v".format(str(exp_no)), 100, 30)
# plotting.plot_ILASP_progress
def experiment_transfer():
pkl_dir = os.path.join(base_dir, "result_pkl/experiment3_after_noTL_noGoal")
pkl_dir2 = os.path.join(base_dir, "result_pkl/experiment3_after_noTL_goal")
pkl_dir3 = os.path.join(base_dir, "result_pkl/experiment3_after_TL")
pkl_dir_q = os.path.join(base_dir, "result_pkl/experiment3_q")
# make average score
plotting.average_score(base_dir, pkl_dir, "exp3_test_v", 100, 30)
plotting.average_score(base_dir, pkl_dir2, "exp4_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir3, "exp3_v", 100, 30)
plotting.average_score(base_dir, pkl_dir_q, "exp3_v", 100, 30)
# Load the pkl files
stats = plotting.load_stats(pkl_dir, "exp3_v_average")
stats2 = plotting.load_stats(pkl_dir2, "exp4_v_average")
stats3 = plotting.load_stats(pkl_dir3, "exp4_after_TL_v_average")
stats_q = plotting.load_stats(pkl_dir_q, "exp3_v_average")
# import ipdb; ipdb.set_trace()
plotting.plot_episode_stats_transfer(stats, stats2, stats3, stats_q)
def experiment3_test():
pkl_dir = os.path.join(base_dir, "result_pkl/experiment3_after_noTL_noGoal")
pkl_dir2 = os.path.join(base_dir, "result_pkl/experiment3_after_noTL_goal")
pkl_dir3 = os.path.join(base_dir, "result_pkl/experiment3_after_TL")
# pkl_dir3 = os.path.join(base_dir, "result_pkl/experiment3_q")
# make average score
# plotting.average_score(base_dir, pkl_dir, "exp3_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir2, "exp4_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir3, "exp3_test_TL_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir_q, "exp3_v", 100, 30)
# Load the pkl files
stats = plotting.load_stats(pkl_dir, "exp3_test_v_average")
stats2 = plotting.load_stats(pkl_dir2, "exp4_test_v_average")
# stats3 = plotting.load_stats(pkl_dir3, "exp4_test_after_TL_v_average")
stats3 = plotting.load_stats(pkl_dir3, "exp3_test_TL_v_average")
plotting.plot_episode_stats_transfer(stats, stats2, stats3)
# experiment3_test()
def experiment4_test():
pkl_dir = os.path.join(base_dir, "result_pkl/experiment4_after_noTL_noGoal")
pkl_dir2 = os.path.join(base_dir, "result_pkl/experiment4_after_noTL_goal")
pkl_dir_afterTL = os.path.join(base_dir, "result_pkl/experiment4_after_TL")
# make average score
# plotting.average_score(base_dir, pkl_dir, "exp4_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir2, "exp4_test_v", 100, 9)
plotting.average_score(base_dir, pkl_dir_afterTL, "exp4_test_TL_v", 100, 1)
# plotting.average_score(base_dir, pkl_dir_q, "exp4_test_v", 100, 30)
# Load the pkl files
stats = plotting.load_stats(pkl_dir, "exp4_test_v_average")
stats2 = plotting.load_stats(pkl_dir2, "exp4_test_v_average")
stats3 = plotting.load_stats(pkl_dir_afterTL, "exp4_test_TL_v_average")
# XXX = os.path.join(base_dir, "result_pkl/experiment_keep_first/experiment4_after_noTL")
# stats2 = plotting.load_stats(XXX, "exp4_test_after_noTL_v_average")
# stats3 = plotting.load_stats(pkl_dir_afterTL, "exp4_test_TL_v0")
# stats_q = plotting.load_stats(pkl_dir_q, "exp4_test_v_average")
# import ipdb; ipdb.set_trace()
plotting.plot_episode_stats_transfer(stats, stats2, stats3)
# plotting.plot_episode_stats_simple(stats2)
# experiment4_test()
def experiment5_test():
pkl_noTL = os.path.join(base_dir, "result_pkl/experiment_keep_first/experiment4_after_noTL")
pkl_noTL_goal = os.path.join(base_dir, "result_pkl/experiment4_after_noTL_goal")
pkl_TL = os.path.join(base_dir, "result_pkl/experiment_keep_first/experiment4_after_TL")
# pkl_dir2 = os.path.join(base_dir, "result_pkl/experiment4_after_noTL_goal")
# pkl_dir_afterTL = os.path.join(base_dir, "result_pkl/experiment4_after_TL")
# pkl_dir2 = os.path.join(base_dir, "result_pkl/experiment4_after_noTL_goal")
# make average score
# plotting.average_score(base_dir, pkl_noTL, "exp4_test_after_noTL_v", 100, 30)
# plotting.average_score(base_dir, pkl_TL, "exp4_test_after_TL_v", 100, 30)
# plotting.average_score(base_dir, pkl_noTL_noGoal, "exp4_test_v", 100, 9)
# plotting.average_score(base_dir, pkl_dir_q, "exp4_test_v", 100, 30)
# Load the pkl files
stats = plotting.load_stats(pkl_noTL, "exp4_test_after_noTL_v_average")
stats2 = plotting.load_stats(pkl_noTL_goal, "exp4_test_v_average")
stats3 = plotting.load_stats(pkl_TL, "exp4_test_after_TL_v_average")
# stats3 = plotting.load_stats(pkl_dir_afterTL, "exp4_test_TL_v0")
# stats_q = plotting.load_stats(pkl_dir_q, "exp4_test_v_average")
# import ipdb; ipdb.set_trace()
plotting.plot_episode_stats_transfer(stats, stats2, stats3)
experiment5_test()
# pkl_dir = os.path.join(base_dir, "result_pkl/experiment4_after_TL")
# stats = plotting.load_stats(pkl_dir, "exp4_test_TL_v0")
# stats = plotting.load_stats(pkl_dir, "exp4_TL_v0")
# import ipdb; ipdb.set_trace()
# plotting.plot_episode_stats_simple(stats)
# experiment3_test()
# experiment_learning(1)
# experiment_transfer()
# plotting.average_score(base_dir, pkl_dir, "exp4_after_TL_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir, "exp1_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir, "exp3_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir_q, "exp3_test_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir, "exp5_test_TL_v", 100, 30)
# plotting.average_score(base_dir, pkl_dir_q, "exp5_test_noTL_v", 100, 30)
# plotting.average_ILASP(base_dir, pkl_dir, "exp2_v", 2, 250, 30)
# stats = plotting.load_stats(pkl_dir, "exp1_v_average")
# stats_q = plotting.load_stats(pkl_dir_q, "temp_test_v_average")
# stats = plotting.load_stats(pkl_dir, "exp1_test_v_average")
# plotting.plot_ILASP_progress(stats)
# stats = plotting.load_stats(pkl_dir, "exp2_v_average")
# import ipdb; ipdb.set_trace()
# stats_q = plotting.load_stats(pkl_dir_q, "exp5_test_noTL_v_average")
# stats_q = plotting.load_stats(pkl_dir_q, "exp3_test_v_average")
# for i in range(12):
# print("-----------------------")
# print("No.", i)
# stats = plotting.load_stats(pkl_dir, "exp3_test_v{}".format(str(i)))
# print(stats)
# plotting.plot_episode_stats_simple(stats, smoothing_window=1)
# plotting.plot_episode_stats_multiple(stats, stats_q)
# plotting.plot_ILASP_progress(stats)
```
#### File: gym_vgdl/gym_vgdl/register_samples.py
```python
from gym.envs.registration import register
from gym_vgdl.vgdl_env import VGDLEnv
import os
# Location of sample games
DATA_DIR = os.path.join( os.path.dirname(__file__), 'vgdl', 'sample_games')
sample_games = [
'test',
'aaa_field',
'aaa_H_shape',
'aaa_teleport',
'aaa_medium',
'aaa_small',
'experiment1',
'experiment2',
'experiment3_before',
'experiment3_after',
'experiment4_before',
'experiment4_after',
'aliens',
'boulderdash',
'chase',
'frogs',
'missilecommand',
'portals',
'survivezombies',
'zelda' ]
# A list of relevant classes for each sample game
classes = {
'test': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aaa_field': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aaa_H_shape': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aaa_teleport': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aaa_medium': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aaa_small': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment1': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment2': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment3_before': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment3_after': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment4_before': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'experiment4_after': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'aliens': ['avatar', 'alien', 'base', 'bomb', 'sam'],
'boulderdash': ['avatar', 'boulder', 'butterfly', 'crab', 'diamond',
'exitdoor', 'wall'],
'chase': ['avatar', 'angry', 'carcass', 'scared', 'wall'],
'frogs': ['avatar', 'goal', 'log', 'truck', 'wall'],
'missilecommand': ['avatar', 'city', 'explosion', 'incoming'],
'portals': ['avatar', 'goal', 'portalentry', 'portalexit', 'random',
'straight', 'wall' ],
'survivezombies': ['avatar', 'bee', 'flower', 'hell', 'honey', 'zombie'],
'zelda': ['avatar', 'enemy', 'goal', 'key', 'wall']
}
# A list of relevant resources for each sample game
resources = {
'test': [],
'aaa_field': [],
'aaa_H_shape': [],
'aaa_teleport': [],
'aaa_medium': [],
'aaa_small': [],
'experiment1': [],
'experiment2': [],
'experiment3_before': [],
'experiment3_after': [],
'experiment4_before': [],
'experiment4_after': [],
'aliens': [],
'boulderdash': ['diamond'],
'chase': [],
'frogs': [],
'missilecommand': [],
'portals': [],
'survivezombies': ['honey'],
'zelda': []
}
suffixes = {
'image': "",
'objects': "_objects",
'features': "_features",
}
# Register the sample games
def register_sample_games():
for game in sample_games:
for obs_type, suffix in suffixes.items():
register(
id='vgdl_{}{}-v0'.format(game, suffix),
entry_point='gym_vgdl:VGDLEnv',
kwargs={
'game_file': os.path.join( DATA_DIR, game + '.txt'),
'level_file': os.path.join( DATA_DIR, game + '_lvl0.txt'),
'obs_type': obs_type,
'notable_sprites': classes[game],
'notable_resources': resources[game],
# Use 24 (size of sprites) to render the full sprites
'block_size': 24 if obs_type == 'image' else 10
},
timestep_limit=1000,
nondeterministic=True,
)
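# Illustrative usage sketch (added): once register_sample_games() has been called
# (typically from the package __init__), environments are created by id, which is
# built as 'vgdl_<game><suffix>-v0':
#     import gym, gym_vgdl
#     env = gym.make('vgdl_experiment1_objects-v0')
#     observation = env.reset()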
```
#### File: py-vgdl/vgdl/ai.py
```python
import math
from .core import VGDLSprite
#from tools import logToFile
class AStarNode(object):
def __init__(self, index, vgdlSprite):
self.vgdlSprite = vgdlSprite
self.sprite = vgdlSprite
self.index = index
class AStarWorld(object):
def __init__(self, game):
self.game = game
#ghost_sprites = game.getSprites('ghost')
#pacman_sprite = game.getSprites('pacman')[0]
self.food = game.getSprites('food')
self.nest = game.getSprites('nest')
self.moving = game.getSprites('moving')
self.empty = [VGDLSprite(pos, (self.game.block_size, self.game.block_size)) for pos in self.game.emptyBlocks()]
##print "food=%s, nest=%s, moving=%s" %(len(food), len(nest), len(moving))
##print "empty=%s" % (len(empty))
##print "total=%s" %(len(food)+len(nest)+len(moving)+len(empty))
##print "len(sprites)=%s" %len(sprites)
#print "game.width=%s, game.height=%s" %(game.width, game.height)
#print "pacman_sprite=%s" %(pacman_sprite)
#print "x=%s, y=%s" %(pacman_sprite.rect.left/game.block_size, pacman_sprite.rect.top/game.block_size)
self.save_walkable_tiles()
def get_walkable_tiles(self):
return self.food + self.nest + self.moving + self.empty
def save_walkable_tiles(self):
self.walkable_tiles = {}
self.walkable_tile_indices = []
combined = self.food + self.nest + self.moving + self.empty
#print combined
for sprite in combined:
#print sprite
tileX, tileY = self.get_sprite_tile_position(sprite)
index = self.get_index(tileX, tileY)
self.walkable_tile_indices.append(index)
self.walkable_tiles[index] = AStarNode(index, sprite)
def get_index(self, tileX, tileY):
#return tileX * self.game.width + tileY
return tileY * self.game.width + tileX
def get_tile_from_index(self, index):
        return index // self.game.width, index % self.game.width
def h(self, start, goal):
#return self.euclidean(start, goal)
return self.distance(start, goal)
def euclidean(self, node1, node2):
x1, y1 = self.get_sprite_tile_position(node1.sprite)
x2, y2 = self.get_sprite_tile_position(node2.sprite)
#print "x1:%s, y1:%s, x2:%s, y2:%s" %(x1,y1,x2,y2)
a = x2-x1
b = y2-y1
#print "a:%s, b:%s" %(a,b)
return math.sqrt(a*a + b*b)
def get_sprite_tile_position(self, sprite):
        tileX = sprite.rect.left // self.game.block_size
        tileY = sprite.rect.top // self.game.block_size
return tileX, tileY
def get_lowest_f(self, nodes, f_score):
f_best = 9999
node_best = None
for node in nodes:
if f_score[node.index] < f_best:
f_best = f_score[node.index]
node_best = node
return node_best
def reconstruct_path(self, came_from, current):
#print self.get_tile_from_index(current.index)
if current.index in came_from:
p = self.reconstruct_path(came_from, came_from[current.index])
p.append(current)
return p
else:
return [current]
def neighbor_nodes(self, node):
        sprite = node.sprite
return self.neighbor_nodes_of_sprite(sprite)
def neighbor_nodes_of_sprite(self, sprite):
tileX, tileY = self.get_sprite_tile_position(sprite)
tiles = [ (tileX-1,tileY), (tileX+1, tileY), (tileX,tileY-1), (tileX, tileY+1)]
neighbors = []
for (tilex, tiley) in tiles:
if (tilex >= 0 and tilex < self.game.width and tiley >= 0 and tiley < self.game.height):
index = self.get_index(tilex, tiley)
if index in self.walkable_tile_indices:
neighbors.append(self.walkable_tiles[index])
# neighbor_indices = [neighbor.index for neighbor in neighbors]
# print 'neighbors(%s,%s):%s' %(tileX, tileY, map(self.get_tile_from_index, neighbor_indices))
return neighbors
def distance(self, node1, node2):
x1, y1 = self.get_sprite_tile_position(node1.sprite)
x2, y2 = self.get_sprite_tile_position(node2.sprite)
return abs(x2-x1) + abs(y2-y1)
def getMoveFor(self, startSprite):
tileX, tileY = self.get_sprite_tile_position(startSprite)
index = self.get_index(tileX, tileY)
startNode = AStarNode(index, startSprite)
pacman = self.game.getSprites('pacman')[0]
goalX, goalY = self.get_sprite_tile_position(pacman)
goalIndex = self.get_index(goalX, goalY)
goalNode = AStarNode(goalIndex, pacman)
# logToFile('Goal: (%s,%s) --> (%s, %s)' %(tileX, tileY, goalX, goalY))
return self.search(startNode, goalNode)
def search(self, start, goal):
# Initialize the variables.
closedset = []
openset = []
came_from = {}
g_score = {}
f_score = {}
openset = [start]
g_score[start.index] = 0
f_score[start.index] = g_score[start.index] + self.h(start, goal)
while (len(openset) > 0):
current = self.get_lowest_f(openset, f_score)
if current.index == goal.index:
# print came_from
path = self.reconstruct_path(came_from, goal)
# path_sprites = [node.sprite for node in path]
# pathh = map(self.get_sprite_tile_position, path_sprites)
# print pathh
return path
openset.remove(current)
closedset.append(current)
for neighbor in self.neighbor_nodes(current):
temp_g = g_score[current.index] + self.distance(current, neighbor)
if self.nodeInSet(neighbor, closedset) and temp_g >= g_score[neighbor.index]:
continue
if not self.nodeInSet(neighbor, openset) or temp_g < g_score[neighbor.index]:
came_from[neighbor.index] = current
#print 'came_from[%s]=%s' % (self.get_tile_from_index(neighbor.index), self.get_tile_from_index(current.index))
g_score[neighbor.index] = temp_g
f_score[neighbor.index] = g_score[neighbor.index] + self.h(neighbor, goal)
if neighbor not in openset:
openset.append(neighbor)
return None
def nodeInSet(self, node, nodeSet):
nodeSetIndices = [n.index for n in nodeSet]
return node.index in nodeSetIndices
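# Illustrative usage sketch (added; assumes a running game instance exposing 'pacman',
# 'food', 'nest' and 'moving' sprites, as required by AStarWorld):
#     world = AStarWorld(game)
#     path = world.getMoveFor(ghost_sprite)   # list of AStarNode from the ghost to pacman
#     if path and len(path) > 1:
#         next_tile = world.get_sprite_tile_position(path[1].sprite)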
```
#### File: vgdl/ontology/sprites.py
```python
import itertools
import logging
from typing import NewType, Optional, Union, Dict, List, Tuple
import numpy as np
import pygame
from pygame.math import Vector2
from vgdl.core import VGDLSprite, Action, Resource
from vgdl.core import Color
from vgdl.tools import triPoints, unitVector
from .constants import *
from .physics import GridPhysics, ContinuousPhysics
__all__ = [
'AStarChaser',
'Bomber',
'Chaser',
'Conveyor',
'ErraticMissile',
'Fleeing',
'Flicker',
'Immovable',
'Missile',
'OrientedFlicker',
'OrientedSprite',
'Passive',
'Portal',
'RandomInertial',
'RandomMissile',
'RandomNPC',
'ResourcePack',
'SpawnPoint',
'Spreader',
'SpriteProducer',
'WalkJumper',
'Walker',
]
class Immovable(VGDLSprite):
""" A gray square that does not budge. """
color = GRAY
is_static = True
class Passive(VGDLSprite):
""" A square that may budge. """
color = RED
class ResourcePack(Resource):
""" Can be collected, and in that case adds/increases a progress bar on the collecting sprite.
Multiple resource packs can refer to the same type of base resource. """
is_static = True
class Flicker(VGDLSprite):
""" A square that persists just a few timesteps. """
color = RED
limit = 1
def __init__(self, **kwargs):
self._age = 0
VGDLSprite.__init__(self, **kwargs)
def update(self, game):
VGDLSprite.update(self, game)
if self._age > self.limit:
game.kill_sprite(self)
self._age += 1
class Spreader(Flicker):
""" Spreads to its four canonical neighbor positions, and replicates itself there,
if these are unoccupied. """
spreadprob = 1.
def update(self, game):
Flicker.update(self, game)
if self._age == 2:
for u in BASEDIRS:
if game.random_generator.random() < self.spreadprob:
game.create_sprite(self.name, (self.lastrect.left + u[0] * self.lastrect.size[0],
self.lastrect.top + u[1] * self.lastrect.size[1]))
class SpriteProducer(VGDLSprite):
""" Superclass for all sprites that may produce other sprites, of type 'stype'. """
stype = None
class Portal(SpriteProducer):
is_static = True
color = BLUE
class SpawnPoint(SpriteProducer):
prob = None
total = None
color = BLACK
is_static = True
def __init__(self, cooldown=1, prob=1, total=None, **kwargs):
SpriteProducer.__init__(self, **kwargs)
if prob:
self.prob = prob
self.is_stochastic = (prob > 0 and prob < 1)
if cooldown:
self.cooldown = cooldown
if total:
self.total = total
self.counter = 0
def update(self, game):
if (game.time % self.cooldown == 0 and game.random_generator.random() < self.prob):
game.create_sprite(self.stype, (self.rect.left, self.rect.top))
self.counter += 1
if self.total and self.counter >= self.total:
game.kill_sprite(self)
class RandomNPC(VGDLSprite):
""" Chooses randomly from all available actions each step. """
speed = 1
is_stochastic = True
def update(self, game):
VGDLSprite.update(self, game)
self.physics.activeMovement(self, game.random_generator.choice(BASEDIRS))
class OrientedSprite(VGDLSprite):
""" A sprite that maintains the current orientation. """
draw_arrow = False
orientation = RIGHT
state_attributes = VGDLSprite.state_attributes + ['orientation']
def _draw(self, game):
""" With a triangle that shows the orientation. """
VGDLSprite._draw(self, game)
if self.draw_arrow:
col = (self.color[0], 255 - self.color[1], self.color[2])
pygame.draw.polygon(game.screen, col, triPoints(self.rect, unitVector(self.orientation)))
class Conveyor(OrientedSprite):
""" A static object that used jointly with the 'conveySprite' interaction to move
other sprites around."""
is_static = True
color = BLUE
strength = 1
draw_arrow = True
class Missile(OrientedSprite):
""" A sprite that constantly moves in the same direction. """
speed = 1
class OrientedFlicker(OrientedSprite, Flicker):
""" Preserves directionality """
draw_arrow = True
speed = 0
class Walker(Missile):
""" Keep moving in the current horizontal direction. If stopped, pick one randomly. """
airsteering = False
is_stochastic = True
def update(self, game):
if self.airsteering or self.lastdirection[0] == 0:
if self.orientation[0] > 0:
d = 1
elif self.orientation[0] < 0:
d = -1
else:
d = game.random_generator.choice([-1, 1])
self.physics.activeMovement(self, (d, 0))
Missile.update(self, game)
class WalkJumper(Walker):
prob = 0.1
strength = 10
def update(self, game):
if self.lastdirection[0] == 0:
if self.prob < game.random_generator.random():
self.physics.activeMovement(self, (0, -self.strength))
Walker.update(self, game)
class RandomInertial(OrientedSprite, RandomNPC):
physicstype = ContinuousPhysics
class RandomMissile(Missile):
def __init__(self, **kwargs):
Missile.__init__(self, orientation=game.random_generator.choice(BASEDIRS),
speed=game.random_generator.choice([0.1, 0.2, 0.4]), **kwargs)
class ErraticMissile(Missile):
""" A missile that randomly changes direction from time to time.
(with probability 'prob' per timestep). """
def __init__(self, prob=0.1, **kwargs):
Missile.__init__(self, orientation=game.random_generator.choice(BASEDIRS), **kwargs)
self.prob = prob
self.is_stochastic = (prob > 0 and prob < 1)
def update(self, game):
Missile.update(self, game)
if game.random_generator.random() < self.prob:
self.orientation = game.random_generator.choice(BASEDIRS)
class Bomber(SpawnPoint, Missile):
color = ORANGE
is_static = False
def update(self, game):
Missile.update(self, game)
SpawnPoint.update(self, game)
class Chaser(RandomNPC):
""" Pick an action that will move toward the closest sprite of the provided target type. """
stype = None
fleeing = False
def _closestTargets(self, game):
bestd = 1e100
res = []
for target in game.getSprites(self.stype):
d = self.physics.distance(self.rect, target.rect)
if d < bestd:
bestd = d
res = [target]
elif d == bestd:
res.append(target)
return res
def _movesToward(self, game, target):
""" Find the canonical direction(s) which move toward
the target. """
res = []
basedist = self.physics.distance(self.rect, target.rect)
for a in BASEDIRS:
r = self.rect.copy()
r = r.move(a)
newdist = self.physics.distance(r, target.rect)
if self.fleeing and basedist < newdist:
res.append(a)
if not self.fleeing and basedist > newdist:
res.append(a)
return res
def update(self, game):
VGDLSprite.update(self, game)
options = []
for target in self._closestTargets(game):
options.extend(self._movesToward(game, target))
if len(options) == 0:
options = BASEDIRS
self.physics.activeMovement(self, game.random_generator.choice(options))
class Fleeing(Chaser):
""" Just reversing directions"""
fleeing = True
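# Illustrative VGDL declarations (added; hypothetical sprite keys, following the
# 'key > ClassName arg=value' convention handled by VGDLParser):
#     monster > Chaser stype=avatar
#     scared  > Fleeing stype=avatar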
class AStarChaser(RandomNPC):
    """ Move towards the character using A* search. """
    from vgdl.ai import AStarWorld
stype = None
fleeing = False
drawpath = None
walkableTiles = None
neighborNodes = None
def _movesToward(self, game, target):
""" Find the canonical direction(s) which move toward
the target. """
res = []
basedist = self.physics.distance(self.rect, target.rect)
for a in BASEDIRS:
r = self.rect.copy()
r = r.move(a)
newdist = self.physics.distance(r, target.rect)
if self.fleeing and basedist < newdist:
res.append(a)
if not self.fleeing and basedist > newdist:
res.append(a)
return res
def _draw(self, game):
""" With a triangle that shows the orientation. """
RandomNPC._draw(self, game)
if self.walkableTiles:
col = pygame.Color(0, 0, 255, 100)
for sprite in self.walkableTiles:
pygame.draw.rect(game.screen, col, sprite.rect)
if self.neighborNodes:
#logToFile("len(neighborNodes)=%s" %len(self.neighborNodes))
col = pygame.Color(0, 255, 255, 80)
for node in self.neighborNodes:
pygame.draw.rect(game.screen, col, node.sprite.rect)
if self.drawpath:
col = pygame.Color(0, 255, 0, 120)
for sprite in self.drawpath[1:-1]:
pygame.draw.rect(game.screen, col, sprite.rect)
def _setDebugVariables(self, world, path):
'''
Sets the variables required for debug drawing of the paths
resulting from the A-Star search.
'''
path_sprites = [node.sprite for node in path]
self.walkableTiles = world.get_walkable_tiles()
self.neighborNodes = world.neighbor_nodes_of_sprite(self)
self.drawpath = path_sprites
def update(self, game):
VGDLSprite.update(self, game)
        world = self.AStarWorld(game)
path = world.getMoveFor(self)
# Uncomment below to draw debug paths.
# self._setDebugVariables(world,path)
if len(path)>1:
move = path[1]
nextX, nextY = world.get_sprite_tile_position(move.sprite)
nowX, nowY = world.get_sprite_tile_position(self)
movement = None
if nowX == nextX:
if nextY > nowY:
#logToFile('DOWN')
movement = DOWN
else:
#logToFile('UP')
movement = UP
else:
if nextX > nowX:
#logToFile('RIGHT')
movement = RIGHT
else:
#logToFile('LEFT')
movement = LEFT
self.physics.activeMovement(self, movement)
```
#### File: py-vgdl/vgdl/parser.py
```python
from .tools import Node, indentTreeParser
from .core import BasicGame
from vgdl import registry, ontology
from vgdl.core import BasicGame, SpriteRegistry
registry.register_all(ontology)
registry.register_class(BasicGame)
class VGDLParser:
""" Parses a string into a Game object. """
verbose = False
def parseGame(self, tree, **kwargs):
""" Accepts either a string, or a tree. """
if not isinstance(tree, Node):
tree = indentTreeParser(tree).children[0]
sclass, args = self._parseArgs(tree.content)
args.update(kwargs)
# BasicGame construction
self.sprite_registry = SpriteRegistry()
self.game = sclass(self.sprite_registry, **args)
for c in tree.children:
if c.content == "SpriteSet":
self.parseSprites(c.children)
if c.content == "InteractionSet":
self.parseInteractions(c.children)
if c.content == "LevelMapping":
self.parseMappings(c.children)
if c.content == "TerminationSet":
self.parseTerminations(c.children)
return self.game
def _eval(self, estr):
"""
Whatever is visible in the global namespace (after importing the ontologies)
can be used in the VGDL, and is evaluated.
"""
# Classes and functions etc are registered with the global registry
if estr in registry:
return registry.request(estr)
else:
# Strings and numbers should just be interpreted
return eval(estr)
def parseInteractions(self, inodes):
for inode in inodes:
if ">" in inode.content:
pair, edef = [x.strip() for x in inode.content.split(">")]
eclass, args = self._parseArgs(edef)
objs = [x.strip() for x in pair.split(" ") if len(x)>0]
for obj in objs[1:]:
self.game.collision_eff.append(tuple([objs[0], obj, eclass, args]))
if self.verbose:
print("Collision", pair, "has effect:", edef)
def parseTerminations(self, tnodes):
for tn in tnodes:
sclass, args = self._parseArgs(tn.content)
if self.verbose:
print("Adding:", sclass, args)
self.game.terminations.append(sclass(**args))
def parseSprites(self, snodes, parentclass=None, parentargs={}, parenttypes=[]):
for sn in snodes:
assert ">" in sn.content
key, sdef = [x.strip() for x in sn.content.split(">")]
sclass, args = self._parseArgs(sdef, parentclass, parentargs.copy())
stypes = parenttypes+[key]
if 'singleton' in args:
if args['singleton']==True:
self.game.singletons.append(key)
args = args.copy()
del args['singleton']
if len(sn.children) == 0:
if self.verbose:
print("Defining:", key, sclass, args, stypes)
self.sprite_registry.register_sprite_class(key, sclass, args, stypes)
if key in self.game.sprite_order:
# last one counts
self.game.sprite_order.remove(key)
self.game.sprite_order.append(key)
else:
self.parseSprites(sn.children, sclass, args, stypes)
def parseMappings(self, mnodes):
for mn in mnodes:
c, val = [x.strip() for x in mn.content.split(">")]
assert len(c) == 1, "Only single character mappings allowed."
# a char can map to multiple sprites
keys = [x.strip() for x in val.split(" ") if len(x)>0]
if self.verbose:
print("Mapping", c, keys)
self.game.char_mapping[c] = keys
def _parseArgs(self, s, sclass=None, args=None):
if not args:
args = {}
sparts = [x.strip() for x in s.split(" ") if len(x) > 0]
if len(sparts) == 0:
return sclass, args
if not '=' in sparts[0]:
sclass = self._eval(sparts[0])
sparts = sparts[1:]
for sp in sparts:
k, val = sp.split("=")
try:
args[k] = self._eval(val)
except:
args[k] = val
return sclass, args
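# Usage sketch (illustrative only; assumes `game_str` holds an indented VGDL description
# with SpriteSet / LevelMapping / InteractionSet / TerminationSet blocks):
#   game = VGDLParser().parseGame(game_str)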
```
#### File: 921kiyo/symbolic-rl/q_learning.py
```python
import gym
import gym_vgdl
import numpy as np
import time
# Q-learning
import itertools
import sys
import os.path
import pickle
from collections import defaultdict
from lib import plotting, py_asp, helper, induction, abduction
import config as cf
base_dir = os.path.dirname(os.path.abspath(__file__))
ACTION_SPACE = 4 # env.action_space.n
def make_epsilon_greedy_policy(Q, epsilon, nA):
def policy_fn(observation, episodes):
# new_epsilon = epsilon*(1/(episodes+1))
new_epsilon = epsilon
A = np.ones(nA, dtype=float)* new_epsilon/nA
best_action = np.argmax(Q[observation])
print("action_probs ", A[0])
print("best_action ", best_action)
A[best_action] += (1.0 - new_epsilon)
return A
return policy_fn
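# Worked example (sketch): with nA=4 and epsilon=0.1 every action first gets
# 0.1 / 4 = 0.025 probability and the greedy action receives the remaining 0.9,
# so Q[s] = [1.0, 0.2, 0.0, 0.3] yields action_probs = [0.925, 0.025, 0.025, 0.025].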
def run_experiment(env, Q, stats_test, i_episode, width, time_range):
policy = make_epsilon_greedy_policy(Q, 0, ACTION_SPACE)
current_state = env.reset()
current_state_int = helper.convert_state(current_state[1], current_state[0], width)
for t in range(time_range):
env.render()
# time.sleep(0.1)
print("running test.....", current_state_int)
action_probs = policy(current_state_int, 1)
action = np.argmax(action_probs)
next_state, reward, done, _ = env.step(action)
current_state_int = helper.convert_state(next_state[1], next_state[0], width)
if done:
reward = 10
else:
reward = reward - 1
print("reward here is ", reward)
print("i_episode here is ", i_episode)
# Update stats
# for i in range(i_episode-9, i_episode+1):
stats_test.episode_rewards[i_episode] += reward
stats_test.episode_lengths[i_episode] = t
if done:
break
def q_learning(env, num_episodes, discount_factor=1, alpha=0.5, epsilon=0.1):
"""
Args:
alpha: TD learning rate
"""
# height = env.unwrapped.game.height
width = env.unwrapped.game.width
Q = defaultdict(lambda: np.zeros(ACTION_SPACE))
# Q = defaultdict(lambda: np.random.rand(ACTION_SPACE))
# Q = defaultdict(lambda: np.ones(ACTION_SPACE))
goal_int = helper.convert_state(16, 1, width)
for i in range(3):
Q[goal_int][i] = 0
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes),
episode_runtime=np.zeros(num_episodes))
stats_test = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes),
episode_runtime=np.zeros(num_episodes))
policy = make_epsilon_greedy_policy(Q, epsilon, ACTION_SPACE)
for i_episode in range(num_episodes):
print("------------------------------")
start_total_runtime = time.time()
# Reset the env and pick the first action
previous_state = env.reset()
state_int = helper.convert_state(previous_state[1], previous_state[0], width)
for t in range(cf.TIME_RANGE):
env.render()
# time.sleep(0.1)
# Take a step
action_probs = policy(state_int, i_episode)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
# action = env.action_space.sample()
if(action == 4):
import ipdb; ipdb.set_trace()
# print("---------------------------------")
# 0: UP
# 1: DOWN
# 2: LEFT
# 3: RIGHT
next_state, reward, done, _ = env.step(action)
if done:
reward = 10
else:
reward = reward - 1
previous_state = next_state
next_state_int = helper.convert_state(next_state[1], next_state[0], width)
# Update stats
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
# TD Update
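            # Q-learning target: Q[s][a] += alpha * (reward + discount_factor * max_a' Q[s'][a'] - Q[s][a])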
best_next_action = np.argmax(Q[next_state_int])
td_target = reward + discount_factor*Q[next_state_int][best_next_action]
td_delta = td_target - Q[state_int][action]
Q[state_int][action] += alpha * td_delta
if done:
# import ipdb; ipdb.set_trace()
break
previous_state = next_state
state_int = next_state_int
stats.episode_runtime[i_episode] += (time.time()-start_total_runtime)
run_experiment(env, Q, stats_test, i_episode, width, cf.TIME_RANGE)
return Q, stats, stats_test
env = gym.make('vgdl_experiment3_after-v0')
# env = gym.make('vgdl_experiment1-v0')
temp_dir = os.path.join(base_dir, "result_pkl/experiment3_q_q")
# import ipdb; ipdb.set_trace()
# Q, stats, stats_test = q_learning(env, 100)
for i in range(30):
Q, stats, stats_test = q_learning(env, 100)
plotting.store_stats(stats, temp_dir, "exp3_v{}".format(i))
plotting.store_stats(stats_test, temp_dir, "exp3_test_v{}".format(i))
# import ipdb; ipdb.set_trace()
# plotting.plot_episode_stats_test(stats, stats_test)
# plotting.plot_episode_stats(stats)
# plotting.plot_episode_stats_simple(stats)
# plotting.plot_episode_stats_simple(stats_test)
``` |
{
"source": "923132714/PyMarkdoownTool",
"score": 3
} |
#### File: PyMarkdoownTool/src/util.py
```python
import time
def date_to_str(date):
    '''
    Convert a date string from the '%Y/ %m/ %d' format to '%Y-%m-%d'.
    :param date:
    :return:
    '''
tempTime = time.strptime(date,'%Y/ %m/ %d')
resTime = time.strftime('%Y-%m-%d',tempTime)
return resTime
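# Example (sketch): date_to_str('2019/ 01/ 09') -> '2019-01-09'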
import re
import os
class FileName():
def __init__(self,path="",**kv):
self.filepath=path
if path!="":
self.read_filename()
items = ['prefix','suffix','name','ext','dir']
for i in items:
if i in kv.keys():
                setattr(self, i, kv[i])  # route through the property setters
def read_filename(self,filename = ""):
        '''
        Parse the path and record the directory, prefix, name, suffix and extension.
        :param filename:
        :return:
        '''
if filename=="":
filename = self.filepath
self.dir = os.path.dirname(filename)
base_name, self.ext = os.path.splitext(os.path.basename(filename))
m = re.split(r'-', base_name)
self.prefix = '-'.join(m[:-1])
name = m[-1]
m = re.split(r'\.', name)
self.suffix = '.'.join(m[1:])
self.name = m[0]
    # extension check
    def is_ext_file(self, ext):
        '''
        Check whether the file's extension equals ext.
        :param ext:
        :return:
        '''
        return self.ext == ext
@property
def path(self):
return self.dir + "\\" + self.prefix + "-" + self.name + "." + self.suffix + self.ext
@path.setter
def path(self, path):
self.read_filename(path)
    # The properties below use underscore-prefixed backing attributes; returning
    # e.g. self.name from the name getter would recurse forever.
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def prefix(self):
        return self._prefix
    @prefix.setter
    def prefix(self, prefix):
        self._prefix = prefix
    @property
    def suffix(self):
        return self._suffix
    @suffix.setter
    def suffix(self, suffix):
        self._suffix = suffix
    @property
    def ext(self):
        return self._ext
    @ext.setter
    def ext(self, ext):
        self._ext = ext
    @property
    def dir(self):
        return self._dir
    @dir.setter
    def dir(self, dir):
        self._dir = dir
if __name__ == '__main__':
path = 'E:\\WorkSpace\\page\\github_blog\\_posts\\Android\\2019-01-09-game.txt.md'
a = FileName(path)
    print(a.path)
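    # Expected parse (sketch) for the path above: dir='E:\\WorkSpace\\page\\github_blog\\_posts\\Android',
    # prefix='2019-01-09', name='game', suffix='txt', ext='.md'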
``` |
{
"source": "923132714/SerialReader",
"score": 2
} |
#### File: SerialReader/db/Memory.py
```python
class Memory:
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __init__(self):
pass
def commit_air_quality_data(self):
pass
def select_air_quality_data(self):
pass
```
#### File: SerialReader/db/MysqlDB.py
```python
import pymysql
from db.Memory import Memory
class MysqlDB(Memory):
    # connection configuration
    config = {
        'host': '127.0.0.1',
        'port': 3306,  # MySQL default port
        'user': 'logic',  # MySQL user name
        'password': '<PASSWORD>',
        'db': 'logic',  # database name
        'charset': 'utf8mb4',
        'cursorclass': pymysql.cursors.DictCursor,
    }
def __init__(self):
pass
def __enter__(self):
"""
调用前链接数据库
:return: MysqlDB
"""
print("-----MysqlDB __enter__-----")
# connect database
# self.db = pymysql.connect("localhost", "logic", "logic", "logic")
self.db = pymysql.connect(**self.config)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print("-----MysqlDB __exit__-----")
print("type: ", exc_type)
print("val: ", exc_val)
print("tb: ", exc_tb)
self.db.close()
def commit_air_quality_data(self, serials_data):
        '''
        Store one air-quality reading taken from the data dictionary.
        :param serials_data: data dictionary
        :return: number of affected rows on success, False on failure
        '''
# insert statement
sql = """INSERT INTO fresh_air(pm2,pm10,temp,humi,addr,time) \
values (%s, %s, %s, %s, %s, '%s' )""" % \
(serials_data['pm2.5'], serials_data["pm10"], \
serials_data["temp"], serials_data["humi"], \
serials_data["addr"], serials_data["time"])
print(sql)
try:
            # get a cursor
with self.db.cursor() as cursor:
# execute sql
result = cursor.execute(sql)
# commit sql
self.db.commit()
return result
        except Exception as exc:
            print("commit_air_quality_data failed:", exc)
self.db.rollback()
return False
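    # Safer alternative (sketch, not the original behaviour): let the driver quote the
    # values with a parameterized query, e.g.
    #   cursor.execute(
    #       "INSERT INTO fresh_air(pm2, pm10, temp, humi, addr, time) "
    #       "VALUES (%s, %s, %s, %s, %s, %s)",
    #       (serials_data['pm2.5'], serials_data['pm10'], serials_data['temp'],
    #        serials_data['humi'], serials_data['addr'], serials_data['time']))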
def select_air_quality_data(self, line=50):
serials_data ={}
sql = """SELECT id,pm2,pm10,temp,humi,addr,time
from fresh_air ORDER BY id DESC LIMIT %s""" %\
(line,)
try:
            # get a cursor
            with self.db.cursor() as cursor:
                # execute the SQL statement
                count = cursor.execute(sql)  # number of rows
                print(count)
                # fetch the data
                result = cursor.fetchall()  # fetch all rows
                print(result)
# {'id': 93, 'pm2': 125.0, 'pm10': 143.0,
# 'temp': 28.0, 'humi': 23.0, 'addr': 15,
# 'time': datetime.datetime(2019, 2, 19, 13, 1, 32)}
return result[::-1]
        except Exception as exc:
            print("select_air_quality_data failed:", exc)
self.db.rollback()
return False
if __name__ == '__main__':
with MysqlDB() as mydb:
mydb.select_air_quality_data()
``` |
{
"source": "925781609/flask_web",
"score": 3
} |
#### File: app/apps/views.py
```python
from . import apps
from random import randint
from flask import render_template, redirect, flash, url_for, session
from flask_wtf import Form
from wtforms import IntegerField, SubmitField
from wtforms.validators import Required, NumberRange
from ..models import Item
start_to_guess = 1
class GuessNumberForm(Form):
number = IntegerField(u'Please guess a number between 0 and 1000:',
validators=[Required(u'Please input a valid number'),
NumberRange(0, 1000, u'the number should be 0 - 1000') ])
submit = SubmitField(u'Submit')
@apps.route('/guess_number', methods=['GET', 'POST'])
def guess_number():
#Generate an integer between 0 and 1000, and save it in session
global start_to_guess
if start_to_guess:
session['number'] = randint(0, 1000)
session['times'] = 10
start_to_guess = 0
print('guess_number() was called')
times = session['times']
result = session.get('number')
form = GuessNumberForm()
if form.validate_on_submit():
print('times is %s' % times)
times -= 1
session['times'] = times
print("session['times'] is %s" % times)
if times == 0:
flash(u'Failed to guess the right number ....o(>_<)o')
flash(u'The real number is {}'.format(result))
start_to_guess = 1
return redirect(url_for('main.index'))
answer = form.number.data
if answer > result:
            flash(u'Too big! You still have %s guesses left' % times)
elif answer < result:
            flash(u'Too small! You still have %s guesses left' % times)
else:
flash(u'You win')
start_to_guess = 1
return redirect(url_for('main.index'))
return render_template('apps/guess.html', form=form)
@apps.route('/todo', methods=['GET', 'POST'])
def todo_list():
flash(u'todo_list view function was called')
item = Item()
return render_template('apps/todo.html', item = item)
@apps.route('/edit_list/<int:id>', methods=['GET', 'POST'])
def edit_list(id):
    flash(u'edit_list view function was called')
    item = Item()
    return render_template('apps/todo.html', item = item)
``` |
{
"source": "927589452/bumblebee-status",
"score": 3
} |
#### File: bumblebee/modules/spaceapi.py
```python
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import requests
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(
engine, config, bumblebee.output.Widget(full_text=self.getState)
)
# Represents the state of the hackerspace
self._open = False
# Set to true if there was an error calling the spaceapi
self._error = False
# The URL representing the api endpoint
self._url = self.parameter("url",
default="http://club.entropia.de/spaceapi")
# Space Name, can be set manually in case of multiple widgets,
# so you're able to distinguish
self._name = self.parameter("name", default="")
# The timeout prevents the statusbar from blocking when the destination
# can't be reached.
self._timeout = self.parameter("timeout", default=2)
# Only execute every 5 minutes by default
self.interval(self.parameter("interval", default=5))
def getState(self, widget):
text = self.parameter("prefix", default="")
text += self._name + ": "
if self._error:
text += "ERROR"
elif self._open:
text += "Open"
else:
text += "Closed"
return text
def state(self, widget):
if self._error:
return ["critical"]
elif self._open:
return ["warning"]
else:
return []
def update(self, widgets):
try:
            with requests.get(self._url, timeout=self._timeout) as u:
json = u.json()
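                # Expected payload shape (sketch, following the SpaceAPI convention):
                #   {"space": "hackerspace name", "state": {"open": true, ...}, ...}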
self._open = json["state"]["open"]
self._name = self.parameter("name", default=json["space"])
self._error = False
except Exception:
# Displays ERROR status
self._error = True
# Author: <NAME> <<EMAIL>>
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
``` |
{
"source": "928606496/SmartStoreSystem",
"score": 3
} |
#### File: 928606496/SmartStoreSystem/CustomersInformationWindow.py
```python
import sys
from PIL import ImageGrab
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import os
from Database import Database
class CustomersInformationWindow(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
db = Database()
#UI
self.setGeometry(500, 400, 1000, 800)
self.setWindowIcon(QIcon('./Pictures/Customers.ico'))
self.resize(1200,500)
self.setWindowTitle('Customers information')
items = db.getAll()
self.informationLabels = []
i = 0
for item in items:
self.informationLabels.append([QLabel(self),QLabel(self),QLabel(self),QLabel(self),QLabel(self),QLabel(self)])
self.informationLabels[i][0].setText(str(item[0] + 100000))
self.informationLabels[i][1].setText(str(item[1]))
self.informationLabels[i][2].setText(item[2])
self.informationLabels[i][3].setText(item[3])
self.informationLabels[i][4].setText(item[4])
self.informationLabels[i][5].setText(item[5])
self.informationLabels[i][0].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][1].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][2].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][3].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][4].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][5].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][0].move(0,50 + i * 50)
self.informationLabels[i][1].move(200,50 + i * 50)
self.informationLabels[i][2].move(400,50 + i * 50)
self.informationLabels[i][3].move(600,50 + i * 50)
self.informationLabels[i][4].move(800,50 + i * 50)
self.informationLabels[i][5].move(1000,50 + i * 50)
i += 1
#Buttons
# btn = QPushButton("Load",self)
# btn.clicked.connect(self.register)
# btn.resize(btn.sizeHint())
# btn.move(150,350)
#Editors' Labels
self.show1Label = QLabel(self)
self.show2Label = QLabel(self)
self.show3Label = QLabel(self)
self.show4Label = QLabel(self)
self.show5Label = QLabel(self)
self.show6Label = QLabel(self)
self.show1Label.setText("ID")
self.show2Label.setText("Age")
self.show3Label.setText("Name")
self.show4Label.setText("Sex")
self.show5Label.setText("Telephone")
self.show6Label.setText("Consumption")
self.show1Label.move(0,0)
self.show2Label.move(200,0)
self.show3Label.move(400,0)
self.show4Label.move(600,0)
self.show5Label.move(800,0)
self.show6Label.move(1000,0)
self.show1Label.setFont(QFont("Microsoft Yahei",12))
self.show2Label.setFont(QFont("Microsoft Yahei",12))
self.show3Label.setFont(QFont("Microsoft Yahei",12))
self.show4Label.setFont(QFont("Microsoft Yahei",12))
self.show5Label.setFont(QFont("Microsoft Yahei",12))
self.show6Label.setFont(QFont("Microsoft Yahei",12))
def refresh(self):
db = Database()
items = db.getAll()
i = 0
for item in items:
if i >= len(self.informationLabels):
self.informationLabels.append([QLabel(self),QLabel(self),QLabel(self),QLabel(self),QLabel(self),QLabel(self)])
self.informationLabels[i][0].setText(str(item[0] + 100000))
self.informationLabels[i][1].setText(str(item[1]))
self.informationLabels[i][2].setText(item[2])
self.informationLabels[i][3].setText(item[3])
self.informationLabels[i][4].setText(item[4])
self.informationLabels[i][5].setText(item[5])
self.informationLabels[i][0].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][1].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][2].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][3].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][4].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][5].setFont(QFont("Microsoft Yahei",12))
self.informationLabels[i][0].move(0,50 + i * 50)
self.informationLabels[i][1].move(200,50 + i * 50)
self.informationLabels[i][2].move(400,50 + i * 50)
self.informationLabels[i][3].move(600,50 + i * 50)
self.informationLabels[i][4].move(800,50 + i * 50)
self.informationLabels[i][5].move(1000,50 + i * 50)
i += 1
```
#### File: 928606496/SmartStoreSystem/DrawChart.py
```python
import matplotlib.pyplot as plt
import pygal
class DrawChart():
def __init__(self,db):
self.db = db
def draw(self):
items = self.db.flowrate_select()
x_values = list(range(0,24))
y_values = []
for x in x_values:
y_values.append(0)
for item in items:
value = item[2][11:13].replace(':','')
temp = int(value)
y_values[temp] += item[1]
chart = pygal.Bar()
chart.title = "SmartStore24-hours flowrate"
chart.x_labels = x_values
chart.add('Number of customer',y_values)
chart.render_to_file('Flowrate_Chart.svg')
``` |
{
"source": "92swhite/example-api-scrapper",
"score": 3
} |
#### File: api-scraper/helpers/api_handler.py
```python
import os
import logging
import requests
from typing import Dict, Iterator, Tuple, Any
class ApiHandler:
def __init__(self, testing: bool = False) -> None:
self.session = requests.session()
self.token = self.__get_token(testing)
self.headers = self.__make_headers()
self.base_url = "https://api.spotify.com/v1/"
def __get_token(self, testing: bool) -> str:
if testing:
return self.__get_testing_token()
else:
return self.__get_token_from_request()
def __get_testing_token(self) -> str:
try:
logging.info('USING "API_TESTING_TOKEN"')
return os.environ["API_TESTING_TOKEN"]
except KeyError:
logging.warning('MISSING "API_TESTING_TOKEN" ENV VAR')
return self.__get_token_from_request()
def __get_token_from_request(self) -> str:
url = f"https://accounts.spotify.com/api/token"
payload = self.__make_token_payload()
response = self.session.post(url, data=payload)
logging.debug(f"TOKEN RESPONSE CODE: {response.status_code}")
return response.json()["access_token"]
def __make_token_payload(self) -> Dict[str, str]:
return {
"grant_type": "client_credentials",
"client_id": os.environ["API_CLIENT_ID"],
"client_secret": os.environ["API_CLIENT_SECRET"],
}
def __make_headers(self) -> Dict[str, str]:
return {
"Authorization": f"Bearer {self.token}",
"Content-Type": "application/json",
}
def __get_endpoint_results(self, url: str) -> Dict[str, Any]:
response = self.session.get(url, headers=self.headers)
return response.json()
def __parse_offset_and_limit(self, url: str) -> Tuple[int, ...]:
if url is None:
return (0, 0)
else:
parsed = url.split("&")[-2:]
map_parsed = map(lambda x: int(x.split("=")[-1]), parsed)
return tuple(map_parsed)
def get_new_releases(
self, country: str = "US", limit: int = 20, offset: int = 0
) -> Iterator[Dict[str, Any]]:
url = (
f"{self.base_url}"
f"browse/new-releases?country={country}&offset={offset}&limit={limit}"
)
logging.info("PULLING NEW RELEASES...")
while url is not None:
logging.debug(f"NEW RELEASES URL: {url}")
logging.info(f"\t{offset} - {offset + limit}")
result = self.__get_endpoint_results(url)
url = result["albums"]["next"]
offset, limit = self.__parse_offset_and_limit(url)
yield result["albums"]["items"]
``` |
{
"source": "92xianshen/PyCRFbyBilateralGrid",
"score": 3
} |
#### File: 92xianshen/PyCRFbyBilateralGrid/inference.py
```python
import numpy as np
from high_dim_filter import HighDimFilter
from PIL import Image
import matplotlib.pyplot as plt
import cv2
'''
CRF inference.
Inputs are channel-last, or are converted to channel-last.
'''
def unary_from_labels(labels, n_labels, gt_prob, zero_unsure=True):
"""
Simple classifier that is 50% certain that the annotation is correct.
(same as in the inference example).
Parameters
----------
labels: numpy.array
The label-map, i.e. an array of your data's shape where each unique
value corresponds to a label.
n_labels: int
The total number of labels there are.
If `zero_unsure` is True (the default), this number should not include
`0` in counting the labels, since `0` is not a label!
gt_prob: float
The certainty of the ground-truth (must be within (0,1)).
zero_unsure: bool
If `True`, treat the label value `0` as meaning "could be anything",
i.e. entries with this value will get uniform unary probability.
If `False`, do not treat the value `0` specially, but just as any
other class.
"""
assert 0 < gt_prob < 1, "`gt_prob must be in (0,1)."
labels = labels.flatten()
n_energy = -np.log((1.0 - gt_prob) / (n_labels - 1))
p_energy = -np.log(gt_prob)
# Note that the order of the following operations is important.
# That's because the later ones overwrite part of the former ones, and only
# after all of them is `U` correct!
U = np.full((n_labels, len(labels)), n_energy, dtype='float32')
U[labels - 1 if zero_unsure else labels, np.arange(U.shape[1])] = p_energy
# Overwrite 0-labels using uniform probability, i.e. "unsure".
if zero_unsure:
U[:, labels == 0] = -np.log(1.0 / n_labels)
return U
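# Worked example (sketch): with n_labels=2 and gt_prob=0.7, a labelled pixel gets
# unary energy -log(0.7) ~= 0.357 for its own class and -log(0.3) ~= 1.204 for the
# other class; with zero_unsure=True, pixels labelled 0 get -log(0.5) ~= 0.693 everywhere.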
def _diagonal_compatibility(shape):
return np.eye(shape[0], shape[1], dtype=np.float32)
def _potts_compatibility(shape):
return -1 * _diagonal_compatibility(shape)
def _softmax(x):
e_x = np.exp(x - x.max(axis=-1, keepdims=True))
return e_x / e_x.sum(axis=-1, keepdims=True)
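# e.g. _softmax(np.array([1., 2., 3.])) ~= [0.090, 0.245, 0.665]; values sum to 1 along the last axis.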
def inference(image, unary, num_classes, theta_alpha, theta_beta, theta_gamma, spatial_compat, bilateral_compat, num_iterations, Q):
# Check if `image` is three-channel and (h, w, 3)
assert image.ndim == 3 and image.shape[-1] == 3
# Check if data scale of `image` is [0, 1]
assert image.max() <= 1. and image.min() >= 0.
# Check if theta_beta is float and < 1
assert theta_beta <= 1.
height, width, _ = image.shape # channel-last
# Storage
spatial_weights = spatial_compat * _diagonal_compatibility((num_classes, num_classes))
bilateral_weights = bilateral_compat * _diagonal_compatibility((num_classes, num_classes))
compatibility_matrix = _potts_compatibility((num_classes, num_classes))
all_ones = np.ones((height, width, num_classes), dtype=np.float32)
spatial_norm_vals = np.zeros_like(all_ones)
bilateral_norm_vals = np.zeros_like(all_ones)
spatial_out = np.zeros_like(all_ones)
bilateral_out = np.zeros_like(all_ones)
# Spatial and bilateral high-dim filters
spatial_high_dim_filter = HighDimFilter(is_bilateral=False, height=height, width=width, space_sigma=theta_gamma)
bilateral_high_dim_filter = HighDimFilter(is_bilateral=True, height=height, width=width, space_sigma=theta_alpha, range_sigma=theta_beta)
# Initialize high-dim filters
spatial_high_dim_filter.init()
bilateral_high_dim_filter.init(image)
# Compute symmetric weight
spatial_high_dim_filter.compute(all_ones, spatial_norm_vals)
bilateral_high_dim_filter.compute(all_ones, bilateral_norm_vals)
spatial_norm_vals = 1. / (spatial_norm_vals ** .5 + 1e-20)
bilateral_norm_vals = 1. / (bilateral_norm_vals ** .5 + 1e-20)
# Initialize Q
Q[:] = _softmax(-unary)
for i in range(num_iterations):
print('iter {}'.format(i))
tmp1 = -unary
# Symmetric normalization and spatial message passing
# Message passing - spatial
spatial_high_dim_filter.compute(Q * spatial_norm_vals, spatial_out)
spatial_out *= spatial_norm_vals
# Message passing - bilateral
bilateral_high_dim_filter.compute(Q * bilateral_norm_vals, bilateral_out)
bilateral_out *= bilateral_norm_vals
# Message passing
message_passing = spatial_out.reshape((-1, num_classes)).dot(spatial_weights) + bilateral_out.reshape((-1, num_classes)).dot(bilateral_weights)
# Compatibility transform
pairwise = message_passing.dot(compatibility_matrix)
pairwise = pairwise.reshape((height, width, num_classes))
# Local update
tmp1 -= pairwise
# Normalize
Q[:] = _softmax(tmp1)
if __name__ == "__main__":
img = cv2.imread('examples/im2.png')
anno_rgb = cv2.imread('examples/anno2.png').astype(np.uint32)
anno_lbl = anno_rgb[:,:,0] + (anno_rgb[:,:,1] << 8) + (anno_rgb[:,:,2] << 16)
colors, labels = np.unique(anno_lbl, return_inverse=True)
HAS_UNK = 0 in colors
if HAS_UNK:
print("Found a full-black pixel in annotation image, assuming it means 'unknown' label, and will thus not be present in the output!")
print("If 0 is an actual label for you, consider writing your own code, or simply giving your labels only non-zero values.")
colors = colors[1:]
colorize = np.empty((len(colors), 3), np.uint8)
colorize[:,0] = (colors & 0x0000FF)
colorize[:,1] = (colors & 0x00FF00) >> 8
colorize[:,2] = (colors & 0xFF0000) >> 16
n_labels = len(set(labels.flat)) - int(HAS_UNK)
print(n_labels, " labels", (" plus \"unknown\" 0: " if HAS_UNK else ""), set(labels.flat))
unary = unary_from_labels(labels, n_labels, 0.7, HAS_UNK)
unary = np.rollaxis(unary.reshape(n_labels, *img.shape[:2]), 0, 3)
pred = np.zeros_like(unary)
inference(img / 255., unary, n_labels, theta_alpha=80., theta_beta=.0625, theta_gamma=3., spatial_compat=3., bilateral_compat=10., num_iterations=10, Q=pred)
MAP = np.argmax(pred, axis=-1)
plt.imshow(MAP)
plt.show()
``` |
{
"source": "92xianshen/PyDCRF",
"score": 2
} |
#### File: 92xianshen/PyDCRF/high_dim_filter_loader.py
```python
import numpy as np
from cython.permutohedral_lattice import permutohedral_lattice_filter
def _compute_spatial_kernel(height, width, theta_gamma):
positions = np.zeros((height, width, 2), dtype='float32')
for r in range(height):
for c in range(width):
positions[r, c, 0] = c / theta_gamma
positions[r, c, 1] = r / theta_gamma
return positions
def _compute_bilateral_kernel(img, theta_alpha, theta_beta):
height, width = img.shape[0], img.shape[1]
positions = np.zeros((height, width, 5), dtype='float32')
for r in range(height):
for c in range(width):
positions[r, c, 0] = c / theta_alpha
positions[r, c, 1] = r / theta_alpha
positions[r, c, 2] = img[r, c, 0] / theta_beta
positions[r, c, 3] = img[r, c, 1] / theta_beta
positions[r, c, 4] = img[r, c, 2] / theta_beta
return positions
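# Vectorized alternative (sketch, same result as the nested loops above):
#   yy, xx = np.mgrid[0:height, 0:width].astype('float32')
#   positions = np.dstack([xx / theta_alpha, yy / theta_alpha, img / theta_beta]).astype('float32')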
def spatial_high_dim_filter(inp, theta_gamma):
height, width, _ = inp.shape
print('Computing spatial kernel...')
positions = _compute_spatial_kernel(height, width, theta_gamma)
print('Spatial kernel computed.')
print('High order filtering...')
out = permutohedral_lattice_filter(inp, positions)
print('High order filtered.')
return out
def bilateral_high_dim_filter(inp, img, theta_alpha, theta_beta):
print('Computing bilateral kernel...')
positions = _compute_bilateral_kernel(img, theta_alpha, theta_beta)
    print('Bilateral kernel computed.')
print('High order filtering...')
out = permutohedral_lattice_filter(inp, positions)
print('High order filtered.')
return out
``` |
{
"source": "92xianshen/PyFastBilateralFilter",
"score": 2
} |
#### File: 92xianshen/PyFastBilateralFilter/NaiveNLinearInterpolation.py
```python
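# Note (sketch of intent): the commented-out block below is a fully unrolled 5-D
# multilinear interpolation over (y, x, r, g, b); each of the 2**5 = 32 terms weights
# one neighbouring vertex of the bilateral grid by the product of its interpolation alphas.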
# # Shape (h x w x c, 2) because it is `data`, idx 0 is input, idx 1 is weight
# array = array.reshape((-1, ))
# return \
# (1. - x_alpha) * (1. - y_alpha) * (1. - r_alpha) * (1. - g_alpha) * (1. - b_alpha) * array[yxrgb_idx] + \
# x_alpha * (1. - y_alpha) * (1. - r_alpha) * (1. - g_alpha) * (1. - b_alpha) * array[yxxrgb_idx] + \
# (1. - x_alpha) * y_alpha * (1. - r_alpha) * (1. - g_alpha) * (1. - b_alpha) * array[yyxrgb_idx] + \
# x_alpha * y_alpha * (1. - r_alpha) * (1. - g_alpha) * (1. - b_alpha) * array[yyxxrgb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * r_alpha * (1. - g_alpha) * (1. - b_alpha) * array[yxrrgb_idx] + \
# x_alpha * (1. - y_alpha) * r_alpha * (1. - g_alpha) * (1. - b_alpha) * array[yxxrrgb_idx] + \
# (1. - x_alpha) * y_alpha * r_alpha * (1. - g_alpha) * (1. - b_alpha) * array[yyxrrgb_idx] + \
# x_alpha * y_alpha * r_alpha * (1. - g_alpha) * (1. - b_alpha) * array[yyxxrrgb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * (1. - r_alpha) * g_alpha * (1. - b_alpha) * array[yxrggb_idx] + \
# x_alpha * (1. - y_alpha) * (1. - r_alpha) * g_alpha * (1. - b_alpha) * array[yxxrggb_idx] + \
# (1. - x_alpha) * y_alpha * (1. - r_alpha) * g_alpha * (1. - b_alpha) * array[yyxrggb_idx] + \
# x_alpha * y_alpha * (1. - r_alpha) * g_alpha * (1. - b_alpha) * array[yyxxrggb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * r_alpha * g_alpha * (1. - b_alpha) * array[yxrrggb_idx] + \
# x_alpha * (1. - y_alpha) * r_alpha * g_alpha * (1. - b_alpha) * array[yxxrrggb_idx] + \
# (1. - x_alpha) * y_alpha * r_alpha * g_alpha * (1. - b_alpha) * array[yyxrrggb_idx] + \
# x_alpha * y_alpha * r_alpha * g_alpha * (1. - b_alpha) * array[yyxxrrggb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * (1. - r_alpha) * (1. - g_alpha) * b_alpha * array[yxrgbb_idx] + \
# x_alpha * (1. - y_alpha) * (1. - r_alpha) * (1. - g_alpha) * b_alpha * array[yxxrgbb_idx] + \
# (1. - x_alpha) * y_alpha * (1. - r_alpha) * (1. - g_alpha) * b_alpha * array[yyxrgbb_idx] + \
# x_alpha * y_alpha * (1. - r_alpha) * (1. - g_alpha) * b_alpha * array[yyxxrgbb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * r_alpha * (1. - g_alpha) * b_alpha * array[yxrrgbb_idx] + \
# x_alpha * (1. - y_alpha) * r_alpha * (1. - g_alpha) * b_alpha * array[yxxrrgbb_idx] + \
# (1. - x_alpha) * y_alpha * r_alpha * (1. - g_alpha) * b_alpha * array[yyxrrgbb_idx] + \
# x_alpha * y_alpha * r_alpha * (1. - g_alpha) * b_alpha * array[yyxxrrgbb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * (1. - r_alpha) * g_alpha * b_alpha * array[yxrggbb_idx] + \
# x_alpha * (1. - y_alpha) * (1. - r_alpha) * g_alpha * b_alpha * array[yxxrggbb_idx] + \
# (1. - x_alpha) * y_alpha * (1. - r_alpha) * g_alpha * b_alpha * array[yyxrggbb_idx] + \
# x_alpha * y_alpha * (1. - r_alpha) * g_alpha * b_alpha * array[yyxxrggbb_idx] + \
# (1. - x_alpha) * (1. - y_alpha) * r_alpha * g_alpha * b_alpha * array[yxrrggbb_idx] + \
# x_alpha * (1. - y_alpha) * r_alpha * g_alpha * b_alpha * array[yxxrrggbb_idx] + \
# (1. - x_alpha) * y_alpha * r_alpha * g_alpha * b_alpha * array[yyxrrggbb_idx] + \
# x_alpha * y_alpha * r_alpha * g_alpha * b_alpha * array[yyxxrrggbb_idx]
``` |
{
"source": "92xianshen/refined-unet-v4",
"score": 2
} |
#### File: refined-unet-v4/model/CRFLayer.py
```python
import sys
import numpy as np
import tensorflow as tf
from model.spatial_filter_factory import spatial_high_dim_filter
from model.bilateral_filter_factory import bilateral_high_dim_filter
# from spatial_filter_factory import spatial_high_dim_filter
# from filter_factory.bilateral_filter_factory import bilateral_high_dim_filter
def _diagonal_compatibility(shape):
return tf.eye(shape[0], shape[1], dtype=np.float32)
def _potts_compatibility(shape):
return -1 * _diagonal_compatibility(shape)
class CRFLayer(tf.keras.layers.Layer):
""" A layer implementing CRF """
def __init__(self, num_classes, theta_alpha, theta_beta, theta_gamma, spatial_compat, bilateral_compat, num_iterations):
super(CRFLayer, self).__init__()
self.num_classes = num_classes
self.theta_alpha, self.theta_beta = theta_alpha, theta_beta
self.theta_gamma = theta_gamma
self.num_iterations = num_iterations
self.spatial_weights = spatial_compat * _diagonal_compatibility((num_classes, num_classes))
self.bilateral_weights = bilateral_compat * _diagonal_compatibility((num_classes, num_classes))
self.compatibility_matrix = _potts_compatibility((num_classes, num_classes))
def call(self, unary, image):
"""
The order of parameters: I, p
"""
assert len(image.shape) == 3 and len(unary.shape) == 3
unary_shape = tf.shape(unary)
height, width = unary_shape[0], unary_shape[1]
all_ones = tf.ones([height, width, self.num_classes], dtype=tf.float32)
# Compute symmetric weight
spatial_norm_vals = spatial_high_dim_filter(all_ones, height, width, space_sigma=self.theta_gamma)
bilateral_norm_vals = bilateral_high_dim_filter(all_ones, image, height, width, space_sigma=self.theta_alpha, range_sigma=self.theta_beta)
spatial_norm_vals = 1. / (spatial_norm_vals ** .5 + 1e-20)
bilateral_norm_vals = 1. / (bilateral_norm_vals ** .5 + 1e-20)
# Initialize Q
Q = tf.nn.softmax(-unary)
for i in range(self.num_iterations):
tmp1 = -unary
# Symmetric normalization and spatial message passing
spatial_out = spatial_high_dim_filter(Q * spatial_norm_vals, height, width, space_sigma=self.theta_gamma)
spatial_out *= spatial_norm_vals
# Symmetric normalization and bilateral message passing
bilateral_out = bilateral_high_dim_filter(Q * bilateral_norm_vals, image, height, width, space_sigma=self.theta_alpha, range_sigma=self.theta_beta)
bilateral_out *= bilateral_norm_vals
# Message passing
spatial_out = tf.reshape(spatial_out, [-1, self.num_classes])
spatial_out = tf.matmul(spatial_out, self.spatial_weights)
bilateral_out = tf.reshape(bilateral_out, [-1, self.num_classes])
bilateral_out = tf.matmul(bilateral_out, self.bilateral_weights)
message_passing = spatial_out + bilateral_out
# Compatibility transform
pairwise = tf.matmul(message_passing, self.compatibility_matrix)
pairwise = tf.reshape(pairwise, unary_shape)
# Local update
tmp1 -= pairwise
# Normalize
Q = tf.nn.softmax(tmp1)
return Q
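# Usage sketch (assumptions: `unary` is a float32 [H, W, num_classes] tensor and
# `image` is a float32 [H, W, 3] tensor scaled to [0, 1]):
#   crf = CRFLayer(num_classes=2, theta_alpha=80., theta_beta=.0625, theta_gamma=3.,
#                  spatial_compat=3., bilateral_compat=10., num_iterations=10)
#   Q = crf(unary, image)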
``` |
{
"source": "92ypli/imgclsmob",
"score": 3
} |
#### File: chainercv2/models/preresnet_cifar.py
```python
__all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn', 'preresnet56_cifar10',
'preresnet56_cifar100', 'preresnet56_svhn', 'preresnet110_cifar10', 'preresnet110_cifar100',
'preresnet110_svhn', 'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn',
'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn', 'preresnet1202_cifar10',
'preresnet1202_cifar100', 'preresnet1202_svhn']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv3x3, SimpleSequential
from .preresnet import PreResUnit, PreResActivation
class CIFARPreResNet(Chain):
"""
PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(32, 32),
classes=10):
super(CIFARPreResNet, self).__init__()
self.in_size = in_size
self.classes = classes
with self.init_scope():
self.features = SimpleSequential()
with self.features.init_scope():
setattr(self.features, "init_block", conv3x3(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = SimpleSequential()
with stage.init_scope():
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
setattr(stage, "unit{}".format(j + 1), PreResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
setattr(self.features, "stage{}".format(i + 1), stage)
setattr(self.features, "post_activ", PreResActivation(
in_channels=in_channels))
setattr(self.features, "final_pool", partial(
F.average_pooling_2d,
ksize=8,
stride=1))
self.output = SimpleSequential()
with self.output.init_scope():
setattr(self.output, "flatten", partial(
F.reshape,
shape=(-1, in_channels)))
setattr(self.output, "fc", L.Linear(
in_size=in_channels,
out_size=classes))
def __call__(self, x):
x = self.features(x)
x = self.output(x)
return x
def get_preresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create PreResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
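    # e.g. blocks=20, bottleneck=False -> layers=[3, 3, 3], channels=[[16]*3, [32]*3, [64]*3];
    #      blocks=164, bottleneck=True -> layers=[18]*3, channels=[[64]*18, [128]*18, [256]*18]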
net = CIFARPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def preresnet20_cifar10(classes=10, **kwargs):
"""
PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar10", **kwargs)
def preresnet20_cifar100(classes=100, **kwargs):
"""
PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar100",
**kwargs)
def preresnet20_svhn(classes=10, **kwargs):
"""
PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_svhn", **kwargs)
def preresnet56_cifar10(classes=10, **kwargs):
"""
PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar10", **kwargs)
def preresnet56_cifar100(classes=100, **kwargs):
"""
PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar100",
**kwargs)
def preresnet56_svhn(classes=10, **kwargs):
"""
PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_svhn", **kwargs)
def preresnet110_cifar10(classes=10, **kwargs):
"""
PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar10",
**kwargs)
def preresnet110_cifar100(classes=100, **kwargs):
"""
PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar100",
**kwargs)
def preresnet110_svhn(classes=10, **kwargs):
"""
PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_svhn",
**kwargs)
def preresnet164bn_cifar10(classes=10, **kwargs):
"""
PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar10",
**kwargs)
def preresnet164bn_cifar100(classes=100, **kwargs):
"""
PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar100",
**kwargs)
def preresnet164bn_svhn(classes=10, **kwargs):
"""
PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_svhn",
**kwargs)
def preresnet1001_cifar10(classes=10, **kwargs):
"""
PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar10",
**kwargs)
def preresnet1001_cifar100(classes=100, **kwargs):
"""
PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar100",
**kwargs)
def preresnet1001_svhn(classes=10, **kwargs):
"""
PreResNet-1001 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_svhn",
**kwargs)
def preresnet1202_cifar10(classes=10, **kwargs):
"""
PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar10",
**kwargs)
def preresnet1202_cifar100(classes=100, **kwargs):
"""
PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar100",
**kwargs)
def preresnet1202_svhn(classes=10, **kwargs):
"""
PreResNet-1202 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_svhn",
**kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
pretrained = False
models = [
(preresnet20_cifar10, 10),
(preresnet20_cifar100, 100),
(preresnet20_svhn, 10),
(preresnet56_cifar10, 10),
(preresnet56_cifar100, 100),
(preresnet56_svhn, 10),
(preresnet110_cifar10, 10),
(preresnet110_cifar100, 100),
(preresnet110_svhn, 10),
(preresnet164bn_cifar10, 10),
(preresnet164bn_cifar100, 100),
(preresnet164bn_svhn, 10),
(preresnet1001_cifar10, 10),
(preresnet1001_cifar100, 100),
(preresnet1001_svhn, 10),
(preresnet1202_cifar10, 10),
(preresnet1202_cifar100, 100),
(preresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != preresnet20_cifar10 or weight_count == 272282)
assert (model != preresnet20_cifar100 or weight_count == 278132)
assert (model != preresnet20_svhn or weight_count == 272282)
assert (model != preresnet56_cifar10 or weight_count == 855578)
assert (model != preresnet56_cifar100 or weight_count == 861428)
assert (model != preresnet56_svhn or weight_count == 855578)
assert (model != preresnet110_cifar10 or weight_count == 1730522)
assert (model != preresnet110_cifar100 or weight_count == 1736372)
assert (model != preresnet110_svhn or weight_count == 1730522)
assert (model != preresnet164bn_cifar10 or weight_count == 1703258)
assert (model != preresnet164bn_cifar100 or weight_count == 1726388)
assert (model != preresnet164bn_svhn or weight_count == 1703258)
assert (model != preresnet1001_cifar10 or weight_count == 10327706)
assert (model != preresnet1001_cifar100 or weight_count == 10350836)
assert (model != preresnet1001_svhn or weight_count == 10327706)
assert (model != preresnet1202_cifar10 or weight_count == 19423834)
assert (model != preresnet1202_cifar100 or weight_count == 19429684)
assert (model != preresnet1202_svhn or weight_count == 19423834)
x = np.zeros((1, 3, 32, 32), np.float32)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
```
#### File: gluoncv2/models/resnet_cifar.py
```python
__all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn', 'resnet56_cifar10',
'resnet56_cifar100', 'resnet56_svhn', 'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn',
'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn', 'resnet272bn_cifar10',
'resnet272bn_cifar100', 'resnet272bn_svhn', 'resnet542bn_cifar10', 'resnet542bn_cifar100',
'resnet542bn_svhn', 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn', 'resnet1202_cifar10',
'resnet1202_cifar100', 'resnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .resnet import ResUnit
class CIFARResNet(HybridBlock):
"""
ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_resnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def resnet20_cifar10(classes=10, **kwargs):
"""
ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar10", **kwargs)
def resnet20_cifar100(classes=100, **kwargs):
"""
ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar100", **kwargs)
def resnet20_svhn(classes=10, **kwargs):
"""
ResNet-20 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_svhn", **kwargs)
def resnet56_cifar10(classes=10, **kwargs):
"""
ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar10", **kwargs)
def resnet56_cifar100(classes=100, **kwargs):
"""
ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar100", **kwargs)
def resnet56_svhn(classes=10, **kwargs):
"""
ResNet-56 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_svhn", **kwargs)
def resnet110_cifar10(classes=10, **kwargs):
"""
ResNet-110 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar10", **kwargs)
def resnet110_cifar100(classes=100, **kwargs):
"""
ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar100", **kwargs)
def resnet110_svhn(classes=10, **kwargs):
"""
ResNet-110 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_svhn", **kwargs)
def resnet164bn_cifar10(classes=10, **kwargs):
"""
ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar10", **kwargs)
def resnet164bn_cifar100(classes=100, **kwargs):
"""
ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar100", **kwargs)
def resnet164bn_svhn(classes=10, **kwargs):
"""
ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_svhn", **kwargs)
def resnet272bn_cifar10(classes=10, **kwargs):
"""
ResNet-272(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar10", **kwargs)
def resnet272bn_cifar100(classes=100, **kwargs):
"""
ResNet-272(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar100", **kwargs)
def resnet272bn_svhn(classes=10, **kwargs):
"""
ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_svhn", **kwargs)
def resnet542bn_cifar10(classes=10, **kwargs):
"""
ResNet-542(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar10", **kwargs)
def resnet542bn_cifar100(classes=100, **kwargs):
"""
ResNet-542(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar100", **kwargs)
def resnet542bn_svhn(classes=10, **kwargs):
"""
ResNet-542(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs)
def resnet1001_cifar10(classes=10, **kwargs):
"""
ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar10", **kwargs)
def resnet1001_cifar100(classes=100, **kwargs):
"""
ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar100", **kwargs)
def resnet1001_svhn(classes=10, **kwargs):
"""
ResNet-1001 model for SVHN from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_svhn", **kwargs)
def resnet1202_cifar10(classes=10, **kwargs):
"""
ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar10", **kwargs)
def resnet1202_cifar100(classes=100, **kwargs):
"""
ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar100", **kwargs)
def resnet1202_svhn(classes=10, **kwargs):
"""
ResNet-1202 model for SVHN from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(resnet20_cifar10, 10),
(resnet20_cifar100, 100),
(resnet20_svhn, 10),
(resnet56_cifar10, 10),
(resnet56_cifar100, 100),
(resnet56_svhn, 10),
(resnet110_cifar10, 10),
(resnet110_cifar100, 100),
(resnet110_svhn, 10),
(resnet164bn_cifar10, 10),
(resnet164bn_cifar100, 100),
(resnet164bn_svhn, 10),
(resnet272bn_cifar10, 10),
(resnet272bn_cifar100, 100),
(resnet272bn_svhn, 10),
(resnet542bn_cifar10, 10),
(resnet542bn_cifar100, 100),
(resnet542bn_svhn, 10),
(resnet1001_cifar10, 10),
(resnet1001_cifar100, 100),
(resnet1001_svhn, 10),
(resnet1202_cifar10, 10),
(resnet1202_cifar100, 100),
(resnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet20_cifar10 or weight_count == 272474)
assert (model != resnet20_cifar100 or weight_count == 278324)
assert (model != resnet20_svhn or weight_count == 272474)
assert (model != resnet56_cifar10 or weight_count == 855770)
assert (model != resnet56_cifar100 or weight_count == 861620)
assert (model != resnet56_svhn or weight_count == 855770)
assert (model != resnet110_cifar10 or weight_count == 1730714)
assert (model != resnet110_cifar100 or weight_count == 1736564)
assert (model != resnet110_svhn or weight_count == 1730714)
assert (model != resnet164bn_cifar10 or weight_count == 1704154)
assert (model != resnet164bn_cifar100 or weight_count == 1727284)
assert (model != resnet164bn_svhn or weight_count == 1704154)
assert (model != resnet272bn_cifar10 or weight_count == 2816986)
assert (model != resnet272bn_cifar100 or weight_count == 2840116)
assert (model != resnet272bn_svhn or weight_count == 2816986)
assert (model != resnet542bn_cifar10 or weight_count == 5599066)
assert (model != resnet542bn_cifar100 or weight_count == 5622196)
assert (model != resnet542bn_svhn or weight_count == 5599066)
assert (model != resnet1001_cifar10 or weight_count == 10328602)
assert (model != resnet1001_cifar100 or weight_count == 10351732)
assert (model != resnet1001_svhn or weight_count == 10328602)
assert (model != resnet1202_cifar10 or weight_count == 19424026)
assert (model != resnet1202_cifar100 or weight_count == 19429876)
assert (model != resnet1202_svhn or weight_count == 19424026)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
``` |
{
"source": "937447974/YJProductOptimizing",
"score": 2
} |
#### File: 937447974/YJProductOptimizing/YJWeakImages.py
```python
import os
import re
import glob
import shutil
import time
depth = 5  # search depth, one directory level per unit
imageFolder = "YJ/Assets.xcassets"  # image asset folder
rootPath = os.getcwd()  # current working directory, i.e. the project root
project = os.path.basename(rootPath)  # project name
tempPath = os.path.join(rootPath, "YJTempFolder")  # temporary folder
weakPath = os.path.join(rootPath, "YJWeakImages")  # weak-reference folder holding the removed assets
# main function
def main():
    # create the working folders
    createFolder()
    # move the asset catalog out of the project
    sourceFolder = os.path.join(rootPath, imageFolder)
    if not os.path.exists(sourceFolder):
        print 'Source folder %s does not exist' % (sourceFolder)
        return
    shutil.move(sourceFolder, tempPath)
    # collect the image sets to scan
imagePaths = []
for i in range(depth):
imagePaths += glob.glob(os.path.join(tempPath, '%s*.imageset' % ('*/' * i)))
    # search for weakly referenced (unused) images
weakImgs = []
for i in range(0, len(imagePaths)):
imagePath = imagePaths[i]
imageName = os.path.basename(imagePath)[:-9]
command = 'ag "%s" %s' % (imageName, project)
result = os.popen(command).read()
        if result == '':  # no references found, i.e. a weak (unused) image
weakImgs.append(imagePath)
shutil.move(os.path.join(rootPath, imagePath), weakPath)
print 'remove %s' % (imagePaths[i])
    # move the asset catalog back into the project
    shutil.move(tempPath+'/'+os.path.basename(sourceFolder), sourceFolder[:-len(os.path.basename(sourceFolder))])
    # remove the temporary folder
    removeFolder()
    # write a log of the removed files
    textPath = 'YJWeakImages.txt'
    text = '\nRun time: %s \n\nRemoved files:\n' % time.strftime('%Y-%m-%d %X', time.localtime()) + '\n'.join(sorted(weakImgs))
os.system('echo "%s" > %s' % (text, textPath))
print 'weakImages result:%d' % (len(weakImgs))
# create the temporary and weak-reference folders
def createFolder():
    # temporary folder
if os.path.exists(tempPath) :
shutil.rmtree(tempPath)
os.makedirs(tempPath)
    # weak-reference folder
if os.path.exists(weakPath) :
shutil.rmtree(weakPath)
os.makedirs(weakPath)
# remove the temporary folder
def removeFolder():
if os.path.exists(tempPath) :
shutil.rmtree(tempPath)
# entry point
if __name__ == '__main__':
main()
``` |
{
"source": "93jpark/CPS498-S20-Team1-raster-vision",
"score": 2
} |
#### File: torch_utils/chip_classification/data.py
```python
from os.path import join
from torchvision.transforms import Compose, ToTensor
from torch.utils.data import DataLoader
from rastervision.backend.torch_utils.data import DataBunch
from rastervision.backend.torch_utils.chip_classification.folder import (
ImageFolder)
def build_databunch(data_dir, img_sz, batch_sz, class_names):
num_workers = 4
train_dir = join(data_dir, 'train')
valid_dir = join(data_dir, 'valid')
aug_transform = Compose([ToTensor()])
transform = Compose([ToTensor()])
train_ds = ImageFolder(
train_dir, transform=aug_transform, classes=class_names)
valid_ds = ImageFolder(valid_dir, transform=transform, classes=class_names)
train_dl = DataLoader(
train_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
drop_last=True,
pin_memory=True)
valid_dl = DataLoader(
valid_ds,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
return DataBunch(train_ds, train_dl, valid_ds, valid_dl, class_names)
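# A hypothetical usage sketch of build_databunch (the directory layout and
# class names below are illustrative assumptions, not taken from the repo).
def _example_build_databunch():
    databunch = build_databunch(
        data_dir='/tmp/chips',  # must contain 'train' and 'valid' image folders
        img_sz=256,
        batch_sz=8,
        class_names=['building', 'background'])
    return databunch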
```
#### File: torch_utils/chip_classification/model.py
```python
from torchvision import models
from torch import nn
def get_model(model_arch, num_labels, pretrained=True):
    model = getattr(models, model_arch)(pretrained=pretrained, progress=True)
model.fc = nn.Linear(model.fc.in_features, num_labels)
return model
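# A minimal usage sketch (illustrative values; any torchvision classification
# architecture exposing an `fc` head, e.g. the resnet family, fits this helper).
def _example_get_model():
    return get_model('resnet18', num_labels=2, pretrained=False)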
``` |
{
"source": "93lykevin/FlaskPythonForFSP",
"score": 3
} |
#### File: 93lykevin/FlaskPythonForFSP/flask_app.py
```python
from flask import Flask, redirect, render_template, request, url_for, jsonify
from flask_cors import CORS, cross_origin
from flask_sqlalchemy import SQLAlchemy
import json
import numpy
import pandas as pd
import sys
from stockxsdk import Stockx
stockx = Stockx()
app = Flask(__name__)
CORS(app)
app.config["DEBUG"] = True
# SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
# username="KevTLy",
# password="<PASSWORD>",
# hostname="KevTLy.mysql.pythonanywhere-services.com",
# databasename="KevTLy$comments",
# )
# app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
# app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
# app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# db = SQLAlchemy(app)
# class Comment(db.Model):
# __tablename__ = "comments"
# id = db.Column(db.Integer, primary_key=True)
# content = db.Column(db.String(4096))
# comments = []
# @app.route("/")
@app.route("/", methods=["GET", "POST"])
@cross_origin()
def searchStockx():
if request.method == 'POST':
search = request.form['search']
searchResult = stockx.search(search)
return jsonify(searchResult)
# return response.headers['Access-Control-Allow-Origin']
# searchText = request.get.text
# return jsonify(searchText)
# searchRes = jsonify(stockx.search(searchText))
# print(searchRes)
# return searchRes
return jsonify([])
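# A hedged usage sketch: exercising the endpoint with Flask's built-in test
# client; the search term is an arbitrary example.
def _example_search_request():
    client = app.test_client()
    response = client.post('/', data={'search': 'yeezy'})
    return response.get_json()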
# @app.route("/", methods=["GET"])
# @cross_origin()
# def stockx_get():
# return jsonify('in GET request')
``` |
{
"source": "93suhwan/python-sos",
"score": 4
} |
#### File: python-sos/src/ast.py
```python
class AST:
pass
class Lambda(AST):
"""lambda abstraction
syntax: fun var -> exp
"""
def __init__(self, var, exp):
self.var = var
self.exp = exp
class App(AST):
"""lambda application
syntax: left_exp right_exp
"""
def __init__(self, left_exp, right_exp):
self.left_exp = left_exp
self.right_exp = right_exp
class BinOp(AST):
"""binary operation
left_exp (iop | bop) right_exp
"""
def __init__(self, left_exp, op, right_exp):
self.left_exp = left_exp
self.op = op
self.right_exp = right_exp
class LetIn(AST):
"""local variable declaration
let var = exp in body
"""
def __init__(self, var, exp, body):
self.var = var
self.exp = exp
self.body = body
class If(AST):
"""conditional expression
if cond then exp1 else exp2
"""
def __init__(self, cond, exp1, exp2):
self.cond = cond
self.exp1 = exp1
self.exp2 = exp2
class While(AST):
"""while loop
while cond do block
"""
def __init__(self, cond, block):
self.cond = cond
self.block = block
class Ref(AST):
"""memory allocation
ref exp
"""
def __init__(self, exp):
self.exp = exp
class Bang(AST):
"""memory dereference
! exp
"""
def __init__(self, exp):
self.exp = exp
class Assign(AST):
"""assignment expression
left_exp := right_exp
"""
def __init__(self, left_exp, right_exp):
self.left_exp = left_exp
self.right_exp = right_exp
class Seq(AST):
"""sequence expression
left_exp ; right_exp
"""
def __init__(self, left_exp, right_exp):
self.left_exp = left_exp
self.right_exp = right_exp
class Num(AST):
"""integer expression"""
def __init__(self, num):
self.value = num
class Bool(AST):
"""boolean expression"""
def __init__(self, bvalue):
self.value = bvalue
class Loc(AST):
"""location"""
def __init__(self, loc):
self.value = loc
class Skip(AST):
"""skip"""
pass
class Var(AST):
"""variable"""
def __init__(self, var_name):
self.name = var_name
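# An illustrative example (assuming '>' and '-' are accepted binary operators):
# the AST for `let x = 1 in if x > 0 then x else 0 - x` built from the classes above.
def _example_ast():
    x = Var('x')
    return LetIn(
        var=x,
        exp=Num(1),
        body=If(
            cond=BinOp(x, '>', Num(0)),
            exp1=x,
            exp2=BinOp(Num(0), '-', x)))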
```
#### File: python-sos/src/printer.py
```python
from visitor import PrintVisitor
class Printer(PrintVisitor):
"""A simple pretty-printer class"""
def __init__(self):
self._indent = 0
def print_Lambda(self, node):
var_name = node.var.name
indent_prev = self._indent
self._indent = 0
exp_str = self.print(node.exp)
self._indent = indent_prev
return self.indent() + 'fun ' + var_name + ' -> ' + exp_str
def print_App(self, node):
indent_prev = self._indent
self._indent = 0
left_exp = self.print(node.left_exp)
right_exp = self.print(node.right_exp)
self._indent = indent_prev
return self.indent() + '(' + left_exp + ')(' + right_exp + ')'
def print_BinOp(self, node):
op = node.op
indent_prev = self._indent
self._indent = 0
left_str = self.print(node.left_exp)
right_str = self.print(node.right_exp)
self._indent = indent_prev
return self.indent() + left_str + ' ' + op + ' ' + right_str
def print_LetIn(self, node):
var_name = node.var.name
indent_prev = self._indent
self._indent = 0
exp_str = self.print(node.exp)
self._indent = indent_prev
body_str = self.print(node.body)
self._indent = indent_prev
return self.indent() + 'let ' + var_name + \
' = ' + exp_str + ' in\n' + body_str
def print_If(self, node):
indent_prev = self._indent
self._indent = 0
cond_str = self.print(node.cond)
self._indent = indent_prev + 1
exp1_str = self.print(node.exp1)
exp2_str = self.print(node.exp2)
self._indent = indent_prev
return self.indent() + 'if ' + cond_str + '\n' + \
self.indent() + 'then {\n' + exp1_str + '\n' + \
self.indent() + '} \n' + self.indent() + \
'else {\n' + exp2_str + '\n' + self.indent() + '}'
def print_While(self, node):
indent_prev = self._indent
self._indent = 0
cond_str = self.print(node.cond)
self._indent = indent_prev + 1
block_str = self.print(node.block)
self._indent = indent_prev
return self.indent() + 'while ' + cond_str + \
' do {\n' + self.indent() + block_str + \
'\n' + self.indent() + '}'
def print_Ref(self, node):
indent_prev = self._indent
self._indent = 0
exp = self.print(node.exp)
self._indent = indent_prev
return self.indent() + 'ref (' + exp + ')'
def print_Bang(self, node):
indent_prev = self._indent
self._indent = 0
var_name = self.print(node.exp)
self._indent = indent_prev
return self.indent() + '!' + var_name
def print_Assign(self, node):
indent_prev = self._indent
self._indent = 0
var_name = self.print(node.left_exp)
exp_str = self.print(node.right_exp)
self._indent = indent_prev
return self.indent() + var_name + ' := ' + exp_str
def print_Seq(self, node):
indent_prev = self._indent
self._indent = 0
left_exp_str = self.print(node.left_exp)
self._indent = indent_prev
right_exp_str = self.print(node.right_exp)
self._indent = indent_prev
return self.indent() + left_exp_str + ';\n' + \
right_exp_str
def print_Num(self, node):
return str(node.value)
def print_Bool(self, node):
return str(node.value)
def print_Var(self, node):
return node.name
def indent(self):
return ' ' * self._indent
def write(self, node):
if node is not None:
print(self.print(node))
else:
raise Exception('Input AST is None')
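# An illustrative sketch: pretty-printing a small tree built from the classes in
# ast.py, assuming PrintVisitor.print dispatches to the matching print_<NodeClass>
# method above and that src/ is on sys.path so `ast` resolves to the project module.
def _example_pretty_print():
    from ast import LetIn, Var, Num, BinOp
    tree = LetIn(Var('x'), Num(1), BinOp(Var('x'), '+', Num(2)))
    Printer().write(tree)
    # expected output:
    # let x = 1 in
    # x + 2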
``` |
{
"source": "93suhwan/RepCoder",
"score": 3
} |
#### File: RepCoder/dsl/function.py
```python
from dsl.value import Value, IntValue, ListValue, NULLVALUE
from dsl.types import FunctionType
import params
class OutputOutOfRangeError(Exception):
pass
class NullInputError(Exception):
pass
def in_range(val):
if isinstance(val, IntValue):
val = ListValue([val.val])
for x in val.val:
if x < params.integer_min or x > params.integer_max:
return False
return True
class Function(Value):
def __init__(self, name, f, input_type, output_type):
super(Function, self).__init__(f, FunctionType(input_type, output_type))
self.name = name
def __call__(self, *args):
for arg in args:
if arg == NULLVALUE:
raise NullInputError('{}({})'.format(self.name, args))
raw_args = [x.val for x in args]
output_raw = self.val(*raw_args)
output_val = Value.construct(output_raw, self.output_type)
if output_val != NULLVALUE and not in_range(output_val):
raise OutputOutOfRangeError('{}({})'.format(self.name, args))
return output_val
@property
def input_type(self):
return self.type.input_type
@property
def output_type(self):
return self.type.output_type
def __eq__(self, other):
if not isinstance(other, Function):
return False
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __repr__(self):
return str(self)
def __str__(self):
return self.name
```
#### File: RepCoder/dsl/types.py
```python
class PrimitiveType(object):
def __init__(self, name):
self.name = name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __str__(self):
return self.name
def __repr__(self):
return self.name
INT = PrimitiveType('INT')
BOOL = PrimitiveType('BOOL')
LIST = PrimitiveType('LIST')
NULLTYPE = PrimitiveType('NULL')
class FunctionType(PrimitiveType):
def __init__(self, input_type, output_type):
name = 'F(' + str(input_type) + ', ' + str(output_type) + ')'
super(FunctionType, self).__init__(name)
self.input_type = input_type
self.output_type = output_type
# iterable
self.input_types = (input_type,) if not isinstance(input_type, tuple) else input_type
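# An illustrative example: a list->int function type and a two-argument
# higher-order input written as a tuple (as a ZIPWITH-style function would use).
def _example_types():
    l2i = FunctionType(LIST, INT)              # l2i.name == 'F(LIST, INT)'
    zip_in = FunctionType((LIST, LIST), LIST)  # zip_in.input_types == (LIST, LIST)
    return l2i, zip_in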
```
#### File: RepCoder/env/operator.py
```python
from dsl.impl import HIGHER_ORDER_FUNCTIONS, FIRST_ORDER_FUNCTIONS, LAMBDAS
def build_operator_space():
operators = []
for func in HIGHER_ORDER_FUNCTIONS:
for lambd in LAMBDAS:
if lambd.type == func.input_type[0]:
operators.append(Operator(func, lambd))
operators += [Operator(func) for func in FIRST_ORDER_FUNCTIONS]
return operators
class Operator(object):
"""
Represents a combination of function + lambda (or just function if the function does not receive a lambda).
This type is needed for the "function head" of the network.
"""
def __init__(self, function, lambd=None):
self.function = function
self.lambd = lambd
@staticmethod
def from_statement(statement):
if isinstance(statement.args[0], int):
return Operator(statement.function)
else:
return Operator(statement.function, statement.args[0])
def __repr__(self):
if self.lambd:
return "<Operator: %s %s>" % (self.function, self.lambd)
else:
return "<Operator: %s>" % self.function
def __eq__(self, other):
if not isinstance(other, Operator):
return False
return self.function == other.function and self.lambd == other.lambd
def __hash__(self):
return hash(str(self))
operator_space = build_operator_space()
num_operators = len(operator_space)
operator_to_index = dict([(func, indx) for indx, func in enumerate(operator_space)])
```
#### File: 93suhwan/RepCoder/makeErrorFile.py
```python
import env.operator as op
import itertools
import random
from dsl.impl import FIRST_ORDER_FUNCTIONS, HIGHER_ORDER_FUNCTIONS
import json
import copy
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_path', type=str)
parser.add_argument('max_prog_leng', type=str)
args = parser.parse_args()
def cart(func, lambd):
lst = []
for element in itertools.product(func, lambd):
lst.append(','.join(element))
return lst
def argInt(func, maxLen):
lst = []
for length in range(1, maxLen + 1):
string = func + ',' + str(length)
lst.append(string)
return lst
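# Small grounded examples of the two helpers above:
#   cart(['MAP'], ['+1', '-1']) -> ['MAP,+1', 'MAP,-1']
#   argInt('TAKE', 2)           -> ['TAKE,1', 'TAKE,2']
def _example_helpers():
    assert cart(['MAP'], ['+1', '-1']) == ['MAP,+1', 'MAP,-1']
    assert argInt('TAKE', 2) == ['TAKE,1', 'TAKE,2']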
def changeOp(progList, changedOp, length, changeNum):
if changeNum == 0:
return progList, sorted(changedOp)
while True:
rNum = random.randrange(0, length)
if rNum not in changedOp:
changeNum -= 1
changedOp.append(rNum)
break
for i in range(len(progList)):
if progList[i] == 'LIST' or progList[i] == 'INT':
rNum = rNum + 1
continue
else:
break
    rNum2 = rNum  # assumption: rNum2 bounds the integer argument of TAKE/DROP/ACCESS at this position
    L2LOP = ['REVERSE', 'SORT'] \
+ cart(['MAP'], I2I) + cart(['FILTER'], I2B) + cart(['SCAN1L'], I2I2I) \
+ argInt('TAKE', rNum2) + argInt('DROP', rNum2)
L2IOP = ['HEAD', 'TAIL', 'MINIMUM', 'MAXIMUM', 'SUM'] \
+ cart(['COUNT'], I2B) + argInt('ACCESS', rNum2)
funcArgs = progList[rNum].split(',')
if funcArgs[0] in L2I:
if funcArgs[0] in ['ACCESS', 'COUNT']:
origFunc = funcArgs[0] + ',' + funcArgs[1]
funcArgs.remove(funcArgs[1])
else:
origFunc = funcArgs[0]
funcArgs.remove(funcArgs[0])
if origFunc in L2IOP:
L2IOP.remove(origFunc)
funcArgs = [L2IOP[random.randrange(0, len(L2IOP))]] + funcArgs
L2IOP.append(origFunc)
else:
funcArgs = [L2IOP[random.randrange(0, len(L2IOP))]] + funcArgs
elif funcArgs[0] in L2L:
if funcArgs[0] in ['REVERSE', 'SORT']:
origFunc = funcArgs[0]
else:
origFunc = funcArgs[0] + ',' + funcArgs[1]
funcArgs.remove(funcArgs[1])
funcArgs.remove(funcArgs[0])
if origFunc in L2LOP:
L2LOP.remove(origFunc)
funcArgs = [L2LOP[random.randrange(0, len(L2LOP))]] + funcArgs
L2LOP.append(origFunc)
else:
funcArgs = [L2LOP[random.randrange(0, len(L2LOP))]] + funcArgs
elif funcArgs[0] in L2L2L:
origFunc = funcArgs[0] + ',' + funcArgs[1]
funcArgs.remove(funcArgs[1])
funcArgs.remove(funcArgs[0])
if origFunc in L2L2LOP:
L2L2LOP.remove(origFunc)
funcArgs = [L2L2LOP[random.randrange(0, len(L2L2LOP))]] + funcArgs
L2L2LOP.append(origFunc)
else:
funcArgs = [L2L2LOP[random.randrange(0, len(L2L2LOP))]] + funcArgs
else:
print("Error")
progList[rNum] = ','.join(funcArgs)
return changeOp(progList, changedOp, length, changeNum)
L2I = ['HEAD', 'TAIL', 'MINIMUM', 'MAXIMUM', 'SUM', 'ACCESS', 'COUNT']
L2L = ['MAP', 'FILTER', 'SCAN1L', 'REVERSE', 'SORT', 'TAKE', 'DROP']
L2L2L = ['ZIPWITH']
I2I = ["+1", "-1", "*2", "/2", "*-1", "**2", "*3", "/3", "*4", "/4"]
I2B = [">0", "<0", "EVEN", "ODD"]
I2I2I = ["+", "-", "*", "max", "min"]
L2L2LOP = cart(L2L2L, I2I2I)
with open(args.input_path, 'r') as f:
lines = f.read().splitlines()
leng = ['one', 'two', 'three', 'four', 'five']
leng1 = ['six', 'seven', 'eight']
leng2 = ['nine', 'ten']
leng3 = ['eleven', 'twelve']
leng4 = ['thirteen', 'fourteen']
leng5 = ['fifteen']
leng6 = ['sixteen']
if args.max_prog_leng == '8':
leng += leng1
elif args.max_prog_leng == '10':
leng += leng1
leng += leng2
elif args.max_prog_leng == '12':
leng += leng1
leng += leng2
leng += leng3
elif args.max_prog_leng == '14':
leng += leng1
leng += leng2
leng += leng3
leng += leng4
elif args.max_prog_leng == '15':
leng += leng1
leng += leng2
leng += leng3
leng += leng4
leng += leng5
elif args.max_prog_leng == '16':
leng += leng1
leng += leng2
leng += leng3
leng += leng4
leng += leng5
leng += leng6
for i in range(len(lines)):
data = json.loads(lines[i].rstrip())
prog = data['program']
progList = prog.split('|')
for l in range(len(leng)):
progListArg = copy.deepcopy(progList)
results = changeOp(progListArg, [], int(args.max_prog_leng), l + 1)
data['program'], data['changedOp'] = '|'.join(results[0]), results[1]
with open('./data/changed_test_dataset_' + args.max_prog_leng + '_' + leng[l], 'a') as outFile:
outFile.write(json.dumps(data) + '\n')
```
#### File: RepCoder/scripts/train.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import random
import torch
import multiprocessing
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
from torch.autograd import Variable
from torch import nn
from tqdm import tqdm
import params
from model.model import PCCoder
from cuda import use_cuda, LongTensor, FloatTensor
from env.env import ProgramEnv
from env.operator import Operator, operator_to_index
from env.statement import Statement, statement_to_index
from dsl.program import Program
from dsl.example import Example
learn_rate = 0.001
batch_size = 100
num_epochs = 40
test_iterator_size = 100
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_path', type=str, help='Path to data')
parser.add_argument('output_path', type=str, help='Output path of trained model')
parser.add_argument('--max_len', type=int, default=None,
help='Optional limit to the dataset size (usually for debugging)')
args = parser.parse_args()
train(args)
def generate_prog_data(line):
data = json.loads(line.rstrip())
examples = Example.from_line(data)
env = ProgramEnv(examples)
program = Program.parse(data['program'])
inputs = []
statements = []
drop = []
operators = []
for i, statement in enumerate(program.statements):
inputs.append(env.get_encoding())
# Translate absolute indices to post-drop indices
f, args = statement.function, list(statement.args)
for j, arg in enumerate(args):
if isinstance(arg, int):
args[j] = env.real_var_idxs.index(arg)
statement = Statement(f, args)
statements.append(statement_to_index[statement])
used_args = []
for next_statement in program.statements[i:]:
used_args += [x for x in next_statement.args if isinstance(x, int)]
to_drop = []
for j in range(params.max_program_vars):
if j >= env.num_vars or env.real_var_idxs[j] not in used_args:
to_drop.append(1)
else:
to_drop.append(0)
drop.append(to_drop)
operator = Operator.from_statement(statement)
operators.append(operator_to_index[operator])
if env.num_vars < params.max_program_vars:
env.step(statement)
else:
# Choose a random var (that is not used anymore) to drop.
env.step(statement, random.choice([j for j in range(len(to_drop)) if to_drop[j] > 0]))
return inputs, statements, drop, operators
def load_data(fileobj, max_len):
X = []
Y = []
Z = []
W = []
print("Loading dataset...")
lines = fileobj.read().splitlines()
if max_len is not None:
lines = lines[:max_len]
pool = multiprocessing.Pool()
res = list(tqdm(pool.imap(generate_prog_data, lines), total=len(lines)))
for input, target, to_drop, operators in res:
X += input
Y += target
Z += to_drop
W += operators
return np.array(X), np.array(Y), np.array(Z), np.array(W)
def train(args):
with open(args.input_path, 'r') as f:
data, statement_target, drop_target, operator_target = load_data(f, args.max_len)
model = PCCoder()
if use_cuda:
model.cuda()
model = nn.DataParallel(model)
# The cuda types are not used here on purpose - most GPUs can't handle so much memory
data, statement_target, drop_target, operator_target = torch.LongTensor(data), torch.LongTensor(statement_target), \
torch.FloatTensor(drop_target), torch.LongTensor(operator_target)
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)
statement_criterion = nn.CrossEntropyLoss()
drop_criterion = nn.BCELoss()
operator_criterion = nn.CrossEntropyLoss()
lr_sched = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4)
dataset_size = data.shape[0]
indices = list(range(dataset_size))
random.shuffle(indices)
train_size = int(0.9 * dataset_size)
train_data = data[indices[:train_size]]
train_statement_target = statement_target[indices[:train_size]]
train_drop_target = drop_target[indices[:train_size]]
train_operator_target = operator_target[indices[:train_size]]
test_data = Variable(data[indices[train_size:]].type(LongTensor))
test_statement_target = Variable(statement_target[indices[train_size:]].type(LongTensor))
test_drop_target = Variable(drop_target[indices[train_size:]].type(FloatTensor))
test_operator_target = Variable(operator_target[indices[train_size:]].type(LongTensor))
train_dataset = TensorDataset(train_data, train_statement_target, train_drop_target, train_operator_target)
data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for epoch in range(num_epochs):
model.train()
print("Epoch %d" % epoch)
lr_sched.step()
statement_losses = []
drop_losses = []
operator_losses = []
for batch in tqdm(data_loader):
x = Variable(batch[0].type(LongTensor))
y = Variable(batch[1].type(LongTensor))
z = Variable(batch[2].type(FloatTensor))
w = Variable(batch[3].type(LongTensor))
optimizer.zero_grad()
pred_act, pred_drop, pred_operator = model(x)
statement_loss = statement_criterion(pred_act, y)
drop_loss = drop_criterion(pred_drop, z)
operator_loss = operator_criterion(pred_operator, w)
loss = statement_loss + operator_loss + drop_loss
statement_losses.append(statement_loss.item())
drop_losses.append(drop_loss.item())
operator_losses.append(operator_loss.item())
loss.backward()
optimizer.step()
avg_statement_train_loss = np.array(statement_losses).mean()
avg_drop_train_loss = np.array(drop_losses).mean()
avg_operator_train_loss = np.array(operator_losses).mean()
model.eval()
with torch.no_grad():
# Iterate through test set to avoid out of memory issues
statement_pred, drop_pred, operator_pred = [], [], []
for i in range(0, len(test_data), test_iterator_size):
output = model(test_data[i: i + test_iterator_size])
statement_pred.append(output[0])
drop_pred.append(output[1])
operator_pred.append(output[2])
statement_pred = torch.cat(statement_pred, dim=0)
drop_pred = torch.cat(drop_pred, dim=0)
operator_pred = torch.cat(operator_pred, dim=0)
test_statement_loss = statement_criterion(statement_pred, test_statement_target)
test_drop_loss = drop_criterion(drop_pred, test_drop_target)
test_operator_loss = operator_criterion(operator_pred, test_operator_target)
print("Train loss: S %f" % avg_statement_train_loss, "D %f" % avg_drop_train_loss,
"F %f" % avg_operator_train_loss)
print("Test loss: S %f" % test_statement_loss.item(), "D %f" % test_drop_loss.item(),
"F %f" % test_operator_loss.item())
predict = statement_pred.data.max(1)[1]
test_error = (predict != test_statement_target.data).sum().item() / float(test_data.shape[0])
print("Test classification error: %f" % test_error)
model.module.save(args.output_path + ".%d" % epoch)
if __name__ == '__main__':
main()
``` |
{
"source": "93TEI/3D_Action_Recognition",
"score": 3
} |
#### File: Ground-Truth-Skeletons/Naman/LSTM2.py
```python
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from helperFunctions import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
torch.set_default_tensor_type('torch.cuda.FloatTensor')
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
return np.eye(num_classes, dtype='uint8')[y.astype(int)]
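# A small grounded example of the helper above: one-hot encoding integer labels.
def _example_to_categorical():
    out = to_categorical(np.array([0.0, 2.0]), 3)
    assert (out == np.array([[1, 0, 0], [0, 0, 1]])).all()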
class LSTMClassifier(nn.Module):
def __init__(self, hidden_dim=64,label_size=49,modified_input_dim=64):
super(LSTMClassifier, self).__init__()
self.hidden_dim = hidden_dim
self.fully_connected = nn.Sequential(nn.Linear(75, 70),nn.ReLU(),nn.Linear(70, 64),nn.ReLU())
self.lstm = nn.LSTM(input_size=modified_input_dim, hidden_size=hidden_dim)
self.hidden2label = nn.Linear(hidden_dim, label_size)
self.hidden = self.init_hidden()
def init_hidden(self):
# the first is the hidden h
# the second is the cell c
return (autograd.Variable(torch.zeros(1,1, self.hidden_dim)),
autograd.Variable(torch.zeros(1 ,1, self.hidden_dim)))
def forward(self, joint_3d_vec):
#print(joint_3d_vec.size())
x = joint_3d_vec
x = self.fully_connected(x.view(x.size()[0],x.size()[2]))
x = x.view(x.size()[0],1,x.size()[1])
#print(x.size())
#print(self.hidden[0].size(), self.hidden[1].size())
lstm_out, self.hidden = self.lstm(x, self.hidden)
y = self.hidden2label(lstm_out[-1])
log_probs = F.log_softmax(y)
return log_probs
trainingData = torch.from_numpy(getData())
labels = getLabels()
#indices = torch.from_numpy((labels.reshape(labels.shape[0])<5).dtype()).type(torch.LongTensor)
#indices = (torch.from_numpy(labels)<5).numpy()
number = int((labels==17).sum()) + int((labels==23).sum())
#indices = (labels<5)
# labels = labels[indices,:]
# trainingData = trainingData[indices,:,:,:]
neededData = torch.randn(number, 300, 8, 3)
neededLabels = np.zeros((number,1))
currentIndex = 0
for i in range(labels.shape[0]):
if (labels[i, 0] == 17) or (labels[i, 0] == 23):
neededData[currentIndex,:,:,:] = trainingData[i,:,13:21,:]
        neededLabels[currentIndex, :] = 0 if labels[i, 0] == 17 else 1  # remap class 17 -> 0 and 23 -> 1 for the 2-way classifier
currentIndex+=1
#labels = torch.from_numpy(to_categorical((neededLabels),5)).view(number,-1)
labels = torch.from_numpy(neededLabels).view(number,-1).type(torch.cuda.LongTensor)
trainingData = neededData
def checkAcc(model0,data,labels):
l = labels.size()[0]
labelsdash = autograd.Variable(labels.view(l))
l = 1000
out_labels = autograd.Variable(torch.zeros(l))
for i in range(l):
        temp = model0(autograd.Variable(data[i,:,:,:].view(300,1,75)))
# print(temp)
# print(temp.size(), type(temp))
out_labels[i] = temp.max(1)[1]
return(torch.mean((labelsdash[0:l].type(torch.cuda.LongTensor)==out_labels.type(torch.cuda.LongTensor)).type(torch.cuda.FloatTensor)))
model0 = LSTMClassifier(label_size=2).cuda()
def TrainAcc():
print(checkAcc(model0,trainingData,labels))
#print(labels.size())
def train(model, num_epoch, num_iter, lr=1e-3,rec_interval=2, disp_interval=10):
optimizer = optim.Adam(model.parameters(), lr)
loss_values = []
rec_step = 0
for eph in range(num_epoch):
print('epoch {} starting ...'.format(eph))
avg_loss = 0
n_samples = 0
randpermed = torch.randperm(trainingData.size()[0])[:num_iter]
for i in range(num_iter):
model.hidden = (model.hidden[0].detach(), model.hidden[1].detach())
model.zero_grad()
j = randpermed[i]
X,Y = trainingData[j,:,:,:].view(300,1,75),labels[j,:]
#print(X.size())
n_samples += len(X)
X = autograd.Variable(X)
#print(X)
Y = autograd.Variable(Y.view(1))
y_hat = model(X)
loss = F.cross_entropy(y_hat, Y)
avg_loss += loss.data[0]
if i % disp_interval == 0:
print('epoch: %d iterations: %d loss :%g' % (eph, i, loss.data[0]))
if rec_step%rec_interval==0:
loss_values.append(loss.data[0])
loss.backward()
optimizer.step()
rec_step += 1
avg_loss /= n_samples
#evaluating model accuracy
#TrainAcc()
print('epoch: {} <====train track===> avg_loss: {} \n'.format(eph, avg_loss))
return loss_values
#l = train(model0, 10, 100, 2, 20)
def PlotLoss(l,name):
plt.plot(l)
plt.show()
plt.savefig(name)
def Scheduler():
loss0 = train(model0,3,3300,6e-3)
loss1 = train(model0,20,3300,1e-3)
PlotLoss(loss1,'loss1.png')
TrainAcc()
loss2 = train(model0,20,3300,1e-3)
TrainAcc()
loss3 = train(model0,20,3300,1e-4)
PlotLoss(loss1+loss2+loss3,'loss2.png')
TrainAcc()
loss4 = train(model0,20,3300,1e-4)
TrainAcc()
loss5 = train(model0,50,3300,1e-5)
PlotLoss(loss1+loss2+loss3+loss4+loss5,'loss3.png')
TrainAcc()
```
#### File: Ground-Truth-Skeletons/Naman/remove0s.py
```python
import numpy as np
from helperFunctions import *
def f(a):
##array is 3d
##size is 300x25x3
first = 0
last = 300
zeros = np.zeros((25, 3))
if not (a[299, :, :]==0).all():
return a
while (first<last):
middle = (first + last)//2
if (a[middle,:,:] == 0).all():
last = middle
else:
first = middle + 1
firstZeroIndex = min(first, last)
currentIndex = firstZeroIndex
while currentIndex + firstZeroIndex < 300:
a[currentIndex:currentIndex+firstZeroIndex,:,:] = a[:firstZeroIndex,:,:]
currentIndex += firstZeroIndex
howMuch = 300 - currentIndex
a[currentIndex:] = a[:howMuch]
return a
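# Note on f(): the binary search above locates the first all-zero 25x3 frame
# (where the recorded skeleton sequence ends); the valid prefix is then tiled
# repeatedly so the fixed 300-frame clip is padded with copies of the real
# motion instead of trailing zeros.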
trainData = getValData()
for i in range(trainData.shape[0]):
print(i)
trainData[i,:,:,:] = f(trainData[i,:,:,:])
np.save(open("Final-Data2/val_data.npy", 'wb'), trainData)
```
#### File: Ground-Truth-Skeletons/Sahil/twoLSTMcuda.py
```python
import torch
from cudaHelperFunctions import *
import torch.nn as nn
from torch import autograd
from torch import optim
import torch.nn.functional as F
torch.set_default_tensor_type('torch.cuda.FloatTensor')
def TrainAcc(start = 0, l = 1000):
print("The training accuracy is:", )
print(checkAcc(model,data,labels, start = start, length = l)[0])
def ValAcc(start = 0, l = 1000):
print("The validation accuracy is:",)
print(checkAcc(model, valData, valLabels,start = start, length = l)[0])
def FullAcc(theData, theLabels):
totalLength = len(theData)
totalAccuracy = 0
for i in range(totalLength//1000):
current = checkAcc(model, theData, theLabels, start = i*1000, length = 1000)[0]
print(current)
totalAccuracy += 1000.0*current
    current = checkAcc(model, theData, theLabels, start = 1000*(totalLength//1000), length = (totalLength % 1000))[0]
print(current)
totalAccuracy += float(totalLength%1000)*current
print("The overall accuracy is: ")
print(totalAccuracy/float(totalLength))
class LSTMClassifier(nn.Module):
def __init__(self, hidden_dim=256, label_size=49, input_dim=75, num_layers = 1):
super(LSTMClassifier, self).__init__()
self.hiddenDim = hidden_dim
self.layers = num_layers
self.embedding = nn.Linear(input_dim, 64)
self.lstm = nn.LSTM(input_size=64, hidden_size=hidden_dim, num_layers = num_layers)
self.fullyConnected = nn.Linear(hidden_dim, label_size)
self.hidden = self.init_hidden()
def init_hidden(self):
# the first is the hidden h
# the second is the cell c
return (autograd.Variable(torch.zeros(self.layers, 1, self.hiddenDim).type(torch.cuda.FloatTensor)),
autograd.Variable(torch.zeros(self.layers, 1, self.hiddenDim).type(torch.cuda.FloatTensor)))
def forward(self, input):
#print(joint_3d_vec.size())
#x = joint_3d_vec
#x = input.view(input.size()[0],1,input.size()[1])
#print(x.size())
#print(self.hidden[0].size(), self.hidden[1].size())
#print(type(input))
#print(input.size())
#print(input.type())
x = autograd.Variable(input)
#x = self.embedding(input.view(input.size()[0], 75))
x = self.embedding(x)
#print(x.size())
#print(x.view(x.size()[0], 1, 64).size())
#print(type(x), type(self.hidden[0]))
lstm_out, self.hidden = self.lstm(x.view(x.size()[0],1, 64), self.hidden)
y = self.fullyConnected(lstm_out[-1])
log_probs = F.log_softmax(y)
return log_probs
def train(model, num_epoch, epoch_size = -1, batchSize = 5, lr=1e-3,rec_interval=5, disp_interval=1):
global data, labels
optimizer = optim.Adam(model.parameters(), lr)
loss_values = []
rec_step = 0
for eph in range(num_epoch):
print('epoch {} starting ...'.format(eph))
avg_loss = 0
n_samples = 0
if epoch_size == -1:
num_iter = len(data)//batchSize
else:
num_iter = epoch_size//batchSize
randpermed = torch.randperm(len(data))
for i in range(num_iter):
model.hidden = (model.hidden[0].detach(), model.hidden[1].detach())
model.zero_grad()
totalLoss = 0.0
for k in range(batchSize):
j = randpermed[i*batchSize + k]
X= data[j]
Y = torch.cuda.LongTensor(1)
Y[0]=labels[j]
#print(X.size())
n_samples += len(X)
#print(X)
Y = autograd.Variable(Y)
y_hat = model(X)
loss = F.cross_entropy(y_hat, Y)
avg_loss += loss.data[0]
totalLoss += loss.data[0]
loss.backward(retain_variables = True)
rec_step += 1
optimizer.step()
if i % disp_interval == 0:
print('epoch: %d iterations: %d loss :%g' % (eph, i, totalLoss/batchSize))
if rec_step%rec_interval==0:
loss_values.append(totalLoss/batchSize)
avg_loss /= n_samples
#evaluating model accuracy
#TrainAcc()
print('epoch: {} <====train track===> avg_loss: {} \n'.format(eph, avg_loss))
PlotLoss(loss_values, name = 'twoLSTMloss.png')
return loss_values
print("Loaded libraries")
data = getData()
print("Loaded training data")
labels = getLabels()
print("Loaded training labels")
valData = getValData()
print("Loaded validation data")
valLabels = getValLabels()
print("Loaded validation labels")
#labels = torch.from_numpy(labels).view(number,-1).type(torch.cuda.LongTensor)
#print(labels.size())
model = LSTMClassifier(num_layers = 2)
#PlotLoss(loss)
def Scheduler():
loss0 = []
loss1 = []
loss2 = []
loss3 = []
loss4 = []
loss5 = []
TrainAcc()
ValAcc()
PlotLoss(loss0)
loss0 = train(model,1,batchSize = 16, lr = 1e-4)
PlotLoss(loss0)
TrainAcc()
ValAcc()
#PlotLoss(loss1,'loss1.png')
loss0 += train(model,1,batchSize = 16, lr = 5e-5)
PlotLoss(loss0)
TrainAcc()
ValAcc()
loss0 += train(model,1,batchSize = 16,lr = 2e-5)
PlotLoss(loss0)
TrainAcc()
ValAcc()
loss0 += train(model,1,batchSize = 16,lr=1e-5)
PlotLoss(loss0)
TrainAcc()
ValAcc()
loss0 += train(model,20,batchSize = 8,lr = 5e-6)
TrainAcc()
ValAcc()
PlotLoss(loss0)
#loss5 = train(model0,50,3300,1e-5)
#PlotLoss(loss1+loss2+loss3+loss4+loss5,'loss3.png')
#TrainAcc()
return loss0
```
#### File: Ground-Truth-Skeletons/Uddeshya/LSTM_test1.py
```python
import pandas as pd
from random import random
flow = (list(range(1,10,1)) + list(range(10,1,-1)))*100
pdata = pd.DataFrame({"a":flow, "b":flow})
pdata.b = pdata.b.shift(9)
data = pdata.iloc[10:] * random() # some noise
import numpy as np
def _load_data(data, n_prev = 100):
"""
data should be pd.DataFrame()
"""
docX, docY = [], []
for i in range(len(data)-n_prev):
docX.append(data.iloc[i:i+n_prev].as_matrix())
docY.append(data.iloc[i+n_prev].as_matrix())
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1):
"""
This just splits data to training and testing parts
"""
ntrn = int(round(len(df) * (1 - test_size)))
X_train, y_train = _load_data(df.iloc[0:ntrn])
X_test, y_test = _load_data(df.iloc[ntrn:])
return (X_train, y_train), (X_test, y_test)
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
in_out_neurons = 2
hidden_neurons = 50
model = Sequential()
# n_prev = 100, 2 values per x axis
model.add(LSTM(hidden_neurons, input_shape=(100, 2)))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error",
optimizer="rmsprop",
metrics=['accuracy'])
(X_train, y_train), (X_test, y_test) = train_test_split(data)
model.fit(X_train, y_train, batch_size=700, nb_epoch=50, validation_data=(X_test, y_test), verbose=1)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
predicted = model.predict(X_test, batch_size=700)
# and maybe plot it
pd.DataFrame(predicted).to_csv("predicted.csv")
pd.DataFrame(y_test).to_csv("test_data.csv")
```
#### File: Videos/Naman/cudaHelperFunctions.py
```python
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
from torch import autograd
def getData():
model = pickle.load(open('../datasets/trainData.npy', 'rb'))
for i in range(len(model)):
model[i] = model[i].type(torch.cuda.FloatTensor)
return model
def getLabels():
labels = np.load('../datasets/trainLabels.npy')
return torch.from_numpy(labels).type(torch.cuda.LongTensor)
def getValData():
model = pickle.load(open('../datasets/valData.npy', 'rb'))
for i in range(len(model)):
model[i] = model[i].type(torch.cuda.FloatTensor)
return model
def getValLabels():
labels = np.load('../datasets/valLabels.npy')
return torch.from_numpy(labels).type(torch.cuda.LongTensor)
def checkAcc(model0,data,labels, start = 0, length = 500):
if length == -1:
l = labels.size()[0]
else:
l = length
labels = labels[start:start + l]
labelsdash = labels.view(l)
out_labels = torch.zeros(l, 1).type(torch.cuda.FloatTensor)
for i in range(start, start + l):
model0.hidden = (model0.hidden[0].detach(), model0.hidden[1].detach())
model0.zero_grad()
temp = model0(data[i])
# print(temp)
# print(temp.size(), type(temp))
if i%200 == 0 and l > 100:
print("checking", i, "of", length)
out_labels[i-start] = (temp.data.max(1)[1]).type(torch.cuda.FloatTensor)[0]
return(torch.mean((labelsdash[:l].type(torch.cuda.LongTensor)==out_labels.type(torch.cuda.LongTensor)).type(torch.cuda.FloatTensor)))
def PlotLoss(l,name = 'currentLoss.png'):
plt.clf()
plt.cla()
plt.close()
plt.plot(l)
plt.show()
plt.savefig(name)
def PlotAccuracies(l1, l2, name="accuracies.png"):
plt.clf()
plt.cla()
plt.close()
plt.plot(l1)
plt.plot(l2)
plt.show()
plt.savefig(name)
```
#### File: Videos/Video-Training/combinedLSTMs.py
```python
import numpy as np
import torch
import torch.nn as nn
# getPreds (heatmaps -> 2D joint coordinates) is assumed to be provided by the repo's utility modules
from torch import autograd
from torch import optim
import torch.nn.functional as F
model = torch.load('hgreg-3d.pth').cuda()
class CombinedLSTM(nn.Module):
"""docstring for CombinedLSTM"""
def __init__(self, inHeight, inWidth, inChannels, featHidden, skeHidden, outDim):
super(CombinedLSTM, self).__init__()
self.inHeight = inHeight
self.inWidth = inWidth
self.inChannels = inChannels
self.featHidden = featHidden
self.skeHidden = skeHidden
self.outDim = outDim
def forward(self, input):
output = model(input)
pred = getPreds((output[-2].data).cpu().numpy())[0] * 4
reg = (output[-1].data).cpu().numpy().reshape(pred.shape[0], 1)
point_3d = np.concatenate([pred, (reg + 1) / 2. * 256], axis = 1)
``` |
{
"source": "93xiaoming/RL_state_preparation",
"score": 3
} |
#### File: single qubit/deep QL/run_this.py
```python
from environment import Env
from Net_dql import DeepQNetwork
import numpy as np
def run_maze():
step = 0
fid_10 = 0
ep_max = 500
for episode in range(ep_max):
observation = env.reset()
while True:
action = RL.choose_action(observation)
observation_, reward, done, fid = env.step(action)
RL.store_transition(observation, action, reward, observation_)
RL.learn()
observation = observation_
if done:
if episode >= ep_max-11:
fid_10 = max(fid_10,fid)
break
step += 1
return fid_10
if __name__ == "__main__":
dt_=np.pi/20
env = Env(action_space=list(range(2)), #allow two actions
dt=dt_)
RL = DeepQNetwork(env.n_actions, env.n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.99,
replace_target_iter=200,
memory_size=2000,
e_greedy_increment=0.001,
)
fidelity = run_maze()
print("Final_fidelity=", fidelity)
```
#### File: RL_state_preparation/single qubit/Krotov.py
```python
import numpy as np
from scipy import linalg
sx = 1/2 * np.mat([[0, 1],[ 1, 0]], dtype=complex)
sy = 1/2 * np.mat([[0, -1j],[1j, 0]], dtype=complex)
sz = 1/2 * np.mat([[1, 0],[0, -1]], dtype=complex)
def hamiltonian(j):
J = 4
H = (j) * J * sz + sx
return H
T = 2*np.pi
N = 20
dt = T/N
I = 500
fidelity = np.zeros(I+1)
observable = np.mat(np.zeros(shape=(2,2), dtype=complex))
observable[-1, -1] = 1
psi = np.mat(np.zeros(shape=(2, N+1), dtype=complex))
psi[0,0] = 1
pseudo = np.mat(np.zeros(shape=(2, N+1), dtype=complex)) #
seq = np.random.rand(N)
seq_f = seq
for i in range(N):
psi[:,i+1] = linalg.expm(-(1j) * hamiltonian(seq[i]) * dt).dot(psi[:,i])
fidelity[0]=(np.absolute(psi[-1,-1]))**2
pseudo[:,-1] = observable.dot(psi[:,-1])
dj = 0.01
for i in range(I):
for j in reversed(range(N)):
pseudo[:,j] = linalg.expm((1j) * hamiltonian(seq[j]) * dt).dot(pseudo[:,j+1])
for k in range(N):
dH = (hamiltonian(seq[k]+dj) - hamiltonian(seq[k]-dj)) / (2*dj)
seq_f[k] = seq[k] + (pseudo[:,k].conj().T.dot(dH.dot(psi[:,k]))).imag[0,0]
psi[:,k+1] = linalg.expm(-(1j) * hamiltonian(seq_f[k]) * dt).dot(psi[:,k])
seq = seq_f
fidelity[i+1] += (np.absolute(psi[-1,-1]))**2
pseudo[:,-1] = observable.dot(psi[:,-1])
print('final_fidelity=',fidelity[-1])
```
#### File: single qubit/policy gradient/environment.py
```python
import numpy as np
from scipy.linalg import expm
class Env( object):
def __init__(self,
dt=np.pi/10):
super(Env, self).__init__()
self.n_actions = 2
self.n_states = 4
self.state = np.array([1,0,0,0])
self.nstep=0 ##count number of step at each episode
self.dt=dt
def reset(self):
# return observation
self.state = np.array([1,0,0,0])
self.nstep = 0 #reset number of step at each episode
return self.state
def step(self, action):
psi = np.array([self.state[0:int(len(self.state) / 2)] + self.state[int(len(self.state) / 2):int(len(self.state))] * 1j])
psi = psi.T
psi=np.mat(psi)
J = 4 # control field strength
# J=2
######## pauli matrix
sx = np.mat([[0, 1], [1, 0]], dtype=complex)
sz = np.mat([[1, 0], [0, -1]], dtype=complex)
U = np.matrix(np.identity(2, dtype=complex)) # initial Evolution operator
H = J *float(action)/(self.n_actions-1)* sz + 1 * sx
U = expm(-1j * H * self.dt) # Evolution operator
psi = U * psi
########################## target state defined by yourself
target = np.mat([[0], [1]], dtype=complex) # south pole
err = 1 - (np.abs(psi.H * target) ** 2).item(0).real # infidelity (to make it as small as possible)
################################################################
#rwd = 10*(err < 10e-3) # give reward only when the error is small enough
#rwd = -1 +5000.*(err < 10e-3) #or other type of reward
rwd = (err<0.5)*10 +(err<0.1)*100 + 5000.*(err < 10e-3) #nice reward
done =( (err < 10e-3) or self.nstep>=np.pi/self.dt ) #end each episode if error is small enough, or step is larger than 2*pi/dt
self.nstep +=1 # step counter add one
psi=np.array(psi)
ppsi = psi.T
self.state = np.array(ppsi.real.tolist()[0] + ppsi.imag.tolist()[0])
return self.state, rwd, done, 1-err
```
#### File: single qubit/QL/environment.py
```python
import math
import cmath
import numpy as np
from scipy.linalg import expm
sx = 1/2 * np.mat([[0, 1],[ 1, 0]], dtype=complex)
sy = 1/2 * np.mat([[0, -1j],[1j, 0]], dtype=complex)
sz = 1/2 * np.mat([[1, 0],[0, -1]], dtype=complex)
def hamiltonian(j):
J = 4
H = (j) * J * sz + sx
return H
psi_target = np.mat([[1],[0]], dtype=complex)
psi_0 = np.mat([[0],[1]], dtype=complex)
dt = np.pi/20
Dtheta = np.pi/30
Dphi = np.pi/30
def phase2(z):
'''
return phase angle in [0, 2pi]
'''
phase = cmath.phase(z)
if phase < 0:
phase += 2*math.pi
return phase
def state_to_lattice_point(state):
'''
Note: phi = 0 or 2pi are the same
return the list [theta_i, phi_i]
'''
if state[0,0] == 0:
## Special case 1: [0, 1]
theta, phi = math.pi, 0
else:
conj = state[0,0].conj()
state_reg = state * (conj/abs(conj))
# print(state_reg[0,0].real)
if (state_reg[0,0].real)>= 1:
# Unitary should preserve norm
theta, phi = 0, 0
else:
# print(state_reg[0,0].imag) # this should be 0
theta = 2 * math.acos(state_reg[0,0].real)
            # state_reg[1,0]/sin(theta/2) = cos(phi) + i*sin(phi)
if theta == 0:
## Special case 2: [1, 0]
phi = 0
else:
phi = phase2(state_reg[1,0]/math.sin(theta/2)) #force the phase of the first elements to be 0.
theta_i = round(theta/Dtheta)
phi_i = round(phi/Dphi)
if phi_i == round(2*math.pi/Dphi):
phi_i = 0
return [theta_i, phi_i]
# class Maze(object): # for Python 2
class Maze:
# qubit in the Bloch Maze
def __init__(self):
self.action_space = ['0', '1']
self.n_actions = len(self.action_space)
self._build_maze()
def _build_maze(self):
self.state = psi_0
def reset(self):
self.state = psi_0
self.counter = 0
# print(dt)
return state_to_lattice_point(self.state)
def step(self, action):
if action == 0:
U = expm(-(1j) * hamiltonian(0) * dt)
elif action == 1:
U = expm(-(1j) * hamiltonian(1) * dt)
self.state = U.dot(self.state)
self.counter += 1
s_ = self.state
fidelity = (abs(s_.conj().T.dot(psi_target)[0,0]))**2
error = 1-fidelity
if error < 10e-3:
reward = 5000
done = True
s_lattice = 'terminal'
else:
#reward = -1*(error>=0.5) + 10*(error<0.5) + 100*(error<0.1)
reward = 10*(error<0.5) + 100*(error<0.1)
done = (self.counter >= np.pi/dt)
s_lattice = state_to_lattice_point(s_)
return s_lattice, reward, done, fidelity
``` |
{
"source": "940716tian/FlaskStudy",
"score": 2
} |
#### File: day01/myapp/__init__.py
```python
from flask import Flask
from .views import blue
def create_app():
    # create the Flask application instance
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'guangfangyidiande'
    # register the blueprint
    app.register_blueprint(blue)
    return app
```
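A typical entry point for this factory (hypothetical; the actual run script is not shown in this excerpt) would be:
```python
# Hypothetical manage.py using the factory above.
from myapp import create_app

app = create_app()

if __name__ == "__main__":
    app.run(debug=True)
```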
#### File: myapp/views/views.py
```python
from flask import Blueprint, session, render_template
from jinja2 import Template
from myapp.models import db, Person
blue = Blueprint("day02",__name__)
@blue.route('/set')
def set_session():
session['name'] = 'tom'
return "OK"
@blue.route('/get')
def get_session():
res = session.get('name','游客')
return str(res)
@blue.route('/index')
def index():
    import os
    # get the directory this module is running from
    root = os.path.dirname(__file__)
    # build the path to the template file
    file_path = os.path.join(root,'templates','index.html')
    with open(file_path, 'r') as f:
        template = Template(f.read())
    html = template.render()
    return html
@blue.route('/block')
def my_block():
return render_template('test.html')
@blue.route('/my_marco')
def my_marco():
data = ['python','php','C','C++']
return render_template('my_marco.html',data=data)
@blue.route('/for_demo')
def for_demo():
data = ['python','php','C','C++']
return render_template('fordemo.html',data=data)
@blue.route('/tags')
def tags():
my_str = "hello"
html_str = "<h1>the end is good if not good it's not end</h1>"
return render_template("filter_tags.html",data=my_str,html_str=html_str,my_arr=[1,3,2,6,5])
@blue.route('/create')
def create_db():
db.create_all()
return "创建完毕"
@blue.route('/drop')
def drop_db():
db.drop_all()
return "跑路"
@blue.route('/create_data')
def create_user():
    # create a single record
    # u = Person(
    # name="张三"
    # )
    # db.session.add(u)
    # db.session.commit()
    # create records in bulk
    persons = []
    for i in range(10):
        u = Person(name="张四"+str(i))
        persons.append(u)
    # write the batch to the database
db.session.add_all(persons)
db.session.commit()
return "创建完毕"
@blue.route('/get_users')
def get_users():
    # query all records
res = Person.query.all()
for i in res:
print(i.name)
return "OK"
```
#### File: day02_v1/myapp/ext.py
```python
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def init_ext(app):
se = Session()
se.init_app(app)
    # bind db to the app
db.init_app(app)
```
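A factory that wires these extensions up (a sketch under the assumption that the config supplies a database URI and a flask-session backend; the real `create_app` for this day is not shown) might look like:
```python
# Hypothetical factory showing how init_ext and db are expected to be used.
from flask import Flask
from myapp.ext import db, init_ext

def create_app():
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///demo.sqlite3'  # placeholder URI
    app.config['SESSION_TYPE'] = 'filesystem'                         # required by flask-session
    init_ext(app)
    return app
```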
#### File: day03/myapp/models.py
```python
from myapp.extensions import db
class Dog(db.Model):
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
name = db.Column(
db.String(30),
nullable=False,
unique=True
)
place = db.Column(
db.String(50),
default="杭州户口"
)
def to_dict(self):
data = {
"id":self.id,
"name":self.name,
"place":self.place
}
return data
class Grade(db.Model):
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
name = db.Column(
db.String(20),
unique=True
)
stus = db.relationship(
"Stu",
backref="grade",
lazy=True
)
class Stu(db.Model):
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
name = db.Column(
db.String(20),
unique=True
)
grade_id = db.Column(
db.Integer,
db.ForeignKey("grade.id")
)
class Tag(db.Model):
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
title = db.Column(
db.String(20)
)
tags = db.Table(
"tags",
db.Column("tag_id",db.Integer,db.ForeignKey("tag.id"),primary_key=True ),
db.Column("book_id",db.Integer,db.ForeignKey("book.id"),primary_key=True ),
)
class Book(db.Model):
id = db.Column(
db.Integer,
primary_key=True,
autoincrement=True
)
name = db.Column(
db.String(20),
)
tags = db.relationship(
"Tag",
secondary=tags,
backref=db.backref("books",lazy=True),
lazy=True
)
```
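A short usage sketch for the many-to-many relationship above (hypothetical; assumes an application context with the tables already created):
```python
# Hypothetical usage of the Tag/Book many-to-many relationship above.
from myapp.extensions import db
from myapp.models import Book, Tag

book = Book(name="Flask Notes")
book.tags.append(Tag(title="python"))   # rows in the `tags` association table are managed automatically
db.session.add(book)
db.session.commit()

print(Book.query.first().tags)
print(Tag.query.first().books)          # backref defined on the relationship
```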
#### File: day05/myapp/views.py
```python
from flask import Blueprint, render_template, request, jsonify
from .models import News
from .ext import db, cache, api
from flask_restful import Resource
blue = Blueprint("day04",__name__)
def init_blue(app):
app.register_blueprint(blue)
@blue.route('/news',methods=['GET','POST','PUT','DELETE'])
def news_api():
if request.method == "GET":
        # fetch a record
id = int(request.args.get("id",1))
news = News.query.get_or_404(id)
result = {
'code':1,
'msg':"OKKK",
'data':news.to_dict()
}
return jsonify(result)
elif request.method == "POST":
        # create a record
params = request.form
title = params.get("title")
content = params.get("content")
news = News(
title=title,
content=content
)
db.session.add(news)
db.session.commit()
result = {
'code': 1,
'msg': "OKKK",
'data': news.to_dict()
}
return jsonify(result),201
elif request.method == "PUT":
        # update a record
params = request.form
id = int(params.get("id"))
        # figure out which record to update
news = News.query.get_or_404(id)
        # use the parsed values if present, otherwise fall back to the existing ones
title = params.get("title",news.title)
content = params.get("content",news.content)
news.title = title
news.content = content
db.session.add(news)
db.session.commit()
result = {
'code': 1,
'msg': "OKKK",
'data': news.to_dict()
}
return jsonify(result), 201
else:
        # delete a record
id = int(request.form.get("id"))
news = News.query.get_or_404(id)
result = {
'msg': "OKKK",
'data': news.to_dict()
}
db.session.delete(news)
db.session.commit()
return jsonify(result),204
class NewsApiTest(Resource):
def get(self):
return {'data':'nimadi'}
api.add_resource(NewsApiTest,'/test','/hehe')
```
#### File: day05restful/myapp/urls_apis_v1.py
```python
from flask_restful import Api
from myapp.apis_v1 import *
# create the Api instance
api = Api()
# bind the Api to the app
def init_api(app):
    api.init_app(app)
    # register the individual resource routes
api.add_resource(NewsOneApi,'/newsone')
api.add_resource(NewsTwoApi,'/two')
api.add_resource(NewsThreeApi,'/three/<int:id>')
api.add_resource(FourApi,'/four/<int:page>/<int:per_page>')
api.add_resource(FiveApi,'/five')
api.add_resource(SixApi,'/six')
api.add_resource(SevenApi,'/seven')
``` |
{
"source": "940716tian/PythonStudy",
"score": 3
} |
#### File: day03/app03plus/models.py
```python
from django.db import models
# Create your models here.
class IdCard(models.Model):
num = models.CharField(
max_length=20,
verbose_name="身份证编号"
)
addr = models.CharField(
max_length=20,
default="当地派出所"
)
class Meta:
verbose_name = "身份证类"
class Person(models.Model):
name = models.CharField(
max_length=30,
verbose_name="人名"
)
idcard = models.OneToOneField(
IdCard,
on_delete=models.PROTECT
)
def __str__(self):
return self.name
# class Grade(models.Model):
# name = models.CharField(
# max_length=20,
#
# )
# def __str__(self):
# return self.name
#
# class Stu(models.Model):
# name = models.CharField(
# max_length=30,
# )
# grade = models.ForeignKey(
# Grade,
# )
# def __str__(self):
# return self.name
class Author(models.Model):
name = models.CharField(
max_length=20
)
def __str__(self):
return self.name
class Book(models.Model):
name = models.CharField(
max_length=20
)
author = models.ManyToManyField(
Author
)
def __str__(self):
return self.name
```
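A hypothetical Django shell session exercising the one-to-one and many-to-many relations defined above (field values are placeholders):
```python
# Hypothetical shell usage of the models above (python manage.py shell).
from app03plus.models import IdCard, Person, Author, Book

card = IdCard.objects.create(num="1101...", addr="local station")
person = Person.objects.create(name="Tom", idcard=card)

author = Author.objects.create(name="Alice")
book = Book.objects.create(name="Django Notes")
book.author.add(author)                 # many-to-many add

print(person.idcard.num)
print(list(book.author.all()))
```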
#### File: day03/app03/views.py
```python
from django.db.models import Avg, Sum, Q, F
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from .models import Humen
def get_data(req):
    # aggregate functions: money > 10050
humens = Humen.objects.filter(money__gt=10050)
# avg_age = humens.aggregate(Avg("age"))
avg_age = humens.aggregate(Sum("age"))
print(avg_age)
# return HttpResponse("OK")
return HttpResponse(avg_age.get("age__sum"))
def get_data_by_q(req):
    # build the query conditions with Q objects
# data = Humen.objects.filter(Q(age__lt=10) | Q(money__gt=10050))
# data = Humen.objects.filter(id__lt=10,age__gt=10)
data = Humen.objects.filter(Q(id__lt=10) & Q(age__gt=10))
return render(req,"humens.html",{"humens":data})
def get_data_by_f(req):
    # F expressions: find rows whose age is greater than their own id
data = Humen.objects.filter(age__gt=F("id"))
res = Humen.new_objects.create_girl("曹蒹")
print(res)
return render(req,"humens.html",{"humens":data})
def delete_humen(req):
    # parse parameters
param = req.GET
h_id = param.get("h_id")
h_id = int(h_id)
    # look up the record
# obj = Humen.objects.get(pk=h_id)
    # delete a single object
# obj.delete()
    objs = Humen.objects.filter(id__lt=h_id)  # conditional bulk delete
objs.delete()
return HttpResponse("删除成功")
# updating data
def update_humen(req):
    # parse parameters
new_name = req.GET.get("name")
    # take the first item of the queryset
# obj=Humen.objects.all().first()
# obj.name = new_name
# obj.save
res = Humen.objects.filter(id=10)
data = {
"name":new_name,
"age":1000
}
# res.update(name=new_name,age=1)
res.update(**data)
return HttpResponse("OK")
```
#### File: day05/app05plus/views.py
```python
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import render, redirect
# Create your views here.
from django.urls import reverse
def index(req):
user = req.user
uname = user.username if user.username else "游客"
return render(req,"new_index.html",{"user_name":uname})
def register(req):
if req.method == "GET":
return render(req,"register.html")
else:
params = req.POST
name = params.get('name')
pwd = params.get('pwd')
confirm_pwd = params.get("confirm_pwd")
if pwd and len(pwd) >= 4 and pwd == confirm_pwd:
            # then check whether the user already exists
if not User.objects.filter(username=name).exists():
                # create the user
user = User.objects.create_user(username=name,password=<PASSWORD>)
return redirect(reverse("Day05:mylogin"))
else:
return HttpResponse("用户已存在")
else:
return HttpResponse("账号或密码有误")
def mylogin(req):
if req.method == "GET":
return render(req,"mylogon.html")
else:
params = req.POST
name = params.get("name")
pwd = params.get("pwd")
if len(name) == 0 or len(pwd) == 0:
return HttpResponse("不能为空")
        # authenticate the user
user = authenticate(username=name,password=<PASSWORD>)
if user is None:
return HttpResponse("账号或密码错误")
else:
            # log the user in
login(req,user)
return redirect("/app05plus/newindex01")
def mylogout(req):
logout(req)
return redirect("/app05plus/newindex01")
```
#### File: day05/app05/views.py
```python
from django.http import JsonResponse, HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render, redirect
import json
# Create your views here.
def youxi(req):
return render(req,"2048.html")
def json_test(req):
data = {
"code":1,
"msg":"OK",
"data":[1,24,5],
}
# print(dir(req))
# print("请求方法",req.method)
# print("host",req.get_host())
# print("GET",req.GET)
# print("POST",req.POST)
# print("FILES",req.FILES)
# print(("META",req.META))
return JsonResponse(data)
    # return HttpResponse(json.dumps(data))  # unreachable alternative: manual serialization
    # json.load() converts JSON data into the corresponding Python data structure
def test_res(req):
    # instantiate the response
    response = HttpResponse()
    # set the response body
    response.content = "哈哈"
    # set the status code
response.status_code = 404
response.write("我是write写的")
response.flush()
response.content = "清除缓存"
return response
def mylogin(req):
if req.method =="GET":
return render(req,"login.html")
elif req.method == "POST":
        # perform the login
        # parse the username and password parameters
        params = req.POST
        name = params.get("umame")
        pwd = params.get("pwd")
        # assume the credentials were validated successfully
        # log in and redirect to the index page
        response = redirect("/app05/index")
        # set a cookie
        response.set_cookie("user",name,max_age=30)
        # also store the password in the session
req.session['pwd']=<PASSWORD>
req.session.set_expiry(30)
return response
else:
return HttpResponseNotAllowed("访问不允许")
# index page
def index(req):
    # read the value of the cookie named "user" from the request
    u_name = req.COOKIES.get("user")
    # read the session value
    res = req.session.get('pwd')
    print("session的结果",res)
    # fall back to a guest name if the cookie is missing
return render(req,"index.html",{"user_name":u_name})
def mylogout(req):
response = redirect("/app05/index")
response.delete_cookie("user")
del req.session['pwd']
return response
```
#### File: day09/app09/__init__.py
```python
from django.db.models.signals import pre_save,post_save
from .mysingal import action
def pre_save_func(sender,**kwargs):
print(sender)
print(kwargs)
print(kwargs.get("instance"))
# pre_save.connect(pre_save_func)
post_save.connect(pre_save_func)
def my_action_func(sender,**kwargs):
print(sender)
print(kwargs)
action.connect(my_action_func)
```
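A hedged sketch of firing the custom `action` signal wired up above (it assumes `mysingal.action` is a plain `django.dispatch.Signal`; the sender and keyword argument are made up for illustration):
```python
# Hypothetical sender for the custom signal; my_action_func receives the kwargs.
from app09.mysingal import action

action.send(sender="app09", message="hello")
```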
#### File: day09/app09/tasks.py
```python
from celery import task
import time
# @task
# def hello_celery(loop):
# for i in range(loop):
# print('hello')
# time.sleep(2)
@task
def my_task():
time.sleep(5)
    # pretend we are sending an email here
print("执行")
@task
def task2(n):
for i in range(n):
print(i)
time.sleep(2)
@task
def res_task(n):
print(n)
    # the result could also be stored in a cache
return {"data":n}
@task
def write_task():
print("我是定时炸弹")
``` |
{
"source": "940941188/lazeScript",
"score": 2
} |
#### File: lazeScript/backup/main.py
```python
import os
from plumbum import colors, cli
from plumbum.cmd import cp, mv
abs_path = os.path.abspath('.')
vs_code_config_path=os.path.expanduser("~/.config/Code - OSS/User")
vs_code_backup_path = os.path.join(abs_path, 'vs_code')
vs_code_files = ['keybindings.json', 'settings.json', 'snippets']
class BackupApp(cli.Application):
VERSION=colors.blue | "1.0.0"
def init_backup_vscode(self):
if not os.path.exists(vs_code_backup_path):
os.mkdir(vs_code_backup_path)
print(colors.green | ("created %s" % vs_code_backup_path))
def backup_vscode(self):
for file_name in vs_code_files:
target_path = os.path.join(vs_code_config_path, file_name)
if os.path.exists(target_path):
cp("-r", target_path, vs_code_backup_path)
print(colors.green | ('cp %s' % target_path))
else:
print(colors.red | ('file not found: %s' % target_path))
print("vscode config backup completed")
def recover_vscode(self):
for file_name in vs_code_files:
target_path = os.path.join(vs_code_config_path, file_name)
backup_file_path = os.path.join(vs_code_backup_path, file_name)
if os.path.exists(backup_file_path):
if os.path.isdir(target_path):
cp("-rf", backup_file_path, vs_code_config_path)
else:
cp("-f", backup_file_path, target_path)
print(colors.green | ('cp %s covers %s' % (backup_file_path, target_path)))
else:
print(colors.red | ('file not found: %s' % backup_file_path))
print("vscode config recover completed")
@cli.switch(["-b", "--backup"], help="backup config")
def backup(self):
print("start backup")
# backup vscode config
self.init_backup_vscode()
self.backup_vscode()
print("backup completed")
@cli.switch(["-r", "--recover"], cli.Set("all", "vscode"), help="recover config")
def recover(self, mode):
if mode == 'vscode':
self.recover_vscode()
def main(self):
pass
if __name__ == "__main__":
BackupApp.run()
``` |
{
"source": "942431221/kejiao_test",
"score": 3
} |
#### File: kejiao_test/build-flask/test.py
```python
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadSignature
from flask_httpauth import HTTPTokenAuth
SECRET_KEY = '<KEY>'
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://root:[email protected]:3306/flask"
auth = HTTPTokenAuth()
db = SQLAlchemy(app)
# table / model definition
class User(db.Model):
__tablename__ = 'users' # 表名
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
first_name = db.Column(db.String(64))
last_name = db.Column(db.String(64))
mobile = db.Column(db.String(128), unique=True)
def __repr__(self):
return '<User %r>' % self.username
@app.route('/token/', methods=['POST'])
def get_token():
"""
通过手机号验证用户(不需要实现发短信功能)
:return:{“access_token”: “xxx”, “refresh_token”: “xxx”, “expiry”: 12345}
"""
# 获取前端请求参数mobile
mobile = request.form.get('mobile')
try:
user = User.query.filter_by(mobile=mobile).first()
if user:
serializer = Serializer(SECRET_KEY, expires_in=12345)
return jsonify(data={'code': 200, 'message': 'Authenticate successfully',
'access_token': serializer.dumps(
{'mobile': '+86-12388888888', 'otp': '123456'}).decode('utf-8'),
'refresh_token': '',
'expiry': 12345})
else:
return jsonify(data={'code': 500, 'message': 'Wrong mobile, Authenticate failed'})
except Exception as e:
print(e)
return jsonify(data={'code': 500, 'message': 'Authenticate failed'})
@auth.error_handler
def error_handler():
return jsonify(data={'code': 401, 'message': '401 Unauthorized Access'})
@auth.verify_token
def verify_token(token):
"""
解析token
:param token:
:return:
"""
s = Serializer(SECRET_KEY)
    # token is valid
    try:
        data = s.loads(token)
        return data
    # token has expired
    except SignatureExpired:
        return None
    # token is invalid
except BadSignature:
return None
@app.route('/profile/', methods=['GET'])
@auth.login_required
def get_info():
    """
    Return the current user's basic profile information.
    :return:
    """
mobile = request.args.get('mobile')
try:
user = User.query.filter_by(mobile=mobile).first()
if user:
return jsonify(
{'code': 200, 'data': {'id': user.id, 'first_name': user.first_name, 'last_name': user.last_name}})
else:
return jsonify(data={'code': 500, 'message': 'The user does not exist'})
except Exception as e:
print(e)
return jsonify(data={'code': 500, 'message': 'Get failed'})
if __name__ == "__main__":
    # binding to 0.0.0.0 makes the service reachable from outside the host as well
app.run(host="0.0.0.0", port=5000, debug=True)
``` |
{
"source": "942star/upbit-client",
"score": 2
} |
#### File: swagger_client/api/withdraw_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class WithdrawApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def withdraw_chance(self, currency, **kwargs): # noqa: E501
"""출금 가능 정보 # noqa: E501
## 해당 통화의 가능한 출금 정보를 확인한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_chance(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency Symbol (required)
:return: WithdrawChance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.withdraw_chance_with_http_info(currency, **kwargs) # noqa: E501
else:
(data) = self.withdraw_chance_with_http_info(currency, **kwargs) # noqa: E501
return data
def withdraw_chance_with_http_info(self, currency, **kwargs): # noqa: E501
"""출금 가능 정보 # noqa: E501
## 해당 통화의 가능한 출금 정보를 확인한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_chance_with_http_info(currency, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str currency: Currency Symbol (required)
:return: WithdrawChance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method withdraw_chance" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `withdraw_chance`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/withdraws/chance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WithdrawChance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def withdraw_coin(self, currency, amount, address, **kwargs): # noqa: E501
"""코인 출금하기 # noqa: E501
## 코인 출금을 요청한다. **NOTE**: 바로출금 이용 시 유의사항 업비트 회원의 주소가 아닌 주소로 바로출금을 요청하는 경우, 출금이 정상적으로 수행되지 않습니다. 반드시 주소를 확인 후 출금을 진행하시기 바랍니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_coin(currency, amount, address, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str currency: Currency code (required)
        :param str amount: Amount to withdraw (required)
        :param str address: Withdrawal address registered in the allowed withdrawal addresses (required)
        :param str secondary_address: Secondary withdrawal address (only for coins that require it)
        :param str transaction_type: Withdrawal type - default : regular withdrawal - internal : instant withdrawal
:return: WithdrawCoin
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.withdraw_coin_with_http_info(currency, amount, address, **kwargs) # noqa: E501
else:
(data) = self.withdraw_coin_with_http_info(currency, amount, address, **kwargs) # noqa: E501
return data
def withdraw_coin_with_http_info(self, currency, amount, address, **kwargs): # noqa: E501
"""코인 출금하기 # noqa: E501
## 코인 출금을 요청한다. **NOTE**: 바로출금 이용 시 유의사항 업비트 회원의 주소가 아닌 주소로 바로출금을 요청하는 경우, 출금이 정상적으로 수행되지 않습니다. 반드시 주소를 확인 후 출금을 진행하시기 바랍니다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_coin_with_http_info(currency, amount, address, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str currency: Currency code (required)
        :param str amount: Amount to withdraw (required)
        :param str address: Withdrawal address registered in the allowed withdrawal addresses (required)
        :param str secondary_address: Secondary withdrawal address (only for coins that require it)
        :param str transaction_type: Withdrawal type - default : regular withdrawal - internal : instant withdrawal
:return: WithdrawCoin
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency', 'amount', 'address', 'secondary_address', 'transaction_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method withdraw_coin" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `withdraw_coin`") # noqa: E501
# verify the required parameter 'amount' is set
if ('amount' not in params or
params['amount'] is None):
raise ValueError("Missing the required parameter `amount` when calling `withdraw_coin`") # noqa: E501
# verify the required parameter 'address' is set
if ('address' not in params or
params['address'] is None):
raise ValueError("Missing the required parameter `address` when calling `withdraw_coin`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'currency' in params:
form_params.append(('currency', params['currency'])) # noqa: E501
if 'amount' in params:
form_params.append(('amount', params['amount'])) # noqa: E501
if 'address' in params:
form_params.append(('address', params['address'])) # noqa: E501
if 'secondary_address' in params:
form_params.append(('secondary_address', params['secondary_address'])) # noqa: E501
if 'transaction_type' in params:
form_params.append(('transaction_type', params['transaction_type'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/withdraws/coin', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WithdrawCoin', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def withdraw_info(self, **kwargs): # noqa: E501
"""개별 출금 조회 # noqa: E501
## 출금 UUID를 통해 개별 출금 정보를 조회한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: Withdrawal UUID
        :param str txid: Withdrawal TXID
        :param str currency: Currency code
:return: Withdraw
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.withdraw_info_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.withdraw_info_with_http_info(**kwargs) # noqa: E501
return data
def withdraw_info_with_http_info(self, **kwargs): # noqa: E501
"""개별 출금 조회 # noqa: E501
## 출금 UUID를 통해 개별 출금 정보를 조회한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_info_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str uuid: Withdrawal UUID
        :param str txid: Withdrawal TXID
        :param str currency: Currency code
:return: Withdraw
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['uuid', 'txid', 'currency'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method withdraw_info" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'uuid' in params:
query_params.append(('uuid', params['uuid'])) # noqa: E501
if 'txid' in params:
query_params.append(('txid', params['txid'])) # noqa: E501
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/withdraw', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Withdraw', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def withdraw_info_all(self, **kwargs): # noqa: E501
"""출금 리스트 조회 # noqa: E501
## 출금 리스트를 조회한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_info_all(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str currency: Currency code
        :param str state: Withdrawal state - submitting : in progress - submitted : submitted - almost_accepted : awaiting withdrawal - rejected : rejected - accepted : accepted - processing : processing - done : completed - canceled : canceled
        :param list[str] uuids: List of withdrawal UUIDs
        :param list[str] txids: List of withdrawal TXIDs
        :param float limit: Maximum number of results (default: 100, max: 100)
        :param float page: Page number, default: 1
        :param str order_by: Sort order - asc : ascending - desc : descending (default)
:return: list[Withdraw]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.withdraw_info_all_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.withdraw_info_all_with_http_info(**kwargs) # noqa: E501
return data
def withdraw_info_all_with_http_info(self, **kwargs): # noqa: E501
"""출금 리스트 조회 # noqa: E501
## 출금 리스트를 조회한다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_info_all_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str currency: Currency code
        :param str state: Withdrawal state - submitting : in progress - submitted : submitted - almost_accepted : awaiting withdrawal - rejected : rejected - accepted : accepted - processing : processing - done : completed - canceled : canceled
        :param list[str] uuids: List of withdrawal UUIDs
        :param list[str] txids: List of withdrawal TXIDs
        :param float limit: Maximum number of results (default: 100, max: 100)
        :param float page: Page number, default: 1
        :param str order_by: Sort order - asc : ascending - desc : descending (default)
:return: list[Withdraw]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['currency', 'state', 'uuids', 'txids', 'limit', 'page', 'order_by'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method withdraw_info_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
if 'state' in params:
query_params.append(('state', params['state'])) # noqa: E501
if 'uuids' in params:
query_params.append(('uuids', params['uuids'])) # noqa: E501
collection_formats['uuids'] = 'multi' # noqa: E501
if 'txids' in params:
query_params.append(('txids', params['txids'])) # noqa: E501
collection_formats['txids'] = 'multi' # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/withdraws', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Withdraw]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def withdraw_krw(self, amount, **kwargs): # noqa: E501
"""원화 출금하기 # noqa: E501
## 원화 출금을 요청한다. 등록된 출금 계좌로 출금된다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_krw(amount, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str amount: Amount of KRW to withdraw (required)
:return: Withdraw
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.withdraw_krw_with_http_info(amount, **kwargs) # noqa: E501
else:
(data) = self.withdraw_krw_with_http_info(amount, **kwargs) # noqa: E501
return data
def withdraw_krw_with_http_info(self, amount, **kwargs): # noqa: E501
"""원화 출금하기 # noqa: E501
## 원화 출금을 요청한다. 등록된 출금 계좌로 출금된다. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.withdraw_krw_with_http_info(amount, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str amount: Amount of KRW to withdraw (required)
:return: Withdraw
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['amount'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method withdraw_krw" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'amount' is set
if ('amount' not in params or
params['amount'] is None):
raise ValueError("Missing the required parameter `amount` when calling `withdraw_krw`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'amount' in params:
form_params.append(('amount', params['amount'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/withdraws/krw', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Withdraw', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
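A minimal usage sketch for the generated client above (hypothetical; JWT authentication setup through the generated configuration is omitted):
```python
# Hypothetical usage of the generated WithdrawApi (authentication setup omitted).
from swagger_client.api.withdraw_api import WithdrawApi

api = WithdrawApi()                                   # builds a default ApiClient internally
chance = api.withdraw_chance("BTC")                   # GET /withdraws/chance?currency=BTC
withdraws = api.withdraw_info_all(currency="BTC", limit=10)
print(chance, withdraws)
```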
#### File: swagger_client/models/currency.py
```python
import pprint
import re # noqa: F401
import six
class Currency(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'str',
'withdraw_fee': 'str',
'is_coin': 'bool',
'wallet_state': 'str',
'wallet_support': 'list[str]'
}
attribute_map = {
'code': 'code',
'withdraw_fee': 'withdraw_fee',
'is_coin': 'is_coin',
'wallet_state': 'wallet_state',
'wallet_support': 'wallet_support'
}
def __init__(self, code=None, withdraw_fee=None, is_coin=None, wallet_state=None, wallet_support=None): # noqa: E501
"""Currency - a model defined in Swagger""" # noqa: E501
self._code = None
self._withdraw_fee = None
self._is_coin = None
self._wallet_state = None
self._wallet_support = None
self.discriminator = None
if code is not None:
self.code = code
if withdraw_fee is not None:
self.withdraw_fee = withdraw_fee
if is_coin is not None:
self.is_coin = is_coin
if wallet_state is not None:
self.wallet_state = wallet_state
if wallet_support is not None:
self.wallet_support = wallet_support
@property
def code(self):
"""Gets the code of this Currency. # noqa: E501
        Uppercase English code identifying the currency  # noqa: E501
:return: The code of this Currency. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this Currency.
        Uppercase English code identifying the currency  # noqa: E501
:param code: The code of this Currency. # noqa: E501
:type: str
"""
self._code = code
@property
def withdraw_fee(self):
"""Gets the withdraw_fee of this Currency. # noqa: E501
        Withdrawal fee for this currency  # noqa: E501
:return: The withdraw_fee of this Currency. # noqa: E501
:rtype: str
"""
return self._withdraw_fee
@withdraw_fee.setter
def withdraw_fee(self, withdraw_fee):
"""Sets the withdraw_fee of this Currency.
        Withdrawal fee for this currency  # noqa: E501
:param withdraw_fee: The withdraw_fee of this Currency. # noqa: E501
:type: str
"""
self._withdraw_fee = withdraw_fee
@property
def is_coin(self):
"""Gets the is_coin of this Currency. # noqa: E501
        Whether this currency is a coin  # noqa: E501
:return: The is_coin of this Currency. # noqa: E501
:rtype: bool
"""
return self._is_coin
@is_coin.setter
def is_coin(self, is_coin):
"""Sets the is_coin of this Currency.
        Whether this currency is a coin  # noqa: E501
:param is_coin: The is_coin of this Currency. # noqa: E501
:type: bool
"""
self._is_coin = is_coin
@property
def wallet_state(self):
"""Gets the wallet_state of this Currency. # noqa: E501
        Wallet state of this currency  # noqa: E501
:return: The wallet_state of this Currency. # noqa: E501
:rtype: str
"""
return self._wallet_state
@wallet_state.setter
def wallet_state(self, wallet_state):
"""Sets the wallet_state of this Currency.
        Wallet state of this currency  # noqa: E501
:param wallet_state: The wallet_state of this Currency. # noqa: E501
:type: str
"""
self._wallet_state = wallet_state
@property
def wallet_support(self):
"""Gets the wallet_support of this Currency. # noqa: E501
        Deposit/withdrawal operations supported by this currency  # noqa: E501
:return: The wallet_support of this Currency. # noqa: E501
:rtype: list[str]
"""
return self._wallet_support
@wallet_support.setter
def wallet_support(self, wallet_support):
"""Sets the wallet_support of this Currency.
        Deposit/withdrawal operations supported by this currency  # noqa: E501
:param wallet_support: The wallet_support of this Currency. # noqa: E501
:type: list[str]
"""
self._wallet_support = wallet_support
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Currency, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Currency):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
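A short construction example for the generated model above (field values are hypothetical):
```python
# Hypothetical instantiation of the generated Currency model.
btc = Currency(code="BTC", withdraw_fee="0.0009", is_coin=True,
               wallet_state="working", wallet_support=["deposit", "withdraw"])
print(btc.to_dict())
```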
#### File: python/test/test_deposit_api.py
```python
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.deposit_api import DepositApi # noqa: E501
from swagger_client.rest import ApiException
class TestDepositApi(unittest.TestCase):
"""DepositApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.deposit_api.DepositApi() # noqa: E501
def tearDown(self):
pass
def test_deposit_coin_address(self):
"""Test case for deposit_coin_address
        Retrieve a single deposit address  # noqa: E501
"""
pass
def test_deposit_coin_addresses(self):
"""Test case for deposit_coin_addresses
        Retrieve all deposit addresses  # noqa: E501
"""
pass
def test_deposit_generate_coin_address(self):
"""Test case for deposit_generate_coin_address
        Request generation of a deposit address  # noqa: E501
"""
pass
def test_deposit_info(self):
"""Test case for deposit_info
        Retrieve a single deposit  # noqa: E501
"""
pass
def test_deposit_info_all(self):
"""Test case for deposit_info_all
        List deposits  # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "943470549/Torando-sqlalchemy",
"score": 2
} |
#### File: Torando-sqlalchemy/test/asynciotest.py
```python
import asyncio
@asyncio.coroutine
def wget(host):
    print('wget %s...' % host)
    connect = asyncio.open_connection(host, 80)
    reader, writer = yield from connect  # (StreamReader, StreamWriter)
    writer.write(('GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % host).encode('utf-8'))
    yield from writer.drain()
    writer.close()
```
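A driver for the coroutine above, using the same legacy generator-based asyncio API (the host below is a placeholder):
```python
# Hypothetical driver for wget() using the pre-async/await event loop API.
loop = asyncio.get_event_loop()
loop.run_until_complete(wget('www.example.com'))
loop.close()
```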
#### File: Torando-sqlalchemy/test/ios.py
```python
import gc
import os,json
fpath = r'D:\log\22001400255343328\test.txt'
def openfile(path):
with open(path, 'r') as f:
for line in f.readlines():
print (line)
gc.collect()
with open(path, 'a') as fw:
fw.write('sss')
def dump_object():
    with open(fpath, 'w') as f:
        d = dict(name='1', age=2)
        json.dump(d, f)
def get_object():
    with open(fpath, 'r') as f:
        d = json.load(f)
    print(d)
class Student(object):
def __init__(self,name,age):
self.name=name
self.age=age
s = Student('aa',1)
print(json.dumps(s,default=lambda obj:obj.__dict__))
if __name__=='__main__':
# openfile(fpath)
# print (os.path.abspath('.'))
# os.rename(fpath, 'a.py')
# dump_object()
# get_object()
print(' ')
```
#### File: Torando-sqlalchemy/test/spider.py
```python
import argparse
import json
import time
import urllib2
import MySQLdb as mdb
import Queue
import traceback
import utils
DB_HOST = '127.0.0.1'
DB_PORT = '3306'
DB_USER = 'root'
# MySQL password
DB_PASS = '<PASSWORD>'
# database name
DB_NAME = 'pan'
SPIDER_INTERVAL = 10  # keep this at 10 seconds or more, otherwise the crawler is easily banned
ERR_NO = 0  # OK
ERR_REFUSE = 1  # rejected because the crawler requested too fast
ERR_EX = 2  # unknown error
def getHtml(url, ref=None, reget=5):
try:
request = urllib2.Request(url)
request.add_header('User-Agent',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36')
if ref:
request.add_header('Referer', ref)
page = urllib2.urlopen(request, timeout=10)
html = page.read()
except:
if reget >= 1:
                # if getHtml failed, retry (up to 5 attempts)
print 'getHtml error,reget...%d' % (6 - reget)
time.sleep(2)
return getHtml(url, ref, reget - 1)
else:
print 'request url:' + url
print 'failed to fetch html'
exit()
else:
return html
class Db(object):
def __init__(self):
self.dbconn = None
self.dbcurr = None
def check_conn(self):
try:
self.dbconn.ping()
except:
return False
else:
return True
def conn(self):
self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME, charset='utf8')
self.dbconn.autocommit(False)
self.dbcurr = self.dbconn.cursor()
def fetchone(self):
return self.dbcurr.fetchone()
def fetchall(self):
return self.dbcurr.fetchall()
def execute(self, sql, args=None, falg=False):
if not self.dbconn:
            # first connection to the database
self.conn()
try:
if args:
rs = self.dbcurr.execute(sql, args)
else:
rs = self.dbcurr.execute(sql)
return rs
except Exception, e:
if self.check_conn():
print 'execute error'
traceback.print_exc()
else:
print 'reconnect mysql'
self.conn()
if args:
rs = self.dbcurr.execute(sql, args)
else:
rs = self.dbcurr.execute(sql)
return rs
def commit(self):
self.dbconn.commit()
def rollback(self):
self.dbconn.rollback()
def close(self):
self.dbconn.close()
self.dbcurr.close()
def last_row_id(self):
return self.dbcurr.lastrowid
class BaiduPanSpider(object):
def __init__(self):
self.db = Db()
self.files = []
self.got_files_count = 0
self.got_follow_count = 0
self.while_count = 0
self.spider_queue = Queue.Queue(maxsize=20)
self.status = 'stop'
self.errno = ERR_NO
self.file_type_t = {'video': 0, 'image': 1, 'document': 2, 'music': 3, 'package': 4, 'software': 5,
'torrent': 6, 'other': -1}
def getShareUser(self, uk):
url = 'http://yun.baidu.com/share/count?uk=%d&channel=chunlei&clienttype=0&web=1' % uk
follows_json = json.loads(getHtml(url, uk))
if follows_json['errno'] != 0:
if follows_json['errno'] == -55:
self.errno = ERR_REFUSE
else:
self.errno = ERR_EX
return False
return {
'pubshare_cnt': follows_json['pubshare_cnt'],
'fans': follows_json['fans'],
'follow': follows_json['follow'],
            'album': follows_json['album']
}
def getHotUser(self):
url = 'http://yun.baidu.com/pcloud/friend/gethotuserlist?type=1&from=feed&start=0&limit=24&channel=chunlei&clienttype=0&web=1'
follows_json = json.loads(getHtml(url))
if follows_json['errno'] != 0:
print u'failed to fetch hot users'
return False
returns = []
count = 0
for item in follows_json['hotuser_list']:
count = count + 1
hot_uname = item['hot_uname'].encode('utf-8')
hot_uk = item['hot_uk']
avatar_url = item['avatar_url'].encode('utf-8')
intro = item['intro'].encode('utf-8')
follow_count = item['follow_count']
fans_count = item['fans_count']
pubshare_count = item['pubshare_count']
album_count = item['album_count']
returns.append({'hot_uname': hot_uname, 'hot_uk': hot_uk, 'avatar_url': avatar_url, 'intro': intro,
'follow_count': follow_count, 'fans_count': fans_count, 'pubshare_count': pubshare_count,
'album_count': album_count})
if count == 0:
print "got no hot users"
return False
else:
print "success to fetched hot users: %d" % count
return returns
def getFans(self, uk, start=0, limit=24):
        # query_uk: user ID
        # limit: maximum number of items per page
        # start: current offset
follows_url = 'http://yun.baidu.com/pcloud/friend/getfanslist?query_uk=%d&limit=%d&start=%d' % (
uk, limit, start)
follows_json = json.loads(getHtml(follows_url, uk))
if follows_json['errno'] != 0:
print u'failed to fetch fens'
return False
total_count = follows_json['total_count']
returns = []
count = 0
for item in follows_json['fans_list']:
count = count + 1
fans_uname = item['fans_uname'].encode('utf-8')
fans_uk = item['fans_uk']
avatar_url = item['avatar_url'].encode('utf-8')
intro = item['intro'].encode('utf-8')
follow_count = item['follow_count']
fans_count = item['fans_count']
pubshare_count = item['pubshare_count']
album_count = item['album_count']
returns.append({'fans_uname': fans_uname, 'fans_uk': fans_uk, 'avatar_url': avatar_url, 'intro': intro,
'follow_count': follow_count, 'fans_count': fans_count, 'pubshare_count': pubshare_count,
'album_count': album_count})
return (total_count, count, returns)
def getFollows(self, uk, start=0, limit=24):
follows_url = 'http://yun.baidu.com/pcloud/friend/getfollowlist?query_uk=%d&limit=%d&start=%d&bdstoken=d82467db8b1f5741daf1d965d1509181&channel=chunlei&clienttype=0&web=1' % (
uk, limit, start)
ref = 'http://yun.baidu.com/pcloud/friendpage?type=follow&uk=%d&self=1' % uk
follows_json = json.loads(getHtml(follows_url, ref))
if follows_json['errno'] != 0:
print 'getFollows errno:%d' % follows_json['errno']
print 'request_url:' + follows_url
if follows_json['errno'] == -55:
self.errno = ERR_REFUSE
else:
self.errno = ERR_EX
return False
total_count = follows_json['total_count']
returns = []
count = 0
if (total_count > 0):
for item in follows_json['follow_list']:
count = count + 1
returns.append({
'follow_uname': item['follow_uname'].encode('utf-8'),
'follow_uk': item['follow_uk'],
'avatar_url': item['avatar_url'].encode('utf-8'),
'intro': item['intro'].encode('utf-8'),
'follow_count': item['follow_count'],
'fans_count': item['fans_count'],
'pubshare_count': item['pubshare_count'],
'album_count': item['album_count']
})
return (total_count, count, returns)
def getShareLists(self, uk, start=0, limit=60):
sharelists_url = 'http://yun.baidu.com/pcloud/feed/getsharelist?category=0&auth_type=1&request_location=share_home&start=%d&limit=%d&query_uk=%d&channel=chunlei&clienttype=0&web=1' % (
start, limit, uk)
ref = 'http://yun.baidu.com/share/home?uk=%d&view=share' % uk
sharelists_json = json.loads(getHtml(sharelists_url, ref))
if (sharelists_json['errno'] != 0):
print 'getShareLists errno:%d' % sharelists_json['errno']
print 'request_url:' + sharelists_url
if sharelists_json['errno'] == -55:
self.errno = ERR_REFUSE
else:
self.errno = ERR_EX
return False
total_count = sharelists_json['total_count']
returns = []
count = 0
if total_count > 0:
for item in sharelists_json['records']:
count = count + 1
feed_type = item['feed_type']
isdir = 0
size = 0
md5 = ''
album_id = ''
shorturl = ''
if feed_type == 'share':
if item['filecount'] == 1:
filelist = item['filelist']
isdir = filelist[0]['isdir']
size = filelist[0]['size']
md5 = filelist[0]['md5']
else:
isdir = 1
elif feed_type == 'album':
album_id = item['album_id']
isdir = 2
if item.has_key('shorturl'):
shorturl = item['shorturl']
if feed_type == 'share' or feed_type == 'album':
returns.append({
'title': item['title'].encode('utf-8'),
'shorturl': shorturl,
'shareid': item['source_id'],
                        'feed_time': item['feed_time'] // 1000,  # share timestamp (seconds)
'dCnt': item['dCnt'],
'isdir': isdir,
'size': size,
'md5': md5,
'uk': uk,
'feed_type': feed_type
})
return (total_count, count, returns)
def getAlbum(self, uk, start=0, limit=60):
url = 'http://yun.baidu.com/pcloud/album/getlist?start=%d&limit=%d&query_uk=%d&channel=chunlei&clienttype=0&web=1&bdstoken=d82467db8b1f5741daf1d965d1509181' % (
start, limit, uk)
album_json = json.loads(getHtml(url, uk))
total_count = album_json['count']
returns = []
count = 0
for item in album_json['album_list']:
count = count + 1
title = item['title'].encode('utf-8')
album_id = item['album_id']
create_time = item['create_time']
update_time = item['update_time']
filecount = item['filecount']
desc = item['desc']
returns.append({'title': title, 'album_id': album_id, 'create_time': create_time, 'desc': desc,
'update_time': update_time, 'filecount': filecount, 'uk': uk})
if count == 0:
print "get nothing"
return False
else:
print "success to fetched : %d" % count
if (start + count) < total_count:
start = start + limit
returns = returns + self.getAlbum(uk, start)
return returns
def seedUsers(self):
hot_usrs = self.getHotUser()
if not hot_usrs:
return
try:
for user in hot_usrs:
time_stamp = int(time.time())
if user['pubshare_count'] > 0:
self.db.execute("INSERT INTO share_users (uk,user_name,avatar_url,intro,follow_count,album_count,\
fens_count,pubshare_count,last_visited,create_time,weight) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(
user['hot_uk'], user['hot_uname'], user['avatar_url'], user['intro'],
user['follow_count'],
user['album_count'], user['fans_count'], user['pubshare_count'], time_stamp,
time_stamp, 5
)
)
uid = self.db.last_row_id()
self.db.execute("INSERT INTO spider_list (uk,uid) VALUES(%s,%s)", (user['hot_uk'], uid))
except:
traceback.print_exc()
self.db.rollback()
else:
self.db.commit()
def startSpider(self):
if self.spider_queue.empty():
fetched_users = self.db.execute('SELECT * from spider_list ORDER BY weight DESC limit 0,20')
if fetched_users <= 0:
print 'nothing to spider,spider_list is empty'
return False
self.start = 'start'
self.errno = ERR_NO
fetchall = self.db.fetchall()
        # push the sharers fetched from the database onto the crawl queue
for item in fetchall:
self.spider_queue.put({
'sid': item[0],
'uk': item[1],
'file_fetched': item[2],
'follow_fetched': item[3],
'follow_done': item[4],
'file_done': item[5],
'weight': item[6],
'uid': item[7]
})
self.got_follow_count = 0
self.got_files_count = 0
self.while_count = 0
while not self.spider_queue.empty():
self.while_count += 1
share_user = self.spider_queue.get()
            # crawl this sharer's file list
if not share_user['file_done']:
print '%d now spidering file ,%d file fetched' % (share_user['uk'], share_user['file_fetched'])
rs = self.getShareLists(share_user['uk'], share_user['file_fetched'])
if not rs:
print 'uk:%d error to fetch files,try again later...' % share_user['uk']
return True
total_count, fetched_count, file_list = rs
total_fetched = share_user['file_fetched'] + fetched_count
print 'fetched_file_count:%d' % fetched_count
if total_fetched >= total_count or total_count == 0:
                    share_user['file_done'] = 1  # all of this sharer's files have been crawled
if total_count == 0:
self.db.execute("UPDATE spider_list set file_done=%s WHERE sid=%s", (1, share_user['sid']))
self.db.commit()
else:
try:
files_count = 0
for file in file_list:
files_count += 1
ext = ''
file_type = ''
file_type_i = -1
if file['isdir'] == 0 and file['feed_type'] == 'share':
ext = utils.get_extension(file['title']).lower()
file_type = utils.get_category(ext)
file_type_i = self.file_type_t[file_type]
time_stamp = int(time.time())
self.db.execute(
"INSERT INTO share_file (title,uk,shareid,shorturl,isdir,size,md5,ext,feed_time,create_time,file_type,uid,feed_type) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(file['title'], file['uk'], file['shareid'],
file['shorturl'], file['isdir'], file['size'], file['md5'], ext, file['feed_time'],
time_stamp, file_type_i, share_user['uid'], file['feed_type'])
)
except:
share_user['file_done'] = 0
self.db.rollback()
traceback.print_exc()
return False
else:
self.db.execute("UPDATE spider_list set file_fetched=%s,file_done=%s WHERE sid=%s",
(total_fetched, share_user['file_done'], share_user['sid']))
self.db.execute("UPDATE share_users set fetched=%s WHERE uid=%s",
(total_fetched, share_user['uid']))
share_user['file_fetched'] = total_fetched
self.got_files_count += files_count
self.db.commit()
            # after the files are finished, crawl the follow list
if share_user['follow_done'] == 0 and share_user['file_done'] == 1:
print '%d now spidering follow ,%d follow fetched' % (share_user['uk'], share_user['follow_fetched'])
rs = self.getFollows(share_user['uk'], share_user['follow_fetched'])
if not rs:
print 'error to fetch follows,try again later...'
return
total_count, fetched_count, follow_list = rs
total_fetched = share_user['follow_fetched'] + fetched_count
print 'fetched_follow_count:%d' % fetched_count
if total_fetched >= total_count or total_count == 0:
share_user['follow_done'] = 1
if total_count == 0:
self.db.execute("DELETE FROM spider_list WHERE sid=%s", (share_user['sid'],))
self.db.commit()
else:
try:
follow_count = 0
for follow in follow_list:
follow_count += 1
                            # check whether this user is already in the share_users table
if self.db.execute('SELECT * FROM share_users WHERE uk=%s', (follow['follow_uk'],)) > 0:
print 'uk:%d has already in share_user table' % follow['follow_uk']
continue
time_stamp = int(time.time())
self.db.execute("INSERT INTO share_users (uk,user_name,avatar_url,intro,follow_count,album_count,\
fens_count,pubshare_count,last_visited,create_time,weight) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(
follow['follow_uk'], follow['follow_uname'], follow['avatar_url'],
follow['intro'], follow['follow_count'],
follow['album_count'], follow['fans_count'], follow['pubshare_count'],
time_stamp, time_stamp, 5
)
)
                            # add the newly discovered sharer to the crawl list
self.db.execute("INSERT INTO spider_list (uk,uid) VALUES(%s,%s)",
(follow['follow_uk'], self.db.last_row_id()))
except:
share_user['follow_done'] = 0
self.db.rollback()
traceback.print_exc()
return False
else:
if share_user['follow_done'] == 1:
                        # follow list finished; this sharer is done, remove it from the pending list
print 'delete follow fetched sid:%d from spider_list' % share_user['sid']
self.db.execute("DELETE FROM spider_list WHERE sid=%s", (share_user['sid'],))
else:
self.db.execute("UPDATE spider_list set follow_fetched=%s,follow_done=%s WHERE sid=%s",
(total_fetched, share_user['follow_done'], share_user['sid']))
share_user['follow_fetched'] = total_fetched
self.got_follow_count += follow_count
self.db.commit()
            # if the follow list is not finished, this sharer is not done yet; re-queue it and keep crawling
if share_user['follow_done'] == 0:
self.spider_queue.put(share_user)
else:
print '%d has done' % share_user['uk']
del share_user
time.sleep(SPIDER_INTERVAL)
print '-----------------Done------------------'
print 'while_count:%d' % self.while_count
print 'got_follow_count:%d' % self.got_follow_count
print 'got_files_count:%d' % self.got_files_count
return True
def stop(self):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed-user", help="get seed user", action="store_true")
args = parser.parse_args()
spider = BaiduPanSpider()
    # seed the user table with hot users
if args.seed_user:
spider.seedUsers()
else:
while (1):
print 'start spider...'
result = spider.startSpider()
if not result:
print 'The spider is refused,5 mins later try again auto...'
time.sleep(60 * 5)
else:
print 'one worker queue id done'
time.sleep(1)
```
#### File: tornadoTest/services/testService.py
```python
import time
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from mytestdb import Article,Session_class
class serviceTest():
    executor = ThreadPoolExecutor(10)
    @run_on_executor
    def saveArticle(self, title='', keywords='', digest=''):
        article = Article(title, keywords, digest)
        article.save()
        return article
    @run_on_executor
    def selectTopTen(self):
        session = Session_class()  # create a session instance (works like a cursor)
        articles = session.query(Article).limit(10)
        return articles
    @run_on_executor
    def selectTopOne(self):
        session = Session_class()  # create a session instance (works like a cursor)
        article = session.query(Article).first()
        return article
``` |
{
"source": "9447-team-4/ImageScanner",
"score": 2
} |
#### File: 9447-team-4/ImageScanner/DynamicPRReporter.py
```python
from Reporters import PullRequestReporter, GitService
import boto3
import os
import json
class ZAPPRReporter(PullRequestReporter):
def __init__(self, git_service: GitService, pr_id: int):
super(ZAPPRReporter, self).__init__(git_service, pr_id)
self._exit_status = 0
self._bucket_name = os.getenv('S3_BUCKET_NAME')
self._s3_client = boto3.client('s3')
self._s3_bucket = boto3.resource('s3')
@property
def exit_status(self):
return self._exit_status
def _get_report(self):
report_url = self._s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': self._bucket_name, 'Key': 'report.html'},
ExpiresIn=86400)
return report_url
def _get_metrics(self):
obj = self._s3_bucket.Object(self._bucket_name, 'report.json')
json_obj = json.loads(obj.get()['Body'].read().decode('utf-8'))
alerts = {'0': 0, '1': 0, '2': 0, '3': 0}
for site in json_obj['site']:
for alert in site['alerts']:
alerts[alert['riskcode']] += 1
# If high risk vuln > 0
if alerts['3'] > 0:
self._exit_status = 1
return alerts
def _create_message(self):
alerts = self._get_metrics()
msg = "# OWASP ZAP Fuzzing Results\n" \
"| Risk Level | Amount of Vulnerabilities |\n" \
"| -------------- | ------------------------- |\n" \
f"| High | {alerts['3']} |\n" \
f"| Medium | {alerts['2']} |\n" \
f"| Low | {alerts['1']} |\n" \
f"| Informational | {alerts['0']} |\n\n"
report_url = self._get_report()
msg += f"View more results:\n{report_url}"
return msg
def process_pull_review(self):
msg = self._create_message()
self.pull_review.body = msg
``` |
{
"source": "9447-team-4/Pipeline-dynamic-fuzzer",
"score": 3
} |
#### File: Pipeline-dynamic-fuzzer/owasp-zap/zapGenAPI.py
```python
import yaml #pip install pyyaml
import json
import argparse
parser = argparse.ArgumentParser(description='Generates a new openAPI json file with server url specification given an existing openAPI json or yaml file')
parser.add_argument('-f','--file', type=str, metavar='', required=True, help='openAPI json or yaml file')
parser.add_argument('-u','--url', type=str, metavar='', required=True, help='url of server')
args = parser.parse_args()
def main():
if args.file.lower().endswith('.json'):
api_file = open(args.file, 'r+')
json_data = json.load(api_file)
json_data['servers'] = [{'url':args.url}]
api_file.close()
api_file = open('zap_openapi.json', 'w')
json.dump(json_data, api_file, indent=2, sort_keys=True)
api_file.close()
elif args.file.lower().endswith('.yaml') or args.file.lower().endswith('.yml'):
api_file = open(args.file, 'r+')
yaml_data = yaml.safe_load(api_file)
yaml_data['servers'] = [{'url':args.url}]
api_file.close()
api_file = open('zap_openapi.json', 'w')
json.dump(yaml_data, api_file, indent=2, sort_keys=True)
api_file.close()
else:
raise Exception('Not a yaml or json file')
return
if __name__ == "__main__":
main()
``` |
{
"source": "946336/The-Worst-REPL-You-ve-Ever-Seen",
"score": 3
} |
#### File: repl/base/callstack.py
```python
class Entry:
def __init__(self, obj, line_number = 1):
self.obj = obj
self.line_number = line_number
def __repr__(self):
return "In {}, line {}".format(self.obj.name, self.line_number)
class CallStack:
def __init__(self, initial = None):
self.__stack = [] if not initial else initial
def __repr__(self):
stk = "\n".join([str(entry) for entry in self.__stack])
return "Traceback (Most recent call last):\n" + stk + "\n"
__str__ = __repr__
def __len__(self):
return len(self.__stack)
def append(self, entry):
self.__stack.append(entry)
def pop(self):
last = self.__stack[-1]
self.__stack.pop()
return last
def __getitem__(self, index):
if isinstance(index, slice):
return CallStack(self.__stack[index])
else: return self.__stack[index]
```
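A quick usage sketch of the two classes above (the `SimpleNamespace` here is just an illustrative stand-in for any object with a `name` attribute):
```python
from types import SimpleNamespace
# build a small stack and inspect it
stack = CallStack()
stack.append(Entry(SimpleNamespace(name="main"), line_number=1))
stack.append(Entry(SimpleNamespace(name="helper"), line_number=7))
print(stack)       # "Traceback (Most recent call last):" followed by both entries
top = stack.pop()  # removes and returns the "helper" entry
print(len(stack))  # 1
```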
#### File: repl/base/common.py
```python
class REPLError(Exception): pass
class REPLControl(Exception): pass
class REPLBreak(REPLControl): pass
class REPLReturn(REPLControl):
def __init__(self, value):
self.value = value
class REPLFunctionShift(REPLControl): pass
class REPLSyntaxError(REPLError): pass
class REPLRuntimeError(REPLError): pass
```
#### File: repl/base/environment.py
```python
import json
class Environment:
def __init__(self, name = "(?)", upstream = None, default_value = "",
initial_bindings = None):
self.__name = name
self.__bindings = ({} if initial_bindings is None else initial_bindings)
# Default value to give when something isn't found
self.__default = default_value
if type(upstream) not in [type(None), Environment]:
raise RuntimeError("Upstream environment must be None or an " +
"environment")
self.__upstream = upstream
@property
def name(self):
return self.__name
def copy(self):
return Environment(
name = self.__name,
upstream = self.__upstream,
default_value = self.__default,
initial_bindings = self.__bindings.copy()
)
# Bindings search up as far as possible for something to trample, but
# otherwise stay at this height
def bind(self, name, value):
if self.update_upstream(name, value) is None:
self.__bindings[name] = value
def bind_here(self, name, value):
self.__bindings [name] = value
# Return name of environment where binding was updated, or None if no
# matching binding was found
def update_upstream(self, name, value):
up = None
if self.__upstream is not None:
up = self.__upstream.update_upstream(name, value)
if up is None and self.__bindings.get(name, None) is not None:
self.__bindings[name] = value
return self.__name
return up
def bind_no_trample(self, name, value):
if name in self.__bindings.keys():
raise KeyError("Key {} already present in environment {}"
.format(name, self.__name))
self.bind_here(name, value)
def unbind(self, name):
try:
del self.__bindings[name]
except KeyError as e:
# Unbinding something we don't have is fine
pass
# Don't catch the exception when unbinding something we don't have
def unbind_strict(self, name):
del self.__bindings[name]
# Downstream environments have priority
def get(self, name):
mine = self.__bindings.get(name, None)
if mine is None:
if self.__upstream is None:
return self.__default
else:
return self.__upstream.get(name)
else:
return mine
def __getitem__(self, name):
return self.get(name)
def load_from(self, file_like):
self.__bindings = json.load(file_like)
def write_to(self, file_like):
json.dump(self.__bindings, file_like, indent = 4, sort_keys = True)
def list(self):
return [ "* {} -> {}".format(k, v) for k, v in self.__bindings.items() ]
def list_tree(self):
finger = self.__upstream
accum = ["==========\n{}\n==========".format(self.__name),
"\n".join(self.list())]
while finger is not None:
accum.append("==========\n{}\n==========".format(finger.__name))
accum.append("\n".join(finger.list()))
finger = finger.__upstream
return accum
```
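A short sketch of how the scoping rules above behave (the names and values are illustrative):
```python
globals_env = Environment(name="globals")
locals_env = Environment(name="locals", upstream=globals_env)
globals_env.bind("greeting", "hello")
print(locals_env.get("greeting"))  # "hello" -- the lookup falls through to the upstream scope
locals_env.bind_here("greeting", "hi")
print(locals_env.get("greeting"))  # "hi" -- the downstream binding now takes priority
print(locals_env.get("missing"))   # ""  -- the default value when nothing is bound anywhere
```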
#### File: The-Worst-REPL-You-ve-Ever-Seen/repl/Function.py
```python
from . import formatter
from .base import command, syntax, common
import re
import sys
class REPLFunction:
# This requires further thought
forbidden_names = []
forbidden_argspec_pattern = re.compile(
"^[0-9]"
)
def __init__(self, owner, name, argspec = None):
self.__name = name
self.__owner = owner
self.__variadic = argspec[-1] == "..." if argspec else False
self.__argspec = argspec[:-1] if self.__variadic else argspec
self.__contents = []
self.args_ = None
self.argspec_ = None
@property
def name(self):
return self.__name
@property
def argspec(self):
return self.__argspec
def complete(self, line):
self.__owner.finish_block()
usagestring = \
("{} args".format(self.__name)
if not self.__argspec
else "{} {}".format(self.__name,
" ".join(self.__argspec)))
helpstring = \
("function {}\n".format(self.__name)
+ formatter.format(self.__contents, depth = 1) + "\nendfunction"
if not self.__argspec else
"function {} {}\n".format(self.__name,
" ".join(self.__argspec))
+ formatter.format(self.__contents, depth = 1) + "\nendfunction"
)
self.__owner.register_user_function(command.Command(
self,
self.__name,
usagestring,
helpstring
))
def append(self, line):
line = line.strip()
if line == "endfunction":
self.complete(line)
elif line.startswith("function"):
# We'd have problems with function
# lifetime and scope, so we can't really allow nested functions
sys.stderr.write("Cannot create nested functions\n")
self.__owner.discard_block()
else:
self.__contents.append(line)
return self
def make_bindings(self, args, argspec):
bindings = {
"FUNCTION": self.__name,
"#": str(len(args)),
"@": " ".join([syntax.quote(arg) for arg in args]),
"0": self.__name,
}
for position, argument in enumerate(args):
bindings[str(position + 1)] = argument
for name, argument in zip(argspec, args):
bindings[name] = argument
return bindings
def shift(self):
self.args_ = self.args_[1:]
to_unset = None
if len(self.args_) < len(self.argspec_):
to_unset = self.argspec_[0]
self.argspec_ = self.argspec_[1:]
bindings = self.make_bindings(self.args_, self.argspec_)
# Unset last argument
del self.bindings[str(len(self.args_))]
if to_unset:
del self.bindings[str(to_unset)]
# Apply shift down
for k, v in bindings.items():
self.bindings[k] = v
def calledIncorrectly(self, args):
if not self.__argspec: return False
if self.__variadic:
return len(args) < len(self.__argspec)
else:
return len(args) != len(self.__argspec)
def __call__(self, *args):
if self.calledIncorrectly(args):
raise common.REPLRuntimeError("Usage: {} {}"
.format(self.__name, " ".join(self.__argspec)))
self.argspec_ = self.__argspec[:]
self.args_ = args
self.bindings = self.make_bindings(self.args_, self.argspec_)
self.__owner.add_scope(self.bindings, self.__name)
try:
for line in self.__contents:
try:
res = self.__owner.eval(line)
self.__owner.stack_top().line_number += 1
if res: print(res.strip("\n"))
except common.REPLReturn as e:
return e.value
except common.REPLFunctionShift as e:
self.shift()
continue
finally:
self.__owner.pop_scope()
``` |
{
"source": "9465565598/ThaparWorkshopANN",
"score": 3
} |
#### File: keras-tutorials/1. MLP/2-3-batchnorm.py
```python
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.datasets import mnist
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Activation, Dense, BatchNormalization
from keras import optimizers
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshaping X data: (n, 28, 28) => (n, 784)
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1] * X_train.shape[2]))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1] * X_test.shape[2]))
# converting y data into categorical (one-hot encoding)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# use only 33% of training data to expedite the training process
X_train, _ , y_train, _ = train_test_split(X_train, y_train, test_size = 0.67, random_state = 7)
def mlp_model():
model = Sequential()
model.add(Dense(50, input_shape = (784, )))
model.add(BatchNormalization()) # Add Batchnorm layer before Activation
model.add(Activation('sigmoid'))
model.add(Dense(50))
model.add(BatchNormalization()) # Add Batchnorm layer before Activation
model.add(Activation('sigmoid'))
model.add(Dense(50))
model.add(BatchNormalization()) # Add Batchnorm layer before Activation
model.add(Activation('sigmoid'))
model.add(Dense(50))
model.add(BatchNormalization()) # Add Batchnorm layer before Activation
model.add(Activation('sigmoid'))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = optimizers.SGD(lr = 0.001)
model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])
return model
model = mlp_model()
history = model.fit(X_train, y_train, validation_split = 0.3, epochs = 100, verbose = 0)
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.legend(['training', 'validation'], loc = 'upper left')
# plt.show()
results = model.evaluate(X_test, y_test)
print('Test accuracy: ', results[1])
```
#### File: keras-tutorials/2. ETC/4-2-early-stopping.py
```python
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.callbacks import *
from keras.layers import *
data = load_digits()
X_data = data.images
y_data = data.target
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.3, random_state = 777)
# reshaping X data => flatten into 1-dimensional
X_train = X_train.reshape((X_train.shape[0], -1))
X_test = X_test.reshape((X_test.shape[0], -1))
# converting y data into categorical (one-hot encoding)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
def create_model():
model = Sequential()
model.add(Dense(100, input_shape = (X_train.shape[1],)))
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))  # softmax (rather than sigmoid) pairs with categorical_crossentropy for multi-class output
model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
return model
callbacks = [EarlyStopping(monitor = 'acc', patience = 1)]
model = create_model()
# you could see that model stops training after 7 epochs
model.fit(X_train, y_train, epochs = 20, batch_size = 500, callbacks = callbacks, validation_data = (X_test, y_test))
```
#### File: scikit-learn/figures/plot_digits_dataset.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import datasets, decomposition
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.title("First Principal Component")
plt.axis('off')
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.title("Second Principal Component")
plt.axis('off')
plt.show()
``` |
{
"source": "9489yamamayumi/discordpy-startup",
"score": 3
} |
#### File: 9489yamamayumi/discordpy-startup/discordbot.py
```python
from discord.ext import commands
import os
import traceback
import random
bot = commands.Bot(command_prefix='!')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
TRPGmode = "None"
TRPGtype = ["CoC","Paranoia","Others"]
@bot.event
async def on_message(message):
if message.author.bot:
return
    elif message.content == "Hi!":
        await message.channel.send(f"{message.author.name} yay!")
    # hand the message back to the command framework, otherwise !mc/!m/!r would never run
    await bot.process_commands(message)
#change TRPG mode
@bot.command()
async def mc(ctx,arg):
    global TRPGmode  # needed so the assignment below updates the module-level mode
    if(arg in TRPGtype):
TRPGmode = arg
await ctx.send(f"now changed TRPGmode to {TRPGmode}\r\nモードを {TRPGmode} に変更しました")
else:
await ctx.send(f"this mode isn't supported\r\nそのモードは存在しません")
@bot.command()
async def m(ctx):
await ctx.send(f"now TRPGmode is {TRPGmode}\r\n現在のTRPGモードは{TRPGmode}です")
#!r AdB rolls A dice with B sides each (e.g. !r 3d6)
@bot.command()
async def r(ctx,arg):
#prepare
results=[]
rsltsum=0
exstts=""
cfflg=0
if(arg=="1d100" and TRPGmode=="CoC"):cfflg=1
vals=arg.split('d')
#diceroll
for i in range(int(vals[0])):
roll=random.randint(1,int(vals[1]))
results.append(roll)
rsltsum+=roll
#result
if(cfflg==1):
if(rsltsum<6):exstts = "<critical!>"
if(rsltsum>95):exstts = "<fumble!>"
await ctx.send(f".{arg}.{rsltsum}.{exstts}.")
await ctx.send(f"roll {arg} -> {rsltsum} : {results} {exstts}")
bot.run(token)
``` |
{
"source": "948guppy/Morrigan-Rewrite",
"score": 2
} |
#### File: Morrigan-Rewrite/cogs/stream.py
```python
import textwrap
import asyncio
import discord
from discord.ext import commands
class StreamStatusIsNone(Exception):
pass
class Stream(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
channel_id = 758219068007776256
channel = member.guild.get_channel(channel_id)
        # helpers used when a stream starts
def overwrites(streamer):
overwrite = {
streamer.guild.default_role: discord.PermissionOverwrite(create_instant_invite=False,
manage_channels=False,
manage_permissions=False, manage_roles=False,
manage_webhooks=False, read_messages=False,
send_messages=False, send_tts_messages=False,
manage_messages=False, embed_links=False,
attach_files=False, read_message_history=False,
mention_everyone=False, external_emojis=False,
use_external_emojis=False, add_reactions=False,
view_channel=True, connect=True, speak=True,
stream=True, mute_members=False,
deafen_members=False, move_members=False,
use_voice_activation=True,
priority_speaker=False),
streamer: discord.PermissionOverwrite(create_instant_invite=False, manage_channels=False,
manage_permissions=False, manage_roles=False,
manage_webhooks=False, read_messages=False, send_messages=False,
send_tts_messages=False, manage_messages=False, embed_links=False,
attach_files=False, read_message_history=False,
mention_everyone=False, external_emojis=False,
use_external_emojis=False, add_reactions=False, view_channel=True,
connect=True, speak=True, stream=True, mute_members=True,
deafen_members=True, move_members=False,
use_voice_activation=True, priority_speaker=True)
}
return overwrite
async def create_stream_channel(streamer):
category_id = 733625569178157076
category = streamer.guild.get_channel(category_id)
stream = await category.create_voice_channel(name=f"{streamer.display_name}",
overwrites=overwrites(streamer))
await streamer.move_to(stream)
def get_streaming_game(streamer):
try:
game = streamer.activities[0]
except IndexError:
game = None
return game
async def send_stream_started(streamer):
e = discord.Embed()
e.title = "配信が開始されました!"
e.description = textwrap.dedent(
f"""
配信者 : {streamer.mention}さん
配信中のゲーム : {get_streaming_game(streamer).name if get_streaming_game(streamer) else '取得されませんでした'}
"""
)
e.colour = 0x99FFFF
await channel.send(embed=e)
async def send_error_message(streamer):
e = discord.Embed()
e.title = "エラーが発生しました!"
e.description = textwrap.dedent(
f"""
配信者 : {streamer.mention}さんによる配信情報パネルの取得ができませんでした。
既にパネルが削除されているか、存在するパネル数が多すぎる可能性があります。
このエラーは10秒後に削除されます。
"""
)
e.colour = 0xFF0000
await channel.send(embed=e, delete_after=10)
async def delete_stream_information(streamer):
stream_information = None
async for message in channel.history(limit=200):
try:
if message.embeds[0].title == "配信が開始されました!":
if f"配信者 : {streamer.mention}さん" in message.embeds[0].description:
stream_information = message
break
except IndexError:
continue
try:
await stream_information.delete()
except AttributeError:
await send_error_message(streamer)
        # helpers used when a stream ends
async def close_stream(listener, stream):
try:
if stream.channel.overwrites_for(listener).deafen_members:
await stream.channel.delete()
await delete_stream_information(member)
except AttributeError:
pass
        # run the handlers
try:
if after.channel.id == 733626787992567868:
await send_stream_started(member)
await create_stream_channel(member)
except AttributeError:
if not before.channel.id == 733626787992567868 and before.channel.category_id == 733625569178157076:
await close_stream(member, before)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
information_channel_id = 758219068007776256
information_channel = self.bot.get_channel(information_channel_id)
channel = self.bot.get_channel(payload.channel_id)
guild = self.bot.get_guild(payload.guild_id)
message = await channel.fetch_message(payload.message_id)
member = guild.get_member(payload.user_id)
delete = []
def check(m):
return m.author == member
async def change_streaming_channel_name(listener, stream_name):
state = listener.voice
if state is None:
delete.append(await channel.send("VCにいません"))
else:
if state.channel.category_id == 733625569178157076 and not state.channel.id == 733626787992567868:
if state.channel.overwrites_for(listener).deafen_members:
stream_channel_id = state.channel.id
stream_channel = listener.guild.get_channel(stream_channel_id)
await stream_channel.edit(name=stream_name)
return True
return False
raise StreamStatusIsNone
if member.bot:
return
else:
try:
if message.embeds[0].title == "配信編集パネル":
if str(payload.emoji) == "1⃣":
try:
delete.append(await channel.send("配信の名前を入力してください"))
msg = await self.bot.wait_for('message', timeout=60.0, check=check)
delete.append(msg)
except asyncio.TimeoutError:
delete.append(await channel.send('タイムアウトしました'))
else:
try:
if await change_streaming_channel_name(member, msg.content):
delete.append(await channel.send(f"配信の名前を{msg.content}に変更しました"))
else:
delete.append(await channel.send("あなたの配信ではありません"))
await (await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)).remove_reaction(
payload.emoji, self.bot.get_guild(payload.guild_id).get_member(payload.user_id))
except StreamStatusIsNone:
pass
except IndexError:
pass
await asyncio.sleep(5)
await channel.delete_messages(delete)
def setup(bot):
bot.add_cog(Stream(bot))
```
#### File: Morrigan-Rewrite/cogs/twitter.py
```python
import re
import sys
import discord
import tweepy
from discord.ext import commands, tasks
sys.path.append('../')
from config import Twitter
CONSUMER_KEY = Twitter.CONSUMER_KEY
CONSUMER_SECRET = Twitter.CONSUMER_SECRET
ACCESS_TOKEN = Twitter.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = Twitter.ACCESS_TOKEN_SECRET
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
pattern = "https?://[\w/:%#\$&\?\(\)~\.=\+\-]+"
class TwitterCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
self.get_tweet.start()
@tasks.loop(seconds=480)
async def get_tweet(self):
def search_optimal_channel(tweet_data):
tweet_context = tweet_data.full_text
announce_channel = 702513437535895553
channels = {
"毎週水曜日はシュライン・オブ・シークレットの更新日!": 702513375917113475,
"アップデート": 702513333483339856,
"#新スキン": 702517983813173368,
"セール情報": 703107555295100980,
"#DbDアート": 702518338336849940,
}
for key in channels.keys():
if key in tweet_context:
announce_channel = channels[key]
return announce_channel
async def check_tweet_already_send(optimal_channel, tweet_data):
url_list = re.findall(pattern, tweet_data.full_text)
full_text = tweet_data.full_text
try:
if not tweet_data.full_text.replace(url_list[-1], '') == "":
full_text = tweet_data.full_text.replace(url_list[-1], '')
except IndexError:
pass
async for message in optimal_channel.history(limit=500):
try:
if message.embeds[0].description in full_text:
print('> 送信しませんでした')
return False
                except (AttributeError, IndexError):
continue
print('> 送信しました')
return True
async def send_tweet_embed(optimal_channel, tweet_data):
url_list = re.findall(pattern, tweet_data.full_text)
e = discord.Embed()
e.description = tweet_data.full_text
try:
if not tweet_data.full_text.replace(url_list[-1], '') == "":
e.description = tweet_data.full_text.replace(url_list[-1], '')
except IndexError:
pass
e.colour = 0x7fffd4
e.set_author(
name=tweet_data.user.name,
url=f"https://twitter.com/{tweet_data.user.screen_name}?s=20",
icon_url=api.get_user(tweet_data.user.id).profile_image_url_https
)
try:
e.set_image(url=tweet_data.extended_entities['media'][0]['media_url'])
except:
pass
await optimal_channel.send(embed=e)
for tweet in reversed(api.user_timeline(id='DeadbyBHVR_JP', tweet_mode='extended')[0:5]):
print(tweet.full_text)
channel = self.bot.get_channel(search_optimal_channel(tweet))
if await check_tweet_already_send(channel, tweet):
await send_tweet_embed(channel, tweet)
print('---------------------')
def setup(bot):
bot.add_cog(TwitterCog(bot))
```
#### File: 948guppy/Morrigan-Rewrite/main.py
```python
import traceback
import discord
from discord.ext import commands
from config import DiscordBot
intent = discord.Intents.all()
class Morrigan(commands.Bot):
def __init__(self, **kwargs):
super().__init__(command_prefix=commands.when_mentioned_or('m/'), intents=intent, **kwargs, pm_help=None,
help_attrs=dict(hidden=True))
for cog in DiscordBot.cogs:
try:
self.load_extension(cog)
except Exception as exc:
print('Could not load extension {0} due to {1.__class__.__name__}: {1}'.format(
cog, exc))
async def on_ready(self):
print('Logged on as {0} (ID: {0.id})'.format(self.user))
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
return
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
error_msg = "```py\n" + error_msg + "\n```"
await ctx.send(error_msg)
bot = Morrigan()
# write general commands here
bot.run(DiscordBot.token)
``` |
{
"source": "949027/food-plan",
"score": 2
} |
#### File: food-plan/foodplanapp/models.py
```python
from django.contrib.auth.models import User
from django.db import models
ORDER_DURATION = [(3, "3 мес"), (12, "12 мес")]
MENU_TYPE = [
("classic", "Классическое меню"),
("low_calorie", "Низкокалорийное меню"),
("vegan", "Вегетарианское меню"),
("keto", "Кето меню"),
]
class Price(models.Model):
classic_menu = models.IntegerField("Классическое меню, руб/мес")
low_calorie_menu = models.IntegerField("Низкокалорийное меню, руб/мес")
vegan_menu = models.IntegerField("Вегетарианское меню, руб/мес")
keto_menu = models.IntegerField("Кето меню, руб/мес")
meal = models.IntegerField("Доплата за каждый прием пищи, руб")
new_year_menu = models.IntegerField("Доплата за новогоднее меню, руб")
allergy = models.IntegerField("Доплата за гипоаллергенное меню, руб")
class Meta:
verbose_name = "цена"
verbose_name_plural = "цены"
class Order(models.Model):
menu_type = models.CharField(
"Тип меню", choices=MENU_TYPE, max_length=100, default="classic"
)
duration = models.IntegerField(
"Срок подписки, мес", choices=ORDER_DURATION
)
breakfast = models.BooleanField("Завтраки")
lunch = models.BooleanField("Обеды")
dinner = models.BooleanField("Ужины")
dessert = models.BooleanField("Десерты")
new_year_menu = models.BooleanField("Новогоднее меню")
persons_amount = models.IntegerField("Количество персон")
allergies = models.ManyToManyField(
"Allergies",
related_name="orders",
)
promo_code = models.CharField(
"Промокод",
null=True,
blank=True,
max_length=20,
)
total_price = models.IntegerField("Стоимость подписки", null=True)
user = models.ForeignKey(
User, on_delete=models.CASCADE, verbose_name="Пользователь"
)
class Meta:
verbose_name = "Заказ"
verbose_name_plural = "Заказы"
def __str__(self):
return f"Заказ №{self.id} для {self.user}"
class Allergies(models.Model):
name = models.CharField(
"Название",
max_length=100,
)
description = models.TextField(
"Описание",
blank=True,
)
class Meta:
verbose_name = "аллергия"
verbose_name_plural = "аллергии"
def __str__(self):
return f"Аллергия на {self.name}"
class Dish(models.Model):
name = models.CharField(
"Название",
max_length=200,
)
image = models.ImageField(
"Изображение",
upload_to="images/",
null=True,
)
calories = models.FloatField(
"Калорийность, ккал",
blank=True,
null=True,
)
weight = models.IntegerField(
"Вес, г",
blank=True,
null=True,
)
guide = models.TextField(
"Инструкция",
blank=True,
)
active = models.BooleanField(
"Активный",
default=True,
)
menu_type = models.CharField(
"Тип меню",
choices=MENU_TYPE,
max_length=100,
default="classic",
db_index=True,
)
allergies = models.ManyToManyField(
"Allergies",
related_name="dishes",
blank=True,
)
class Meta:
verbose_name = "рецепт"
verbose_name_plural = "рецепты"
def __str__(self):
return self.name
class Dishitems(models.Model):
dish = models.ForeignKey(
"Dish",
related_name="dishitems",
on_delete=models.CASCADE,
verbose_name="Рецепт",
)
ingredient = models.CharField(
"Наименование",
max_length=200,
)
amount = models.CharField(
"Количество",
max_length=10,
blank=True,
)
measurement_unit = models.CharField(
"Единица измерения",
max_length=10,
blank=True,
)
class Meta:
verbose_name = "ингредиент"
verbose_name_plural = "ингредиенты"
def __str__(self):
return self.ingredient
class Promocode(models.Model):
code = models.CharField("Код", max_length=20, unique=True)
valid_from = models.DateField("Действует с")
valid_to = models.DateField("Действует до")
discount = models.IntegerField("Размер скидки, руб")
class Meta:
verbose_name = "Промокод"
verbose_name_plural = "Промокоды"
def __str__(self):
return self.code
```
#### File: food-plan/payments/models.py
```python
from django.db import models
from foodplanapp.models import Order
class OrderPayment(models.Model):
payment_id = models.SlugField(
"ID платежа в Юкасса",
max_length=100,
unique=True
)
order = models.ForeignKey(
Order,
verbose_name="Заказ к оплате",
related_name="payments",
on_delete=models.CASCADE,
)
created_at = models.DateTimeField("Дата создания")
description = models.CharField("Назначение платежа", max_length=100)
status = models.CharField("Статус платежа", max_length=30)
is_test = models.BooleanField("Тестовый платеж?")
payment_amount = models.IntegerField("Сумма платежа")
payment_currency = models.CharField("Валюта платежа", max_length=10)
is_paid = models.BooleanField("Оплачен?")
class Meta:
verbose_name = "Платеж"
verbose_name_plural = "Платежи"
def __str__(self):
return f"Оплата заказа {self.order.id} от {self.created_at}"
class OrderPaymentSummary(OrderPayment):
class Meta:
proxy = True
verbose_name = 'Отчёт по платежам'
verbose_name_plural = 'Отчеты по платежам'
``` |
{
"source": "949027/self-storage",
"score": 3
} |
#### File: management/commands/location.py
```python
from geopy import distance
from ugc.models import Warehouses
def get_distance_buttons(coordinates):
coordinates = [coordinates["latitude"], coordinates["longitude"]]
warehouses = Warehouses.objects.all()
warehouse_buttons = []
for warehouse in warehouses:
warehouse_coordinates = [warehouse.lat, warehouse.lon]
warehouse_distance = round(
distance.distance(coordinates, warehouse_coordinates).km, 1)
warehouse_buttons.append(f"{warehouse.name} {warehouse_distance} km.")
return warehouse_buttons
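# illustrative call (the dict keys match what the function reads above; values are made up):
#   get_distance_buttons({"latitude": 55.75, "longitude": 37.62})
#   -> ["<warehouse name> 12.3 km.", ...] with one entry per Warehouses row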
``` |
{
"source": "94CD94/Computer_Vision",
"score": 3
} |
#### File: 94CD94/Computer_Vision/Webcam.py
```python
import cv2
from datetime import datetime
import os
def contains(r1, r2):
return (r1[0]< r2[0] < r2[0]+r2[2] < r1[0]+r1[2] and r1[1]< r2[1] <r2[1]+r2[3] <r1[1]+r1[3])
os.chdir(r'C:\Users\LTM0110User\Desktop\vs_code\cv00-master\4 - Object Detection')
cap = cv2.VideoCapture(0)
codec= cv2.VideoWriter_fourcc(*'MJPG' )
face_cascade=cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
smile_cascade=cv2.CascadeClassifier("haarcascade_smile.xml")
out= None
rec= False
bg_mode= False
dt_mode= False
detection= False
while cap.isOpened():
_, Frame=cap.read()
if bg_mode:
Frame= cv2.cvtColor( Frame, cv2.COLOR_BGR2GRAY )
if dt_mode:
strnow= datetime.now().strftime("%d/%m/%Y %H:%M:%S")
cv2.putText(Frame,strnow, (20, Frame.shape[0]-20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,0,0), 2 )
if rec:
out.write(Frame)
cv2.circle(Frame, (Frame.shape[1]-30,Frame.shape[0]-30), 10, (0,0,256),-1 )
if detection:
gray=cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
faces=face_cascade.detectMultiScale( gray, 1.05, 9 )
smiles=smile_cascade.detectMultiScale( gray, 1.5, 30 )
for f in faces:
cv2.rectangle(Frame,(f[0],f[1]),(f[0]+f[2],f[1]+f[3]), (0,256,0), 2 )
for s in smiles:
if contains(f,s):
cv2.rectangle(Frame,(s[0],s[1]),(s[0]+s[2],s[1]+s[3]), (256,0,0), 2 )
image= Frame[ f[1]:(f[1]+f[3]),f[0]:(f[0]+f[2]) ]
cv2.imwrite('./res/smiledetected.jpg',image )
cv2.imshow('video', Frame)
k= cv2.waitKey(1)
if k== ord("b"):
bg_mode= not bg_mode
elif k== ord("t"):
dt_mode= not dt_mode
elif k== ord("c"):
filename=datetime.now().strftime("%Y%m%d%H%M%S")+ ".jpg"
cv2.imwrite(filename, Frame)
print( 'Immagine catturata' )
elif k== ord(" "):
if out== None:
out= cv2.VideoWriter("output.avi",codec, 20., (640, 480))
rec= not rec
print("registration")
elif k==ord('r'):
detection=not detection
elif k == (ord("q")):
break
if out != None:
out.release()
cap.release()
cv2.destroyAllWindows()
``` |
{
"source": "94CD94/Similarity_Signals",
"score": 2
} |
#### File: 94CD94/Similarity_Signals/CreaDataBase.py
```python
import numpy as np
from Noises import G_R_Peaks
from Metrics import SignalStats
from numpy import random
import scipy as sc
from scipy.fftpack import fftshift
import scipy.stats as stats
# RicC2: resampled cores; RicL2: resampled logs
# mov: true shifts; Range (m): the match is searched within +/- Range of the starting position
def DBWhole(RicC2,RicL2,N,mov,Range ):
    Ind = N  # number of samples per window
DATASET= []
y=[]
for j in range(0,len(RicC2),1):
u=np.argmin(abs((RicC2[j].Depth[0]-mov[j])-RicL2[j].Depth)) # Loading starting position
Dr= int(Range/np.diff(RicC2[j].Depth).min())
for i in range(-Dr,Dr,1):
Arr=np.zeros([1,Ind*2])
if u +i < 0:
i=-u
RLC=np.array(RicL2[j].Gr1[int(u+i): int(u+i+N)])
RLC=(RLC-RLC.mean())/RLC.std()
if len(RLC) < (Ind): # If the log is not long enough, zero padding
for k in range(0,Ind-len(RLC),1):
RLC=np.append(RLC,0)
Arr[0, 0:Ind]=RLC.copy() #First part of the database's row
RCC=np.array(RicC2[j].Gr1)
RCC=(RCC-RCC.mean())/RCC.std() #Second part of the database's row
Arr[0, Ind:Ind*2]=RCC.copy()
            if i>=-1 and i<=1: # relax the labels: points adjacent to the true match also count as positives
y.append(1)
DATASET.append(Arr)
if i != 0 and i!= -1 and i!=1 :
y.append(0)
DATASET.append(Arr)
DATASET=np.array(DATASET)
DATASET=np.squeeze(DATASET,axis=1)
y=np.array(y)
y.shape=[y.shape[0],1]
DATASET=np.append(DATASET,y,axis=1)
return(DATASET)
```
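A minimal sketch of how `DBWhole` above could be exercised with synthetic stand-ins for the resampled cores and logs (the `Depth`/`Gr1` column names come from the code; all numbers are made up):
```python
import numpy as np
import pandas as pd
log_depth = np.arange(1000.0, 1100.0, 0.1)    # one synthetic log, sampled every 0.1 m
core_depth = np.arange(1020.0, 1030.0, 0.1)   # one synthetic core inside the logged interval
log = pd.DataFrame({'Depth': log_depth, 'Gr1': np.random.rand(len(log_depth))})
core = pd.DataFrame({'Depth': core_depth, 'Gr1': np.random.rand(len(core_depth))})
# N is the core length in samples; mov holds the known shift per core; Range is in metres
DATASET = DBWhole([core], [log], N=len(core), mov=[0.0], Range=2)
X, y = DATASET[:, :-1], DATASET[:, -1]  # 2*N feature columns, last column is the match label
```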
#### File: 94CD94/Similarity_Signals/DepthMatching.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy as sc
from sklearn.pipeline import Pipeline
# This program lets the operator depth-match a core by visualizing it against the log and typing the shift in metres
def MessaInDepth(RicCores, RicLogs):
    N = RicCores[0].shape[0]
    for j in range(len(RicCores)):
        Sign = 'No'
        while Sign != 'yes':
            u = np.argmin(abs(RicCores[j].Depth[0]-RicLogs[j].Depth))
            plt.figure()
            plt.plot(np.asarray(RicLogs[j].Gr1[u:N+u])/max(RicLogs[j].Gr1[u:N+u]), np.asarray(RicLogs[j].Depth[u:N+u]))
            plt.plot(RicCores[j].Gr1/max(RicCores[j].Gr1), RicCores[j].Depth)
            plt.yticks(np.arange(min(np.asarray(RicLogs[j].Depth[u:N+u])), max(np.asarray(RicLogs[j].Depth[u:N+u]))+1, 1))
            plt.xlabel('Gamma Ray(API)')
            plt.ylabel('Depth (m)')
            plt.grid(axis='y')
            plt.tight_layout()
            plt.show(block=False)
            print('Di quanto sposto?')  # "How many metres should I shift?"
            M = float(input())
            if M == 0:
                break
            RicCores[j].Depth = RicCores[j].Depth-M
            u = np.argmin(abs(RicCores[j].Depth[0]-RicLogs[j].Depth))
            plt.figure()
            plt.plot(np.asarray(RicLogs[j].Gr1[u:N+u]), np.asarray(RicLogs[j].Depth[u:N+u]))
            plt.plot(RicCores[j].Gr1, RicCores[j].Depth, label='Win core' + str(j))
            plt.yticks(np.arange(min(np.asarray(RicLogs[j].Depth[u:N+u])), max(np.asarray(RicLogs[j].Depth[u:N+u]))+1, 1))
            plt.xlabel('Gamma Ray(API)')
            plt.ylabel('Depth (m)')
            plt.grid(axis='y')
            plt.tight_layout()
            plt.show(block=False)
            print('Are you satisfied?')
            Sign = input()  # type "yes" to accept the current position and move on
        # save the depth-matched core and the corresponding log
        RicCores[j].to_pickle('./Messainprof/Cores/Core'+str(j))
        RicLogs[j].to_pickle('./Messainprof/Logs/Log'+str(j))
``` |
{
"source": "94JuHo/Algorithm_study",
"score": 3
} |
#### File: 2020_01_06/self_number/self_number.py
```python
def self_number(n):
return n+sum([int(i) for i in str(n)])
arr = [0] * 10000
for i in range(10000):
if(self_number(i) < 10000):
arr[self_number(i)] = 1
for i in range(10000):
if arr[i]==0:
print(i)
```
#### File: Algorithm_study/coding_exam_study/factorial_example.py
```python
def factorial_iterative(n):
result = 1
for i in range(1, n+1):
result *= i
return result
def factorial_recursive(n):
if n <= 1:
return 1
return n * factorial_recursive(n - 1)
print('iterative: ', factorial_iterative(5))
print('recursive: ', factorial_recursive(5))
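# both calls print 120 (5! = 1*2*3*4*5)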
```
#### File: Algorithm_study/coding_exam_study/quick_sort_impr_example.py
```python
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array):
if len(array) <= 1:
return array
pivot = array[0]
tail = array[1:]
left_side = [x for x in tail if x <= pivot]
right_side = [x for x in tail if x > pivot]
print(array)
return quick_sort(left_side) + [pivot] +quick_sort(right_side)
print(quick_sort(array))
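# the recursive calls print the sub-array they receive; the final printed line is [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]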
``` |
{
"source": "94JuHo/cwru_py3",
"score": 3
} |
#### File: cwru_py3/cwru_py3/__init__.py
```python
import os
import glob
import errno
import random
import urllib.request as urllib
import numpy as np
from scipy.io import loadmat
class CWRU:
def __init__(self, exp, rpm, length):
if exp not in ('12DriveEndFault', '12FanEndFault', '48DriveEndFault'):
print("wrong experiment name: {}".format(exp))
exit(1)
if rpm not in ('1797', '1772', '1750', '1730'):
print("wrong rpm value: {}".format(rpm))
exit(1)
# work directory of all data
work_dir = os.getcwd()
rdir = os.path.join(os.path.expanduser(work_dir), 'Datasets/CWRU')
fmeta = os.path.join(os.path.dirname(__file__), 'metadata.txt')
all_lines = open(fmeta).readlines()
lines = []
for line in all_lines:
l = line.split()
if (l[0] == exp or l[0] == 'NormalBaseline') and l[1] == rpm:
lines.append(l)
self.length = length # sequence length
self._load_and_slice_data(rdir, lines)
# shuffle training and test arrays
self._shuffle()
self.labels = tuple(line[2] for line in lines)
self.nclasses = len(self.labels) # number of classes
def _mkdir(self, path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
print("can't create directory '{}''".format(path))
exit(1)
def _download(self, fpath, link):
print("Downloading from '{}' to '{}'".format(link,fpath))
        urllib.urlretrieve(link, fpath)  # urllib.request.urlretrieve; URLopener is deprecated in Python 3
def _load_and_slice_data(self, rdir, infos):
self.X_train = np.zeros((0, self.length))
self.X_test = np.zeros((0, self.length))
self.y_train = []
self.y_test = []
for idx, info in enumerate(infos):
# directory of this file
fdir = os.path.join(rdir, info[0], info[1])
self._mkdir(fdir)
fpath = os.path.join(fdir, info[2] + '.mat')
if not os.path.exists(fpath):
self._download(fpath, info[3].rstrip('\n'))
mat_dict = loadmat(fpath)
key = list(filter(lambda x: 'DE_time' in x, mat_dict.keys()))[0]
time_series = mat_dict[key][:, 0]
idx_last = -(time_series.shape[0] % self.length)
clips = time_series[:idx_last].reshape(-1, self.length)
n = clips.shape[0]
n_split = int(3 * n / 4)
self.X_train = np.vstack((self.X_train, clips[:n_split]))
self.X_test = np.vstack((self.X_test, clips[n_split:]))
self.y_train += [idx] * n_split
self.y_test += [idx] * (clips.shape[0] - n_split)
def _shuffle(self):
# shuffle training samples
index = list(range(self.X_train.shape[0]))
random.Random(0).shuffle(index)
self.X_train = self.X_train[index]
self.y_train = tuple(self.y_train[i] for i in index)
# shuffle test samples
index = list(range(self.X_test.shape[0]))
random.Random(0).shuffle(index)
self.X_test = self.X_test[index]
self.y_test = tuple(self.y_test[i] for i in index)
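# usage sketch (the experiment/rpm strings must match the checks in __init__; 384 is an
# arbitrary window length, and the .mat files are downloaded on first use):
#   data = CWRU('12DriveEndFault', '1797', 384)
#   print(data.nclasses, data.X_train.shape, data.X_test.shape)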
``` |
{
"source": "94JuHo/fastaiv2_study",
"score": 3
} |
#### File: 94JuHo/fastaiv2_study/04_mnist_basics.py
```python
from fastai.vision.all import *
from fastbook import *
from matplotlib import pyplot as plt
matplotlib.rc('image', cmap='Greys')
path = untar_data(URLs.MNIST_SAMPLE)
Path.BASE_PATH = path
print(path.ls())
print((path/'train').ls())
threes = (path/'train'/'3').ls().sorted()
sevens = (path/'train'/'7').ls().sorted()
print(threes)
im3_path = threes[1]
im3 = Image.open(im3_path)
#im3.show()
print(array(im3)[4:10, 4:10])
print(tensor(im3)[4:10, 4:10])
im3_t = tensor(im3)
df = pd.DataFrame(im3_t[4:15, 4:22])
df.style.set_properties(**{'font-size':'6pt'}).background_gradient('Greys')
pd.set_option('display.width', None)
print(df)
seven_tensors = [tensor(Image.open(o)) for o in sevens]
three_tensors = [tensor(Image.open(o)) for o in threes]
print(len(three_tensors), len(seven_tensors))
show_image(three_tensors[1])
#plt.show()
stacked_sevens = torch.stack(seven_tensors).float()/255
stacked_threes = torch.stack(three_tensors).float()/255
print(stacked_threes.shape)
print(len(stacked_threes.shape))
print(stacked_threes.ndim)
mean3 = stacked_threes.mean(0)
show_image(mean3)
#plt.show()
mean7 = stacked_sevens.mean(0)
show_image(mean7)
#plt.show()
a_3 = stacked_threes[1]
show_image(a_3)
#plt.show()
dist_3_abs = (a_3 - mean3).abs().mean()
dist_3_sqr = ((a_3 - mean3)**2).mean().sqrt()
print(dist_3_abs, dist_3_sqr)
print(F.l1_loss(a_3.float(), mean7), F.mse_loss(a_3, mean7).sqrt())
data = [[1, 2, 3], [4, 5, 6]]
arr = array(data)
tns = tensor(data)
print(arr)
print(tns)
print(tns[1])
print(tns[:, 1])
print(tns[1, 1:3])
print(tns+1)
print(tns.type())
print(tns*1.5)
valid_3_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'3').ls()])
valid_3_tens = valid_3_tens.float() / 255
valid_7_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'7').ls()])
valid_7_tens = valid_7_tens.float() / 255
print('valid_3_tens.shape=',valid_3_tens.shape,' valid_7_tens.shape=', valid_7_tens.shape)
def mnist_distance(a, b):
return (a-b).abs().mean((-1, -2))
print(mnist_distance(a_3, mean3))
valid_3_dist = mnist_distance(valid_3_tens, mean3)
print(valid_3_dist, valid_3_dist.shape)
print(tensor([1, 2, 3]) + tensor([1, 1, 1]))
print((valid_3_tens - mean3).shape)
def is_3(x):
return mnist_distance(x, mean3) < mnist_distance(x, mean7)
print(is_3(a_3), is_3(a_3).float())
print(is_3(valid_3_tens))
accuracy_3s = is_3(valid_3_tens).float().mean()
accuracy_7s = (1 - is_3(valid_7_tens).float()).mean()
print(accuracy_3s, accuracy_7s, (accuracy_3s + accuracy_7s)/2)
```
#### File: 94JuHo/fastaiv2_study/04_mnist_loss.py
```python
from fastai.vision.all import *
from fastbook import *
from matplotlib import pyplot as plt
path = untar_data(URLs.MNIST_SAMPLE)
Path.BASE_PATH = path
threes = (path/'train'/'3').ls().sorted()
sevens = (path/'train'/'7').ls().sorted()
seven_tensors = [tensor(Image.open(o)) for o in sevens]
three_tensors = [tensor(Image.open(o)) for o in threes]
stacked_sevens = torch.stack(seven_tensors).float()/255
stacked_threes = torch.stack(three_tensors).float()/255
valid_3_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'3').ls()])
valid_3_tens = valid_3_tens.float() / 255
valid_7_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'7').ls()])
valid_7_tens = valid_7_tens.float() / 255
train_x = torch.cat([stacked_threes, stacked_sevens]).view(-1, 28*28)
train_y = tensor([1] * len(threes) + [0] * len(sevens)).unsqueeze(1)
print(train_x.shape, train_y.shape)
dset = list(zip(train_x, train_y))
x, y = dset[0]
print(x.shape, y)
valid_x = torch.cat([valid_3_tens, valid_7_tens]).view(-1, 28*28)
valid_y = tensor([1] * len(valid_3_tens) + [0] * len(valid_7_tens)).unsqueeze(1)
valid_dset = list(zip(valid_x, valid_y))
def init_params(size, std=1.0):
return (torch.randn(size)*std).requires_grad_()
weights = init_params((28 * 28, 1))
bias = init_params(1)
print((train_x[0] * weights.T).sum() + bias)
def linear1(xb):
return xb@weights + bias
preds = linear1(train_x)
print(preds)
corrects = (preds > 0.0).float() == train_y
print(corrects)
print(corrects.float().mean().item())
weights[0] *= 1.0001
preds = linear1(train_x)
print( ((preds > 0.0).float() == train_y).float().mean().item())
trgts = tensor([1, 0, 1])
prds = tensor([0.9, 0.4, 0.2])
def mnist_loss(predictions, targets):
return torch.where(targets==1, 1-predictions, predictions).mean()
print(torch.where(trgts==1, 1-prds, prds))
print(mnist_loss(prds, trgts))
print(mnist_loss(tensor([0.9, 0.4, 0.8]), trgts))
def sigmoid(x):
return 1/(1+torch.exp(-x))
plot_function(torch.sigmoid, title='Sigmoid', min=-4, max=4)
#plt.show()
def mnist_loss(predictions, targets):
predictions = predictions.sigmoid()
return torch.where(targets==1, 1-predictions, predictions).mean()
coll = range(15)
dl = DataLoader(coll, batch_size=5, shuffle=True)
print(list(dl))
ds = L(enumerate(string.ascii_lowercase))
print(ds)
dl = DataLoader(ds, batch_size=6, shuffle=True)
print(list(dl))
```
#### File: 94JuHo/fastaiv2_study/04_puttingitalltogether.py
```python
from fastai.vision.all import *
from fastbook import *
from matplotlib import pyplot as plt
def init_params(size, std=1.0):
return (torch.randn(size)*std).requires_grad_()
def linear1(xb):
return xb@weights + bias
def mnist_loss(predictions, targets):
return torch.where(targets==1, 1-predictions, predictions).mean()
path = untar_data(URLs.MNIST_SAMPLE)
Path.BASE_PATH = path
threes = (path/'train'/'3').ls().sorted()
sevens = (path/'train'/'7').ls().sorted()
seven_tensors = [tensor(Image.open(o)) for o in sevens]
three_tensors = [tensor(Image.open(o)) for o in threes]
stacked_sevens = torch.stack(seven_tensors).float()/255
stacked_threes = torch.stack(three_tensors).float()/255
weights = init_params((28*28, 1))
bias = init_params(1)
train_x = torch.cat([stacked_threes, stacked_sevens]).view(-1, 28*28)
train_y = tensor([1] * len(threes) + [0] * len(sevens)).unsqueeze(1)
valid_3_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'3').ls()])
valid_3_tens = valid_3_tens.float() / 255
valid_7_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'7').ls()])
valid_7_tens = valid_7_tens.float() / 255
valid_x = torch.cat([valid_3_tens, valid_7_tens]).view(-1, 28*28)
valid_y = tensor([1] * len(valid_3_tens) + [0] * len(valid_7_tens)).unsqueeze(1)
valid_dset = list(zip(valid_x, valid_y))
dset = list(zip(train_x, train_y))
dl = DataLoader(dset, batch_size=256)
xb, yb = first(dl)
print(xb.shape, yb.shape)
valid_dl = DataLoader(valid_dset, batch_size=256)
batch = train_x[:4]
print(batch.shape)
preds = linear1(batch)
print(preds)
loss = mnist_loss(preds, train_y[:4])
print(loss)
loss.backward()
print(weights.grad.shape, weights.grad.mean(), bias.grad)
def calc_grad(xb, yb, model):
preds = model(xb)
loss = mnist_loss(preds, yb)
loss.backward()
calc_grad(batch, train_y[:4], linear1)
print(weights.grad.mean(), bias.grad)
calc_grad(batch, train_y[:4], linear1)
print(weights.grad.mean(), bias.grad)
weights.grad.zero_()
bias.grad.zero_()
def train_epoch(model, lr, params):
for xb, yb in dl:
calc_grad(xb, yb, model)
for p in params:
p.data -= p.grad * lr
p.grad.zero_()
print((preds>0.0).float() == train_y[:4])
def batch_accuracy(xb, yb):
preds = xb.sigmoid()
correct = (preds > 0.5) == yb
return correct.float().mean()
print(batch_accuracy(linear1(batch), train_y[:4]))
def validate_epoch(model):
accs = [batch_accuracy(model(xb), yb) for xb, yb in valid_dl]
return round(torch.stack(accs).mean().item(), 4)
print(validate_epoch(linear1))
lr = 1.
params = weights, bias
train_epoch(linear1, lr, params)
print(validate_epoch(linear1))
for _ in range(20):
train_epoch(linear1, lr, params)
print(validate_epoch(linear1), end=' ')
``` |
{
"source": "94JuHo/OpenCV_Lecture",
"score": 3
} |
#### File: 94JuHo/OpenCV_Lecture/2dHist.py
```python
import cv2
import numpy as np
import matplotlib.pyplot as plt
def showHistogram():
filename = "Images/lena.jpg"
img = cv2.imread(filename, cv2.IMREAD_COLOR)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.imshow('2DHist', hist)
plt.imshow(hist, interpolation='nearest')
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
showHistogram()
```
#### File: 94JuHo/OpenCV_Lecture/histogram2.py
```python
import cv2
import numpy as np
import matplotlib.pyplot as plt
def showHistogram():
filename = "images/lena.jpg"
gray_img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
hist = cv2.calcHist([gray_img], [0], None, [256], [0, 256])
plt.plot(hist, color='black')
color_img = cv2.imread(filename, cv2.IMREAD_COLOR)
for i, c in enumerate(('blue', 'green', 'red')):
hist = cv2.calcHist([color_img], [i], None, [256], [0, 256])
plt.plot(hist, color=c)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
showHistogram()
```
#### File: 94JuHo/OpenCV_Lecture/histogram_eqaulizeHist.py
```python
import cv2
import numpy as np
def showImage():
filename="Images/lena.jpg"
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
equ_img = cv2.equalizeHist(img)
cv2.imshow('equalized image', equ_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
showImage()
```
#### File: 94JuHo/OpenCV_Lecture/histogram_streching.py
```python
import cv2
import numpy as np
def main():
filename="images/coin.bmp"
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
Hist = np.zeros((256))
ysize= img.shape[0]
xsize = img.shape[1]
for y in range(ysize):
for x in range(xsize):
Hist[img.item(y, x)] = Hist[img.item(y, x)]+ 1
low = 0
high = 255
for i in range(256):
if Hist[i] != 0:
low = i
break
for i in range(255, -1, -1):
if Hist[i] != 0:
high = i
break
for y in range(ysize):
for x in range(xsize):
value = round((img.item(y, x) - low) / (high-low) * 255)
img.itemset((y, x) , value)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
main()
```
#### File: 94JuHo/OpenCV_Lecture/lut.py
```python
import cv2
import numpy as np
def showImage():
filename = "Images/lena.jpg"
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
lut = np.arange(255, -1, -1, dtype='uint8')
ysize = img.shape[0]
xsize = img.shape[1]
for y in range(ysize):
for x in range(xsize):
img.itemset((y, x), lut[img.item(y, x)])
    # the OpenCV way (vectorized):
#result = cv2.LUT(img, lut)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
showImage()
```
#### File: 94JuHo/OpenCV_Lecture/median_pass_filter.py
```python
import cv2
import numpy as np
from tkinter.filedialog import askopenfilename
def main():
filename = askopenfilename()
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
array = np.zeros(9)
ysize = img.shape[0]
xsize = img.shape[1]
for y in range(1, ysize-1):
for x in range(1, xsize-1):
for mr in range(3):
for mc in range(3):
array[mr*3+mc] = img.item(y+mr-1, x+mc-1)
array.sort()
img.itemset((y,x), array[4])
cv2.imshow('result',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
main()
``` |
{
"source": "94JuHo/study_for_deeplearning",
"score": 3
} |
#### File: 94JuHo/study_for_deeplearning/01.LeastSquare.py
```python
import numpy as np
def top(x, mx, y, my):
d = 0
for i in range(len(x)):
d += (x[i] - mx) * (y[i] - my)
return d
x = [2, 4, 6, 8]
y = [81, 93, 91, 97]
mx = np.mean(x)
my = np.mean(y)
divisor = sum([(mx - i) ** 2 for i in x])
dividend = top(x, mx, y, my)
a = dividend / divisor
b = my - (mx * a)
print(a, b)
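# with this data: mx = 5.0, my = 90.5, dividend = 46.0, divisor = 20.0, so it prints 2.3 79.0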
```
#### File: study_for_deeplearning/deep_class/02_RMSE.py
```python
import numpy as np
# slope a and y-intercept b
ab=[3,76]
# x, y data values
data = [[2, 81], [4, 93], [6, 91], [8, 97]]
x = [i[0] for i in data]
y = [i[1] for i in data]
# function that plugs a and b into y = ax + b and returns the prediction
def predict(x):
return ab[0]*x + ab[1]
# RMSE function
def rmse(p, a):
return np.sqrt(((p - a) ** 2).mean())
# applies the RMSE function to the predicted and actual y values to get the final score
def rmse_val(predict_result,y):
return rmse(np.array(predict_result), np.array(y))
# empty list that will hold the predictions
predict_result = []
# fill predict_result by plugging in every x value once
for i in range(len(x)):
predict_result.append(predict(x[i]))
print("공부시간=%.f, 실제점수=%.f, 예측점수=%.f" % (x[i], y[i], predict(x[i])))
# print the final RMSE
print("rmse 최종값: " + str(rmse_val(predict_result,y)))
``` |
{
"source": "94mia/DeepLEGO",
"score": 2
} |
#### File: 94mia/DeepLEGO/config.py
```python
from utils.functions import print_config, create_train_dir
from backbone.resnet import *
from heads.deeplabv3_plus import *
from heads.aspp_plus import ASPP_plus
""" Dataset parameters """
class Params():
def __init__(self):
# network structure parameters
self.model = 'ResNet101' # define your model
self.dataset = 'cityscapes'
self.output_stride = 8
self.down_sample_rate = 32 # classic down sample rate, DO NOT CHANGE!
self.se_mode = None # Squeeze and Excitation mode, 0->cSE, 1-> sSE, 2->scSE
self.HDC = True # Hybrid Dilated Convolution, type bool
self.multi_grid = None # multi_grid in DeepLabv3, type bool
# dataset parameters
self.rescale_size = 600 # rescale image when training
self.image_size = 512 # the final image size after crop
self.num_class = 20 # 20 classes for training
self.dataset_root = '/path/to/your/dataset'
self.dataloader_workers = 12
self.shuffle = True # if shuffle data when training
self.train_batch = 4
self.val_batch = 2
self.test_batch = 1
# train parameters
self.num_epoch = 150
self.base_lr = 0.0025
self.power = 0.9 # lr decay power
self.head_lr_mult = 1
self.backbone_lr_mult = 1
self.momentum = 0.9
self.weight_decay = 0.0005
self.should_val = True
self.val_every = 1 # how often will we evaluate model on val set
self.display = 1 # how often will we show train result
# model restore parameters
self.resume_from = None # None for train from scratch
self.pre_trained_from = None # None for train from scratch
self.should_save = False
self.save_every = 10 # how often will we save checkpoint
# create training dir
self.summary = False
if self.summary:
self.summary_dir, self.ckpt_dir = create_train_dir(self)
""" Class name transform """
name2net = {'resnet18': ResNet18,
'resnet34': ResNet34,
'resnet50': ResNet50,
'resnet101': ResNet101,
'resnet152': ResNet152,
'aspp_plus': ASPP_plus,
'deeplabv3_plus_decoder': DeepLabv3_plus_decoder
}
if __name__ == '__main__':
aa = Params()
print_config(aa)
```
#### File: DeepLEGO/heads/pspnet.py
```python
import torch.nn.functional as F
import torch.nn as nn
import torch
def conv_bn_relu(in_channels, out_channels, kernel_size=1):
""" 1x1 Convolution with batch norm and relu """
pad = (kernel_size-1) // 2
return nn.Sequential(nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, padding=pad, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU()).cuda()
class PSP(nn.Module):
def __init__(self, params):
super(PSP, self).__init__()
self.pool1 = nn.AdaptiveAvgPool2d((1, 1))
self.pool2 = nn.AdaptiveAvgPool2d((2, 2))
self.pool3 = nn.AdaptiveAvgPool2d((3, 3))
self.pool4 = nn.AdaptiveAvgPool2d((6, 6))
self.conv1 = conv_bn_relu(params.output_channels, 512)
self.conv2 = conv_bn_relu(params.output_channels, 512)
self.conv3 = conv_bn_relu(params.output_channels, 512)
self.conv4 = conv_bn_relu(params.output_channels, 512)
self.conv5 = conv_bn_relu(512*4+params.output_channels, 512, 3)
self.class_conv = nn.Conv2d(512, params.num_class, 1)
self.output_stride = params.output_stride
def forward(self, logits):
x = logits[-1]
input_size = x.shape[2:]
x1 = self.pool1(x)
x2 = self.pool2(x)
x3 = self.pool3(x)
x4 = self.pool4(x)
x1 = self.conv1(x1)
x2 = self.conv2(x2)
x3 = self.conv3(x3)
x4 = self.conv4(x4)
x1 = F.upsample(x1, size=input_size, mode='bilinear', align_corners=False)
x2 = F.upsample(x2, size=input_size, mode='bilinear', align_corners=False)
x3 = F.upsample(x3, size=input_size, mode='bilinear', align_corners=False)
x4 = F.upsample(x4, size=input_size, mode='bilinear', align_corners=False)
x = torch.cat((x, x1, x2, x3, x4), dim=1)
x = self.conv5(x)
x = self.class_conv(x)
x = F.upsample(x, scale_factor=self.output_stride)
return x
```
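A minimal shape-check sketch for the head above (the `params` fields and tensor sizes are illustrative; `conv_bn_relu` builds its layers on the GPU, so CUDA is required):
```python
import torch
from types import SimpleNamespace
# only the attributes read by PSP are provided here
params = SimpleNamespace(output_channels=2048, output_stride=8, num_class=20)
head = PSP(params).cuda()
feats = torch.randn(1, 2048, 64, 64).cuda()  # stand-in for the last backbone feature map
out = head([feats])                          # the head only uses logits[-1]
print(out.shape)                             # torch.Size([1, 20, 512, 512])
```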
#### File: DeepLEGO/layers/resnet.py
```python
import torch.nn as nn
def conv3x3(in_channels, out_channels, stride=1, dilation=1):
""" 3x3 Convolution with padding and stride """
return nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
def conv1x1(in_channels, out_channels, stride=1, dilation=1):
""" 1x1 Convolution with padding and stride """
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=0, dilation=dilation, bias=False)
def conv3x3_bn_relu(in_channels, out_channels, stride=1, dilation=1):
""" 3x3 Convolution with batch norm and relu """
return nn.Sequential(nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU())
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, base_channels, stride=1, dilation=1, se_mode=None):
super(BasicBlock, self).__init__()
self.stride = stride
self.se_mode = se_mode
self.conv1 = conv3x3(in_channels, base_channels, stride=stride, dilation=dilation)
self.bn1 = nn.BatchNorm2d(base_channels)
self.conv2 = conv3x3(base_channels, base_channels, dilation=dilation)
self.bn2 = nn.BatchNorm2d(base_channels)
self.relu = nn.ReLU()
if in_channels != base_channels*self.expansion or stride != 1:
self.shortcut = conv1x1(in_channels, base_channels*self.expansion, stride=stride)
else:
self.shortcut = None
if se_mode is not None:
if se_mode % 2 == 0: # 0 or 2
self.se1 = nn.Sequential(nn.AdaptiveAvgPool2d(1),
nn.Conv2d(base_channels, base_channels//16, 1, bias=False),
nn.ReLU(),
nn.Conv2d(base_channels//16, base_channels, 1, bias=False),
nn.Sigmoid())
else:
self.se1 = None
if se_mode > 0: # 1 or 2
self.se2 = nn.Sequential(nn.Conv2d(base_channels, 1, 1, bias=False),
nn.Sigmoid())
else:
self.se2 = None
def forward(self, x):
orig = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.se_mode == 0:
x = x*self.se1(x)
elif self.se_mode == 1:
x = x*self.se2(x)
elif self.se_mode == 2:
x = x*self.se1(x) + x*self.se2(x)
if self.shortcut is not None:
orig = self.shortcut(orig)
assert orig.shape == x.shape
x += orig
x = self.relu(x)
return x
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channels, base_channels, stride=1, dilation=1, se_mode=None):
super(Bottleneck, self).__init__()
self.stride = stride
self.se_mode = se_mode
self.conv1 = conv1x1(in_channels, base_channels)
self.bn1 = nn.BatchNorm2d(base_channels)
self.conv2 = conv3x3(base_channels, base_channels, stride=stride, dilation=dilation)
self.bn2 = nn.BatchNorm2d(base_channels)
self.conv3 = conv1x1(base_channels, base_channels*self.expansion)
self.bn3 = nn.BatchNorm2d(base_channels*self.expansion)
self.relu = nn.ReLU()
if in_channels != base_channels*self.expansion or stride != 1:
self.shortcut = conv1x1(in_channels, base_channels*self.expansion, stride=stride)
else:
self.shortcut = None
if se_mode is not None:
if se_mode % 2 == 0: # 0 or 2
self.se1 = nn.Sequential(nn.AdaptiveAvgPool2d(1),
nn.Conv2d(base_channels*self.expansion, base_channels*self.expansion//16, 1,
bias=False),
nn.ReLU(),
nn.Conv2d(base_channels*self.expansion//16, base_channels*self.expansion, 1,
bias=False),
nn.Sigmoid())
else:
self.se1 = None
if se_mode > 0: # 1 or 2
self.se2 = nn.Sequential(nn.Conv2d(base_channels*self.expansion, 1, 1, bias=False),
nn.Sigmoid())
else:
self.se2 = None
def forward(self, x):
orig = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
if self.se_mode == 0:
x = x*self.se1(x)
elif self.se_mode == 1:
x = x*self.se2(x)
elif self.se_mode == 2:
x = x*self.se1(x) + x*self.se2(x)
if self.shortcut is not None:
orig = self.shortcut(orig)
x += orig
x = self.relu(x)
return x
```
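The `se_mode` flag in both blocks above selects the attention branch applied before the residual addition: `None` is a plain residual block, `0` uses the channel squeeze-and-excitation branch (`se1`), `1` uses the spatial gate (`se2`), and `2` sums both re-weighted maps. A minimal CPU-only sketch (the import path assumes the DeepLEGO root is on `PYTHONPATH`):
```python
# Illustrative sketch only.
import torch
from layers.resnet import BasicBlock, Bottleneck

x = torch.randn(2, 64, 56, 56)
basic = BasicBlock(in_channels=64, base_channels=64, se_mode=0)             # channel SE branch
bottle = Bottleneck(in_channels=64, base_channels=64, stride=2, se_mode=2)  # both branches
print(basic(x).shape)   # torch.Size([2, 64, 56, 56])  -- expansion 1, stride 1
print(bottle(x).shape)  # torch.Size([2, 256, 28, 28]) -- expansion 4, stride 2
```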
#### File: 94mia/DeepLEGO/main.py
```python
from config import *
from network import MyNetwork, LOG, WARNING
def parse_network(net_str):
net_str = net_str.split('+')
net_backbone = net_str[:-1]
net_head = net_str[-1]
module = []
for b in net_backbone:
part = name2net[b](params, in_encoder=True)
if hasattr(part, 'output_channels'):
params.output_channels = part.output_channels
module.append(part)
module.append(name2net[net_head](params))
return module
network = 'resnet101+aspp_plus+deeplabv3_plus_decoder'
params = Params()
params.model = network
params.dataset_root = '/media/ubuntu/disk/cityscapes'
params.has_max_pool = False
params.output_stride = 16
net = MyNetwork(params, module=parse_network(network))
net.Train()
```
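For reference, `parse_network()` above treats every '+'-separated token except the last as a backbone stage and the last token as the head; a plain-Python illustration:
```python
# Pure-Python illustration of how parse_network() interprets the model string.
parts = 'resnet101+aspp_plus+deeplabv3_plus_decoder'.split('+')
backbone_parts, head_part = parts[:-1], parts[-1]
print(backbone_parts)  # ['resnet101', 'aspp_plus']
print(head_part)       # 'deeplabv3_plus_decoder'
```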
#### File: 94mia/DeepLEGO/network.py
```python
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from utils.functions import *
from utils.progressbar import bar
from datasets.cityscapes import logits2trainId, trainId2color, trainId2LabelId, create_datasets
from heads.aspp import ASPP
from backbone.resnet import ResNet18
WARNING = lambda x: print('\033[1;31;2mWARNING: ' + x + '\033[0m')
LOG = lambda x: print('\033[0;31;2m' + x + '\033[0m')
# create model
class MyNetwork(nn.Module):
"""
A Convolutional Neural Network used for Semantic Segmentation
"""
"""######################"""
"""# Model Construction #"""
"""######################"""
def __init__(self, params, module=None):
super(MyNetwork, self).__init__()
# initializing network parameters
self.params = params
LOG('Network Configurations:\n')
print_config(params)
# creating network datasets
LOG('Creating Datasets and Transformations......')
self.datasets = create_datasets(params)
LOG('Creation Succeed.\n')
self.pb = bar() # hand-made progressbar
self.epoch = 0
self.init_epoch = 0
self.ckpt_flag = False
self.train_loss = []
self.val_loss = []
if self.params.summary:
self.summary_writer = SummaryWriter(log_dir=self.params.summary_dir)
# build network structure
self.backbone = module[:-1]
self.head = module[-1]
LOG('Building and Initializing Model......')
self.build_network()
LOG('Model Built.\n')
# set default loss
self.loss_fn = nn.CrossEntropyLoss(ignore_index=255)
# set default optimizer
self.opt = torch.optim.RMSprop([{'params': self.head.parameters(), 'lr_mult': self.params.head_lr_mult},
{'params': self.backbone_params, 'lr_mult': self.params.backbone_lr_mult}],
lr=self.params.base_lr,
momentum=self.params.momentum,
weight_decay=self.params.weight_decay)
# initialize
self.build_dataloader()
# load data
self.load_checkpoint()
self.load_model()
def forward(self, x):
logits = x
for net in self.backbone:
logits = net(logits)
seg = self.head(logits)
return seg
"""######################"""
"""# Train and Validate #"""
"""######################"""
def train_one_epoch(self):
"""
Train network in one epoch
"""
print('Training......')
# set mode train
for bb in self.backbone:
bb.train()
self.head.train()
# prepare data
train_loss = 0
train_size = len(self.datasets['train'])
if train_size % self.params.train_batch != 0:
total_batch = train_size // self.params.train_batch + 1
else:
total_batch = train_size // self.params.train_batch
# train through dataset
for batch_idx, batch in enumerate(self.train_loader):
self.pb.click(batch_idx, total_batch)
image, label = batch['image'], batch['label']
image_cuda, label_cuda = image.cuda(), label.cuda()
# checkpoint split
# if self.params.should_split:
# image_cuda.requires_grad_()
# out = checkpoint_sequential(self.network, self.params.split, image_cuda)
# else:
out = self(image_cuda)
loss = self.loss_fn(out, label_cuda)
# optimize
self.opt.zero_grad()
loss.backward()
self.opt.step()
# accumulate
train_loss += loss.item()
# record first loss
if self.train_loss == []:
self.train_loss.append(train_loss)
if self.params.summary:
self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)
self.pb.close()
train_loss /= total_batch
self.train_loss.append(train_loss)
# add to summary
if self.params.summary:
self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)
def val_one_epoch(self):
"""
Validate network in one epoch every m training epochs,
m is defined in params.val_every
"""
print('Validating......')
# set mode eval
for bb in self.backbone:
bb.eval()
self.head.eval()
# prepare data
val_loss = 0
val_size = len(self.datasets['val'])
if val_size % self.params.val_batch != 0:
total_batch = val_size // self.params.val_batch + 1
else:
total_batch = val_size // self.params.val_batch
# validate through dataset
for batch_idx, batch in enumerate(self.val_loader):
self.pb.click(batch_idx, total_batch)
image, label = batch['image'], batch['label']
image_cuda, label_cuda = image.cuda(), label.cuda()
# checkpoint split
# if self.params.should_split:
# image_cuda.requires_grad_()
# out = checkpoint_sequential(self.network, self.params.split, image_cuda)
# else:
out = self(image_cuda)
loss = self.loss_fn(out, label_cuda)
val_loss += loss.item()
# record first loss
if self.val_loss == []:
self.val_loss.append(val_loss)
if self.params.summary:
self.summary_writer.add_scalar('loss/val_loss', val_loss, 0)
self.pb.close()
val_loss /= total_batch
self.val_loss.append(val_loss)
# add to summary
if self.params.summary:
self.summary_writer.add_scalar('loss/val_loss', val_loss, self.epoch)
def Train(self):
"""
Train network in n epochs, n is defined in params.num_epoch
"""
self.init_epoch = self.epoch
if self.epoch >= self.params.num_epoch:
WARNING('num_epoch should be larger than the current epoch. Skip training......\n')
else:
for _ in range(self.epoch, self.params.num_epoch):
self.epoch += 1
print('-' * 20 + 'Epoch.' + str(self.epoch) + '-' * 20)
# train one epoch
self.train_one_epoch()
# should display
if self.epoch % self.params.display == 0:
print('\tTrain loss: %.4f' % self.train_loss[-1])
# should save
if self.params.should_save:
if self.epoch % self.params.save_every == 0:
self.save_checkpoint()
# test every params.test_every epoch
if self.params.should_val:
if self.epoch % self.params.val_every == 0:
self.val_one_epoch()
print('\tVal loss: %.4f' % self.val_loss[-1])
# adjust learning rate
self.adjust_lr()
# save the last network state
if self.params.should_save:
self.save_checkpoint()
# train visualization
self.plot_curve()
def Test(self):
"""
Test network on test set
"""
print('Testing......')
# set mode eval
torch.cuda.empty_cache()
self.eval()
# prepare test data
test_size = len(self.datasets['test'])
if test_size % self.params.test_batch != 0:
total_batch = test_size // self.params.test_batch + 1
else:
total_batch = test_size // self.params.test_batch
# test for one epoch
for batch_idx, batch in enumerate(self.test_loader):
self.pb.click(batch_idx, total_batch)
image, label, name = batch['image'], batch['label'], batch['label_name']
image_cuda, label_cuda = image.cuda(), label.cuda()
out = self(image_cuda)
for i in range(self.params.test_batch):
idx = batch_idx*self.params.test_batch+i
id_map = logits2trainId(out[i, ...])
color_map = trainId2color(self.params.dataset_root, id_map, name=name[i])
trainId2LabelId(self.params.dataset_root, id_map, name=name[i])
image_orig = image[i].numpy().transpose(1, 2, 0)
image_orig = image_orig*255
image_orig = image_orig.astype(np.uint8)
if self.params.summary:
self.summary_writer.add_image('test/img_%d/orig' % idx, image_orig, idx)
self.summary_writer.add_image('test/img_%d/seg' % idx, color_map, idx)
"""##########################"""
"""# Model Save and Restore #"""
"""##########################"""
def save_checkpoint(self):
save_dict = {'epoch' : self.epoch,
'train_loss' : self.train_loss,
'val_loss' : self.val_loss,
'state_dict' : self.state_dict(),
'optimizer' : self.opt.state_dict()}
torch.save(save_dict, self.params.ckpt_dir+'Checkpoint_epoch_%d.pth.tar' % self.epoch)
LOG('Checkpoint saved')
def load_checkpoint(self):
"""
Load checkpoint from given path
"""
if self.params.resume_from is not None and os.path.exists(self.params.resume_from):
try:
LOG('Loading Checkpoint at %s' % self.params.resume_from)
ckpt = torch.load(self.params.resume_from)
self.epoch = ckpt['epoch']
try:
self.train_loss = ckpt['train_loss']
self.val_loss = ckpt['val_loss']
except:
self.train_loss = []
self.val_loss = []
self.load_state_dict(ckpt['state_dict'])
self.opt.load_state_dict(ckpt['optimizer'])
LOG('Checkpoint Loaded!')
LOG('Current Epoch: %d' % self.epoch)
self.ckpt_flag = True
except:
WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)
else:
WARNING('Checkpoint does not exist. Start loading pre-trained model......')
def load_model(self):
"""
Load an ImageNet pre-trained model into the backbone; this only happens when
no checkpoint is loaded
"""
if self.ckpt_flag:
LOG('Skip Loading Pre-trained Model......')
else:
if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):
try:
LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)
pretrain = torch.load(self.params.pre_trained_from)
self.load_state_dict(pretrain)
LOG('Pre-trained Model Loaded!\n')
except:
WARNING('Cannot load pre-trained model. Start training......\n')
else:
WARNING('Pre-trained model does not exist. Start training......\n')
"""#############"""
"""# Utilities #"""
"""#############"""
def initialize(self):
"""
Initializes the model parameters
"""
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.backbone_params = []
for m in self.backbone:
self.backbone_params.extend(list(m.parameters()))
def adjust_lr(self):
"""
Adjust learning rate at each epoch
"""
learning_rate = self.params.base_lr * (1 - float(self.epoch) / self.params.num_epoch) ** self.params.power
for param_group in self.opt.param_groups:
param_group['lr'] = learning_rate
print('Change learning rate into %f' % (learning_rate))
if self.params.summary:
self.summary_writer.add_scalar('learning_rate', learning_rate, self.epoch)
def plot_curve(self):
"""
Plot train/val loss curve
"""
x1 = np.arange(self.init_epoch, self.params.num_epoch+1, dtype=np.int).tolist()
x2 = np.linspace(self.init_epoch, self.epoch,
num=(self.epoch-self.init_epoch)//self.params.val_every+1, dtype=np.int64)
plt.plot(x1, self.train_loss, label='train_loss')
plt.plot(x2, self.val_loss, label='val_loss')
plt.legend(loc='best')
plt.title('Train/Val loss')
plt.grid()
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
def build_network(self, backbone=None, head=None):
"""
Build up network depend on backbone and head, default model is ResNet18+ASPP
"""
if backbone is None:
if self.backbone is None:
self.backbone = [ResNet18(self.params)]
else:
self.backbone = backbone
if head is None:
if self.head is None:
self.head = ASPP(self.params)
else:
self.head = head
self.cuda()
self.initialize()
def build_dataloader(self):
self.train_loader = DataLoader(self.datasets['train'],
batch_size=self.params.train_batch,
shuffle=self.params.shuffle,
num_workers=self.params.dataloader_workers)
self.test_loader = DataLoader(self.datasets['test'],
batch_size=self.params.test_batch,
shuffle=False, num_workers=self.params.dataloader_workers)
self.val_loader = DataLoader(self.datasets['val'],
batch_size=self.params.val_batch,
shuffle=self.params.shuffle,
num_workers=self.params.dataloader_workers)
""" TEST """
if __name__ == '__main__':
import utils.functions as fn
from config import Params
params = Params()
params.dataset_root = '/media/ubuntu/disk/cityscapes'
LOG('Creating Dataset and Transformation......')
datasets = fn.create_datasets(params)
LOG('Creation Succeed.\n')
net = MyNetwork(params, datasets)
net.Train()
``` |
{
"source": "94pxls/ECMAScript",
"score": 2
} |
#### File: ECMAScript/quickjs/builtin_binding_generator.py
```python
import json, os
DIR = os.path.abspath( os.path.dirname(__file__) )
OUTPUT_FILE = os.path.join(DIR, "quickjs_builtin_binder.gen.cpp")
API = json.load(open(os.path.join(DIR, '..', 'buitin_api.gen.json'), 'r'))
VariantTypes = {
"boolean": "Variant::BOOL",
"number": "Variant::REAL",
"string": "Variant::STRING",
"Vector2": "Variant::VECTOR2",
"Vector3": "Variant::VECTOR3",
"Basis": "Variant::BASIS",
"Quat": "Variant::QUAT",
"Color": "Variant::COLOR",
"Rect2": "Variant::RECT2",
"RID": "Variant::_RID",
"Transform2D": "Variant::TRANSFORM2D",
"Plane": "Variant::PLANE",
"AABB": "Variant::AABB",
"Transform": "Variant::TRANSFORM",
"PoolByteArray": "Variant::POOL_BYTE_ARRAY",
"PoolIntArray": "Variant::POOL_INT_ARRAY",
"PoolRealArray": "Variant::POOL_REAL_ARRAY",
"PoolStringArray": "Variant::POOL_STRING_ARRAY",
"PoolVector2Array": "Variant::POOL_VECTOR2_ARRAY",
"PoolVector3Array": "Variant::POOL_VECTOR3_ARRAY",
"PoolColorArray": "Variant::POOL_COLOR_ARRAY",
}
JSToGodotTemplates = {
"number": 'QuickJSBinder::js_to_number(ctx, ${arg})',
"string": 'QuickJSBinder::js_to_string(ctx, ${arg})',
"boolean": 'QuickJSBinder::js_to_bool(ctx, ${arg})',
"Vector2": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getVector2()',
"Rect2": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getRect2()',
"Color": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getColor()',
"RID": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getRID()',
"AABB": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getAABB()',
"Plane": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPlane()',
"Quat": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getQuat()',
"Transform2D": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getTransform2D()',
"Vector3": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getVector3()',
"Basis": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getBasis()',
"Transform": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getTransform()',
"PoolByteArray": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolByteArray()',
"PoolIntArray": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolIntArray()',
"PoolRealArray": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolRealArray()',
"PoolStringArray": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolStringArray()',
"PoolVector2Array": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolVector2Array()',
"PoolVector3Array": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolVector3Array()',
"PoolColorArray": '*(BINDING_DATA_FROM_JS(ctx, ${arg}))->getPoolColorArray()',
"Variant": '(BINDING_DATA_FROM_JS(ctx, ${arg}))->get_value()',
}
GodotTypeNames = {
"number": "real_t",
"string": "String",
"boolean": "bool",
"Vector2": "Vector2",
"Rect2": "Rect2",
"Color": "Color",
"RID": "RID",
"AABB": "AABB",
"Plane": "Plane",
"Quat": "Quat",
"Transform2D": "Transform2D",
"Vector3": "Vector3",
"Basis": "Basis",
"Transform": "Transform",
"PoolByteArray": "PoolByteArray",
"PoolIntArray": "PoolIntArray",
"PoolRealArray": "PoolRealArray",
"PoolStringArray": "PoolStringArray",
"PoolVector2Array": "PoolVector2Array",
"PoolVector3Array": "PoolVector3Array",
"PoolColorArray": "PoolColorArray",
"Variant": "Variant",
}
GodotToJSTemplates = {
"number": 'QuickJSBinder::to_js_number(ctx, ${arg})',
"string": 'QuickJSBinder::to_js_string(ctx, ${arg})',
"boolean": 'QuickJSBinder::to_js_bool(ctx, ${arg})',
"Vector2": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Rect2": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Color": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"RID": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"AABB": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Plane": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Quat": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Transform2D": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Vector3": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Basis": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Transform": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolByteArray": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolIntArray": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolRealArray": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolStringArray": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolVector2Array": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolVector3Array": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"PoolColorArray": 'QuickJSBuiltinBinder::new_object_from(ctx, ${arg})',
"Variant": 'QuickJSBinder::variant_to_var(ctx, ${arg})',
}
def apply_parttern(template, values):
for key in values:
template = template.replace( '${' + key + '}', values[key])
return template
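# Illustrative note (not part of the original generator): apply_parttern is a tiny
# ${key}-style substitution helper, e.g.
#   apply_parttern('register_property(${type}, "${name}")', {'type': 'Variant::VECTOR2', 'name': 'x'})
# returns 'register_property(Variant::VECTOR2, "x")'.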
def generate_constructor(cls):
TemplateConstructorName = '${class}_constructor'
TemplateConstructorDeclare = 'static JSValue ${class}_constructor(JSContext *ctx, JSValueConst new_target, int argc, JSValueConst *argv);\n'
TemplateConstructor = '''
static JSValue ${func}(JSContext *ctx, JSValueConst new_target, int argc, JSValueConst *argv) {
${class} tmp;
${initializer}
JSValue proto = JS_GetProperty(ctx, new_target, QuickJSBinder::JS_ATOM_prototype);
JSValue obj = JS_NewObjectProtoClass(ctx, proto, QuickJSBinder::get_context_binder(ctx)->get_origin_class_id());
QuickJSBuiltinBinder::bind_builtin_object(ctx, obj, ${type}, &tmp);
JS_FreeValue(ctx, proto);
return obj;
// return QuickJSBuiltinBinder::create_builtin_value(ctx, ${type}, &tmp);
}
'''
TemplateSimplePoolArrays = '''
if (argc == 1) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!JS_IsArray(ctx, argv[0]), (JS_ThrowTypeError(ctx, "Array expected for argument #0 of ${class}(from)")));
#endif
Variant arr = QuickJSBinder::var_to_variant(ctx, argv[0]);
tmp.operator=(arr);
}
'''
TemplatePoolArrays = '''
if (argc == 1) {
if (JS_IsArray(ctx, argv[0])) {
Variant arr = QuickJSBinder::var_to_variant(ctx, argv[0]);
tmp.operator=(arr);
} else if (JS_IsArrayBuffer(argv[0])) {
size_t size;
uint8_t *buffer = JS_GetArrayBuffer(ctx, &size, argv[0]);
if (size) {
if (size % sizeof(${element}) != 0) {
ERR_PRINTS("Length of the ArrayBuffer does not match for ${class}");
}
tmp.resize(size / sizeof(${element}));
copymem(tmp.write().ptr(), buffer, size / sizeof(${element}) * sizeof(${element}));
}
} else if (JS_IsDataView(argv[0])) {
JSValue byte_length = JS_GetPropertyStr(ctx, argv[0], "byteLength");
uint64_t length = QuickJSBinder::js_to_uint64(ctx, byte_length);
JS_FreeValue(ctx, byte_length);
JSValue byte_offset = JS_GetPropertyStr(ctx, argv[0], "byteOffset");
uint64_t offset = QuickJSBinder::js_to_uint64(ctx, byte_offset);
JS_FreeValue(ctx, byte_offset);
size_t size;
JSValue arraybuffer = JS_GetPropertyStr(ctx, argv[0], "buffer");
uint8_t *buffer = JS_GetArrayBuffer(ctx, &size, arraybuffer);
JS_FreeValue(ctx, arraybuffer);
if (length) {
tmp.resize(length / sizeof(${element}));
copymem(tmp.write().ptr(), buffer + offset, length / sizeof(${element}) * sizeof(${element}));
}
} else {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(false, (JS_ThrowTypeError(ctx, "Array or ArrayBuffer expected for argument #0 of ${class}(from)")));
#endif
}
}
'''
ConstructorInitializers = {
"Vector2": '''
if (argc == 2) {
tmp.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.y = QuickJSBinder::js_to_number(ctx, argv[1]);
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::VECTOR2) {
tmp = *bind->getVector2();
}
} else {
tmp.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.y = tmp.x;
}
}
''',
"Vector3": '''
if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::VECTOR3)
tmp = *bind->getVector3();
} else {
tmp.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.z = tmp.y = tmp.x;
}
} else if (argc == 3) {
tmp.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.y = QuickJSBinder::js_to_number(ctx, argv[1]);
tmp.z = QuickJSBinder::js_to_number(ctx, argv[2]);
}
''',
"Color": '''
if (argc >= 3) {
tmp.r = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.g = QuickJSBinder::js_to_number(ctx, argv[1]);
tmp.b = QuickJSBinder::js_to_number(ctx, argv[2]);
tmp.a = (argc >= 4) ? QuickJSBinder::js_to_number(ctx, argv[3]) : 1.0f;
} else if (argc == 1) {
if (JS_IsNumber(argv[0])) {
tmp = Color::hex(QuickJSBinder::js_to_uint(ctx, argv[0]));
} else if (JS_IsString(argv[0])) {
tmp = Color::html(QuickJSBinder::js_to_string(ctx, argv[0]));
} else if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::COLOR) {
tmp = *bind->getColor();
}
}
}
''',
"Rect2": '''
if (argc == 4) {
tmp.position.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.position.y = QuickJSBinder::js_to_number(ctx, argv[1]);
tmp.size.x = QuickJSBinder::js_to_number(ctx, argv[2]);
tmp.size.y = QuickJSBinder::js_to_number(ctx, argv[3]);
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[0]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 0 of Rect2(position, size)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[1]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 1 of Rect2(position, size)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
tmp.position = *param0->getVector2();
tmp.size = *param1->getVector2();
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::RECT2)
tmp = *bind->getRect2();
}
}
''',
"AABB": '''
if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of AABB(position, size)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[1]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 1 of AABB(position, size)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
tmp.position = *param0->getVector3();
tmp.size = *param1->getVector3();
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::AABB)
tmp = *bind->getAABB();
}
}
''',
"Plane": '''
if (argc == 4) {
tmp.normal.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.normal.y = QuickJSBinder::js_to_number(ctx, argv[1]);
tmp.normal.z = QuickJSBinder::js_to_number(ctx, argv[2]);
tmp.d = QuickJSBinder::js_to_number(ctx, argv[3]);
} else if (argc == 3) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Plane(v1, v2, v3)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[1]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 1 of Plane(v1, v2, v3)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[2]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 2 of Plane(v1, v2, v3)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
ECMAScriptGCHandler *param2 = BINDING_DATA_FROM_JS(ctx, argv[2]);
tmp = Plane(*param0->getVector3(), *param1->getVector3(), *param2->getVector3());
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Plane(normal, d)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
tmp = Plane(*param0->getVector3(), QuickJSBinder::js_to_number(ctx, argv[1]));
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::PLANE)
tmp = *bind->getPlane();
}
}
''',
"Quat": '''
if (argc == 4) {
tmp.x = QuickJSBinder::js_to_number(ctx, argv[0]);
tmp.y = QuickJSBinder::js_to_number(ctx, argv[1]);
tmp.z = QuickJSBinder::js_to_number(ctx, argv[2]);
tmp.w = QuickJSBinder::js_to_number(ctx, argv[3]);
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Quat(axis, angle)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
tmp = Quat(*param0->getVector3(), QuickJSBinder::js_to_number(ctx, argv[1]));
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::QUAT) {
tmp = *bind->getQuat();
} else if (bind->type == Variant::BASIS) {
tmp = *bind->getBasis();
} else if (bind->type == Variant::VECTOR3) {
tmp.set_euler(*bind->getVector3());
}
}
}
''',
"Transform2D": '''
if (argc == 3) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[0]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 0 of Transform2D(x_axis, y_axis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[1]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 1 of Transform2D(x_axis, y_axis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[2]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 2 of Transform2D(x_axis, y_axis, origin)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
ECMAScriptGCHandler *param2 = BINDING_DATA_FROM_JS(ctx, argv[2]);
tmp.elements[0].operator=(*param0->getVector2());
tmp.elements[1].operator=(*param1->getVector2());
tmp.elements[2].operator=(*param2->getVector2());
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR2, argv[1]), (JS_ThrowTypeError(ctx, "Vector2 expected for argument 1 of Transform2D(rotation, position)")));
#endif
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
tmp.set_origin(*param1->getVector2());
tmp.set_rotation(QuickJSBinder::js_to_number(ctx, argv[0]));
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::TRANSFORM2D)
tmp = *bind->getTransform2D();
else if (Variant::can_convert(bind->type, Variant::TRANSFORM2D)) {
tmp = bind->get_value();
}
}
}
''',
"Basis": '''
if (argc == 3) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Basis(x_axis, y_axis, z_axis)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[1]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 1 of Basis(x_axis, y_axis, z_axis)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[2]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 2 of Basis(x_axis, y_axis, z_axis)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
ECMAScriptGCHandler *param2 = BINDING_DATA_FROM_JS(ctx, argv[2]);
tmp.elements[0].operator=(*param0->getVector3());
tmp.elements[1].operator=(*param1->getVector3());
tmp.elements[2].operator=(*param2->getVector3());
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Basis(axis, phi)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
tmp.set_axis_angle(*param0->getVector3(), QuickJSBinder::js_to_number(ctx, argv[1]));
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::VECTOR3) {
tmp.set_euler(*bind->getVector3());
} else if (bind->type == Variant::QUAT) {
tmp.set_quat(*bind->getQuat());
} else if (bind->type == Variant::BASIS) {
tmp.operator=(*bind->getBasis());
}
}
}
''',
"Transform": '''
if (argc == 4) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[0]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 0 of Transform(x_axis, y_axis, z_axis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[1]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 1 of Transform(x_axis, y_axis, z_axis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[2]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 2 of Transform(x_axis, y_axis, z_axis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[3]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 3 of Transform(x_axis, y_axis, z_axis, origin)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
ECMAScriptGCHandler *param2 = BINDING_DATA_FROM_JS(ctx, argv[2]);
ECMAScriptGCHandler *param3 = BINDING_DATA_FROM_JS(ctx, argv[3]);
tmp.basis.elements[0].operator=(*param0->getVector3());
tmp.basis.elements[1].operator=(*param1->getVector3());
tmp.basis.elements[2].operator=(*param2->getVector3());
tmp.origin.operator=(*param3->getVector3());
} else if (argc == 2) {
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::BASIS, argv[0]), (JS_ThrowTypeError(ctx, "Basis expected for argument 0 of Transform(basis, origin)")));
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, Variant::VECTOR3, argv[1]), (JS_ThrowTypeError(ctx, "Vector3 expected for argument 1 of Transform(basis, origin)")));
#endif
ECMAScriptGCHandler *param0 = BINDING_DATA_FROM_JS(ctx, argv[0]);
ECMAScriptGCHandler *param1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
tmp.basis.operator=(*param0->getBasis());
tmp.origin.operator=(*param1->getVector3());
} else if (argc == 1) {
if (ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0])) {
if (bind->type == Variant::TRANSFORM) {
tmp.operator=(*bind->getTransform());
} else if (Variant::can_convert(bind->type, Variant::TRANSFORM)) {
tmp.operator=(bind->get_value());
}
}
}
''',
"PoolByteArray": apply_parttern(TemplatePoolArrays, {"class": "PoolByteArray", "type": "Variant::POOL_BYTE_ARRAY", "element": "uint8_t"}),
"PoolIntArray": apply_parttern(TemplatePoolArrays, {"class": "PoolIntArray", "type": "Variant::POOL_INT_ARRAY", "element": "int"}),
"PoolRealArray": apply_parttern(TemplatePoolArrays, {"class": "PoolRealArray", "type": "Variant::POOL_REAL_ARRAY", "element": "real_t"}),
"PoolVector2Array": apply_parttern(TemplatePoolArrays, {"class": "PoolVector2Array", "type": "Variant::POOL_VECTOR2_ARRAY", "element": "Vector2"}),
"PoolVector3Array": apply_parttern(TemplatePoolArrays, {"class": "PoolVector3Array", "type": "Variant::POOL_VECTOR3_ARRAY", "element": "Vector3"}),
"PoolColorArray": apply_parttern(TemplatePoolArrays, {"class": "PoolColorArray", "type": "Variant::POOL_COLOR_ARRAY", "element": "Color"}),
"PoolStringArray": apply_parttern(TemplateSimplePoolArrays,{"class": "PoolStringArray", "type": "Variant::POOL_STRING_ARRAY", "element": "String"}),
}
class_name = cls['name']
constructor_name = apply_parttern(TemplateConstructorName, {"class": class_name})
constructor_declare = apply_parttern(TemplateConstructorDeclare, {"class": class_name})
initializer = ''
if class_name in ConstructorInitializers:
initializer = ConstructorInitializers[class_name]
consturctor = apply_parttern(TemplateConstructor, {
'class': class_name,
'type': VariantTypes[class_name],
'func': constructor_name,
'initializer': initializer
})
return constructor_name, constructor_declare, consturctor
def generate_property_bindings(cls):
class_name = cls['name']
TemplateDeclar = 'static void bind_${class}_properties(JSContext *ctx);\n'
TemplateBind = '\tbind_${class}_properties(ctx);\n'
def generate_members(cls):
Template = '''
JSCFunctionMagic *getter = [](JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic) -> JSValue {
ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, this_val);
const ${class} *ptr = bind->get${class}();
switch (magic) {\
${getters}
}
return JS_UNDEFINED;
};
JSCFunctionMagic *setter = [](JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv, int magic) -> JSValue {
ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, this_val);
${class} *ptr = bind->get${class}();\
${validation}
switch (magic) {\
${setters}
}
return JS_DupValue(ctx, argv[0]);
};
${bindings}
'''
TemplateGetterItem = '''
case ${index}:
return ${value};'''
TemplateSetterItem = '''
case ${index}:
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, ${type}, argv[0]), (JS_ThrowTypeError(ctx, "${type_name} expected for ${class}.${name}")));
#endif
ptr->${native} = ${value};
break;'''
TemplateItemBinding = '\tbinder->get_builtin_binder().register_property(${type}, "${name}", getter, setter, ${index});\n'
getters = ''
setters = ''
bindings = ''
for i in range(len(cls['properties'])):
p = cls['properties'][i]
type = p['type']
name = p['name']
native_name = p['native']
getters += apply_parttern(TemplateGetterItem, {
'index': str(i),
'value': apply_parttern(GodotToJSTemplates[type], { 'arg': apply_parttern('ptr->${native}', {'native': native_name}) })
})
setters += apply_parttern(TemplateSetterItem, {
'index': str(i),
'name': name,
'native': native_name,
'type': VariantTypes[type],
'type_name': type,
'class': class_name,
'value': apply_parttern(JSToGodotTemplates[type], {'arg': 'argv[0]'})
})
bindings += apply_parttern(TemplateItemBinding, {'index': str(i), 'name': name, 'type': VariantTypes[class_name]})
return apply_parttern(Template, {
'class': class_name,
'getters': getters,
'setters': setters,
'bindings': bindings,
'validation': ''
})
def generate_methods(cls):
TemplateMethod = '''
binder->get_builtin_binder().register_method(
${type},
"${name}",
[](JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) {
ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, this_val);
${class} *ptr = bind->get${class}();\
${arg_declars}
${call}
return ${return};
},
${argc});'''
TemplateArgDeclear = '''
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, ${type}, argv[${index}]), (JS_ThrowTypeError(ctx, "${type_name} expected for argument ${index} of ${class}.${name}")));
#endif
const ${godot_type} &arg${index} = ${arg};
'''
TemplateReturnValue = '${godot_type} ret = '
bindings = ''
for m in cls['methods']:
args = ''
arg_declars = ''
for i in range(len(m['arguments'])):
arg = m['arguments'][i]
arg_type = arg['type']
arg_declars += apply_parttern(TemplateArgDeclear, {
'index': str(i),
'type': VariantTypes[arg_type],
'type_name': arg_type,
'class': class_name,
'name': m['name'],
'arg': apply_parttern(JSToGodotTemplates[arg_type], {'arg': 'argv[' + str(i) +']'}),
'godot_type': GodotTypeNames[arg_type],
})
if i > 0: args += ', '
args += 'arg' + str(i)
CallTemplate = ('' if m['return'] == 'void' else (apply_parttern(TemplateReturnValue, {"godot_type": GodotTypeNames[m['return']]}))) + 'ptr->${native_method}(${args});'
call = apply_parttern(CallTemplate, {'native_method': m['native_method'], 'args': args})
bindings += apply_parttern(TemplateMethod, {
"class": class_name,
"type": VariantTypes[class_name],
"name": m['name'],
"call": call,
"arg_declars": arg_declars,
"argc": str(len(m['arguments'])),
"return": 'JS_UNDEFINED' if m['return'] == 'void' else apply_parttern(GodotToJSTemplates[m['return']], {'arg': 'ret'}),
})
return bindings
def generate_constants(cls):
ConstTemplate = '\tbinder->get_builtin_binder().register_constant(${type}, "${name}", ${value});\n'
bindings = ''
for c in cls['constants']:
bindings += apply_parttern(ConstTemplate, {
"name": c['name'],
"type": VariantTypes[class_name],
"value": c['value']
})
return bindings
def genertate_operators(cls):
OperatorMap = {
'operator+': '+',
'operator-': '-',
'operator*': '*',
'operator/': '/',
'operator==': '==',
'operator<': '<'
}
TargetDeclearTemplate = '''
#ifdef DEBUG_METHODS_ENABLED
ERR_FAIL_COND_V(!QuickJSBinder::validate_type(ctx, ${type}, argv[1]), (JS_ThrowTypeError(ctx, "${target_class} expected for ${class}.${operator}")));
#endif
ECMAScriptGCHandler *bind1 = BINDING_DATA_FROM_JS(ctx, argv[1]);
${target_class} *target = bind1->get${target_class}();\
'''
OperatorTemplate = '''
JS_SetPropertyStr(ctx, base_operators, "${js_op}",
JS_NewCFunction(ctx, [](JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv) {
ECMAScriptGCHandler *bind = BINDING_DATA_FROM_JS(ctx, argv[0]);
${class} *ptr = bind->get${class}();\
${target_declear}
${call}
return ${return};
},
"${name}",
${argc})
);
'''
TemplateReturnValue = '${godot_type} ret = '
bindings = '''\
Vector<JSValue> operators;
JSValue base_operators = JS_NewObject(ctx);
'''
for o in cls['operators']:
op = o['native_method']
if op in OperatorMap:
argc = len(o['arguments']) + 1
js_op = OperatorMap[op]
if argc <= 1:
if op in ['operator-']:
js_op = 'neg'
args = ''
target_declear = ''
if argc > 1:
arg_class = o['arguments'][0]['type']
target_declear = apply_parttern(TargetDeclearTemplate, {
'target_class': arg_class,
'type': VariantTypes[arg_class],
'class': class_name,
'operator': o['native_method'],
})
args = '*target'
CallTemplate = ('' if o['return'] == 'void' else apply_parttern(TemplateReturnValue, {'godot_type': GodotTypeNames[o['return']] })) + 'ptr->${op}(${args});'
call = apply_parttern(CallTemplate, {'op': op, 'args': args})
bindings += apply_parttern(OperatorTemplate, {
'type': VariantTypes[class_name],
'class': class_name,
'js_op': js_op,
'call': call,
'name': o['name'],
'target_declear': target_declear,
"return": 'JS_UNDEFINED' if o['return'] == 'void' else apply_parttern(GodotToJSTemplates[o['return']], {'arg': 'ret'}),
'argc': str(argc)
})
bindings += apply_parttern('''
operators.push_back(base_operators);
binder->get_builtin_binder().get_cross_type_operators(${type}, operators);
binder->get_builtin_binder().register_operators(${type}, operators);
''', {'type': VariantTypes[class_name]})
return bindings
TemplateBindDefine = '''
static void bind_${class}_properties(JSContext *ctx) {
QuickJSBinder *binder = QuickJSBinder::get_context_binder(ctx);
${members}
${operators}
${constants}
${methods}
}
'''
class_name = cls['name']
property_declare = apply_parttern(TemplateDeclar, {"class": class_name})
property_defines = apply_parttern(TemplateBindDefine, {
"class": class_name,
"members": generate_members(cls) if len(cls['properties']) else '',
"methods": generate_methods(cls),
"constants": generate_constants(cls),
"operators": genertate_operators(cls),
})
property_bind = apply_parttern(TemplateBind, {"class": class_name})
return property_declare, property_defines, property_bind
def generate_class_bind_action(cls, constructor):
Template = '\tregister_builtin_class(${type}, "${class}", ${constructor}, ${argc});\n'
return apply_parttern(Template, {
'class': cls['name'],
'constructor': constructor,
'type': VariantTypes[cls['name']],
'argc': str(cls['constructor_argc'])
})
def generate_builtin_bindings():
Template = '''\
#include "core/variant.h"
#include "quickjs_binder.h"
#include "quickjs_builtin_binder.h"
#ifndef inf
#define inf INFINITY
#endif
${declarations}
void QuickJSBuiltinBinder::bind_builtin_classes_gen() {
${bindings}
}
${definitions}
'''
declarations = ''
definitions = ''
bindings = ''
for cls in API:
constructor_name, constructor_declare, consturctor = generate_constructor(cls)
declarations += constructor_declare
definitions += consturctor
bindings += generate_class_bind_action(cls, constructor_name)
property_declare, property_defines, property_bind= generate_property_bindings(cls)
declarations += property_declare
definitions += property_defines
bindings += property_bind
output = apply_parttern(Template, {
'declarations': declarations,
'bindings': bindings,
'definitions': definitions,
})
file = open(OUTPUT_FILE, 'w')
file.write(output)
if __name__ == "__main__":
generate_builtin_bindings()
``` |
{
"source": "9526xu/wenda-helper",
"score": 3
} |
#### File: wenda-helper/core/ocr.py
```python
import json
import base64
import requests
from aip import AipOcr
def get_text_from_image_hanwang(image_data, appcode, timeout=3):
"""
Get text from an image using Hanvon (HanWang) OCR
:param image_data:
:param appcode:
:param timeout:
:return:
"""
data = "{\"uid\":\"172.16.17.32\",\"lang\":\"chns\",\"color\":\"color\",\"image\":\"" +bytes.decode(base64.b64encode(image_data)) + "\"}"
headers = {
"Authorization": "APPCODE {0}".format(appcode),
"Content-Type": "application/octet-stream"
}
base_url = "http://table.aliapi.hanvon.com/rt/ws/v1/ocr/table/text/recg"
resp = requests.post(
base_url,
data=str.encode(data),
headers=headers,
timeout=timeout
)
if not resp.ok:
print("汉王OCR识别出错,是不是免费使用次数用完了啊~")
return ""
rjson = resp.json()
if rjson["code"] != "0":
print(rjson["result"])
return ""
return rjson["result"]
def get_text_from_image_baidu(image_data, app_id, app_key, app_secret, api_version=0, timeout=3):
"""
Get text from an image using Baidu OCR
:param image_data:
:param app_id:
:param app_key:
:param app_secret:
:param api_version:
:param timeout:
:return:
"""
client = AipOcr(appId=app_id, apiKey=app_key, secretKey=app_secret)
client.setConnectionTimeoutInMillis(timeout * 1000)
options = {}
options["language_type"] = "CHN_ENG"
if api_version == 1:
result = client.basicAccurate(image_data, options)
else:
result = client.basicGeneral(image_data, options)
if "error_code" in result:
print("百度OCR识别出错,是不是免费使用次数用完了啊~")
return ""
return [words["words"] for words in result["words_result"]]
```
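A hedged usage sketch of the Baidu helper above; the credentials and the screenshot file name are placeholders, not values taken from this project:
```python
# Placeholder credentials / file name -- replace with your own Baidu OCR values.
from core.ocr import get_text_from_image_baidu

with open('screenshot.png', 'rb') as f:
    image_data = f.read()

words = get_text_from_image_baidu(
    image_data,
    app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY', app_secret='YOUR_APP_SECRET',
    api_version=1,   # 1 = accurate endpoint (basicAccurate), 0 = basicGeneral
    timeout=5,
)
print(''.join(words))  # the helper returns a list of recognized text lines
```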
#### File: 9526xu/wenda-helper/main.py
```python
import time
import win32gui
from argparse import ArgumentParser
from pyhooked import Hook, KeyboardEvent
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from core.ocr import get_text_from_image_hanwang, get_text_from_image_baidu
from core.windows import analyze_current_screen_text
import configparser
conf = configparser.ConfigParser()
conf.read("config.ini")
data_directory = conf.get('config',"data_directory")
vm_name = conf.get('config',"vm_name")
app_name = conf.get('config',"app_name")
search_engine = conf.get('config',"search_engine")
hot_key = conf.get('config',"hot_key")
# ocr_engine = 'baidu'
ocr_engine = conf.get('config',"ocr_engine")
### Baidu OCR
app_id = conf.get('config',"app_id")
app_key = conf.get('config',"app_key")
app_secret = conf.get('config',"app_secret")
### 0 = standard recognition
### 1 = high-accuracy recognition
api_version = conf.getint('config',"api_version")  # must be an int: core/ocr.py compares api_version == 1
### Hanvon (HanWang) OCR
hanwan_appcode = conf.get('config',"hanwan_appcode")
def pre_process_question(keyword):
"""
strip quote characters, question marks and whitespace, and drop any leading numbering
:param keyword:
:return:
"""
for char, repl in [("“", ""), ("”", ""), ("?", ""), (" ", ""), ("\t", "")]:
keyword = keyword.replace(char, repl)
keyword = keyword.split(r".")[-1]
keywords = keyword.split(" ")
keyword = "".join([e.strip("\r\n") for e in keywords if e])
return keyword
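# Example (illustrative): pre_process_question('2.hello world') returns 'helloworld' --
# the leading "2." numbering is dropped by split(".")[-1] and remaining spaces are
# squeezed out before the keyword is sent to the search engine.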
def main():
print('我来识别这个题目是啥!!!')
text_binary = analyze_current_screen_text(
label=vm_name,
directory=data_directory
)
if ocr_engine == 'baidu':
print("用百度去OCR识别了!!!\n")
keyword = get_text_from_image_baidu(image_data=text_binary, app_id=app_id, app_key=app_key,
app_secret=app_secret, api_version=api_version, timeout=5)
keyword = "".join([e.strip("\r\n") for e in keyword if e])
else:
print("用汉王去OCR识别了!!!\n")
keyword = get_text_from_image_hanwang(image_data=text_binary, appcode=hanwan_appcode)
if not keyword:
print("没识别出来,随机选吧!!!\n")
print("题目出现的时候按F2,我就自动帮你去搜啦~\n")
return
keyword = pre_process_question(keyword)
if len(keyword) < 2:
print("没识别出来,随机选吧!!!\n")
print("题目出现的时候按F2,我就自动帮你去搜啦~\n")
return
print("我用关键词:\" ", keyword, "\"去百度答案啦!")
elem = browser.find_element_by_id("kw")
elem.clear()
elem.send_keys(keyword)
elem.send_keys(Keys.RETURN)
print("结果在浏览器里啦~\n")
print("题目出现的时候按F2,我就自动帮你去搜啦~\n")
def handle_events(args):
if isinstance(args, KeyboardEvent):
if args.current_key == hot_key and args.event_type == 'key down':
main()
elif args.current_key == 'Q' and args.event_type == 'key down':
hk.stop()
print('退出啦~')
if __name__ == "__main__":
browser = webdriver.Chrome(r'.\tools\chromedriver.exe')
browser.get(search_engine)
hld = win32gui.FindWindow(None, vm_name)
if hld > 0:
print('使用前记得去config.ini把配置改好哦~~,主要是自己申请换key,不然次数很快就用完啦~~\n\n用模拟器打开对应应用~~\n题目出现的时候按F2,我就自动帮你去搜啦~\n')
hk = Hook()
hk.handler = handle_events
hk.hook()
else:
print('咦,你没打开' + vm_name + '吧!请打开' + vm_name + '并重启下start.bat')
``` |
{
"source": "952700/scrapy-plus",
"score": 2
} |
#### File: project_dir/middlewares/spider_middlewares.py
```python
class BaiduSpiderMiddleware(object):
def process_request(self, request):
# process the request
print("BaiduSpiderMiddleware: process_request")
return request
def process_response(self, response):
# process the response
print("BaiduSpiderMiddleware: process_response")
return response
class DoubanSpiderMiddleware(object):
def process_request(self, request):
# process the request
print("DoubanSpiderMiddleware: process_request")
return request
def process_response(self, response):
# process the response
print("DoubanSpiderMiddleware: process_response")
return response
```
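Both middlewares expose the same `process_request` / `process_response` interface. The scrapy-plus engine is not part of this excerpt, so the chaining sketch below is only an assumption about how such middlewares are typically invoked, not the project's actual engine code:
```python
# Assumed calling convention only -- the real engine is not shown in this excerpt.
# Import path assumes project_dir is on PYTHONPATH.
from middlewares.spider_middlewares import BaiduSpiderMiddleware, DoubanSpiderMiddleware

middlewares = [BaiduSpiderMiddleware(), DoubanSpiderMiddleware()]

def pass_request(request):
    for mw in middlewares:
        request = mw.process_request(request)    # each middleware may inspect/replace the request
    return request

def pass_response(response):
    for mw in middlewares:
        response = mw.process_response(response) # each middleware may inspect/replace the response
    return response
```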
#### File: scrapy_plus/core/scheduler.py
```python
from w3lib.url import canonicalize_url # needs to be added as a dependency in requirements.txt
# import hashlib
import hashlib
import six
from ..utils.log import logger
from ..conf import settings
"""
调度器模块
1. 缓存请求对象
2. 请求去重
# 3.2.1-根据配置文件, 是否需要对象请求和指纹进行持久化
1. 如果不需要持久化, 导入内存的队列和去重容器, 否则就导入基于Redis队列和去重容器
2. 修改init方法, 创建去重容器
3. 修改seen_request方法,该为使用去重容器的接口
"""
# 1. 如果不需要持久化, 导入内存的队列和去重容器, 否则就导入基于Redis队列和去重容器
if not settings.SCHEDULE_PERSIST:
# six是专门用于进行python2和python3的兼容的
from six.moves.queue import Queue
from ..utils.set import NormalFilterContainer as FilterContainer
else:
# 导入基于Redis队列
from ..utils.queue import Queue
from ..utils.set import RedisFilterContainer as FilterContainer
class Scheduler(object):
def __init__(self, stats_collector):
# 3.2.1-3: the scheduler receives a stats collector object and uses it to count enqueued requests and filtered-out requests
self.stats_collector = stats_collector
# prepare the queue used to cache request objects
self.queue = Queue()
# 2.0.2-5: count the total number of requests
# self.total_request_count = 0
# define a set used to store fingerprint data
# 2. modify the init method to create the dedup container
self.filter_container = FilterContainer()
# define a variable used to count how many requests were filtered out
# self.filter_reqeust_count = 0
def clear(self):
"""请求Redis中的指纹和请求数据"""
if settings.SCHEDULE_PERSIST:
# 清空Redis队列中数据
self.queue.clear()
# 请求空指纹数据
self.filter_container.clear()
def add_request(self, request):
# 3.2.1-4: use stats_collector to count the enqueued requests and the filtered-out requests
# 3.3.1-2: only filter when filtering is required AND the request is a duplicate
if not request.dont_filter and self.seen_request(request):
# if it is a duplicate, log it
logger.info('过滤掉重复请求:{}'.format(request.url))
# self.filter_reqeust_count += 1
self.stats_collector.incr_filter_request_count()
return
# print(request.url)
# add the request object
self.queue.put(request)
# print('添加请求:{}'.format(request.url))
# 2.0.2-5: every time a request is added, increase total_request_count by 1
# this count is the number of enqueued requests
# self.total_request_count += 1
self.stats_collector.incr_total_request_count()
def get_request(self):
# print("获取请求")
# get a request object from the queue
req = self.queue.get()
# print("取出了:{}".format(req.url))
return req
def seen_request(self, request):
# checks whether this request has already been crawled
# generate the fingerprint corresponding to this request
fp = self._gen_fp(request)
# check whether the fingerprint is already in the container
if self.filter_container.exists(fp):
# return True, meaning this request is a duplicate
return True
# not a duplicate, so add the fingerprint to the filter container
self.filter_container.add_fp(fp)
# return False, meaning it is not a duplicate
return False
def _gen_fp(self, request):
"""
根据请求对象, 生成指纹
:param request: 请求对象
:return: 请求对应指纹
思路:
1. 明确需要使用那些数据生成指纹
1. URL,方法名,params,data
2. 准备数据
3. 把数据添加到sha1算法中
4. 通过sha1获取16进制的指纹
"""
# 2. 准备数据
# 对URL进行规范化处理
url = canonicalize_url(request.url)
# 方法名
method = request.method.upper()
# GET的请求参数
params = request.params if request.params else {}
# 但是字典是无序, 我们把转换为元祖在排序
params = sorted(params.items(), key=lambda x: x[0])
# POST请求的请求体数据
data = request.data if request.data else {}
# 但是字典是无序, 我们把转换为元祖在排序
data = sorted(data.items(), key=lambda x: x[0])
# 3. 获取sha1算法对象
sha1 = hashlib.sha1()
# 更新数据
sha1.update(self.get_bytes_from_str(url))
sha1.update(self.get_bytes_from_str(method))
sha1.update(self.get_bytes_from_str(str(params)))
sha1.update(self.get_bytes_from_str(str(data)))
# 获取十六进制的指纹数据
return sha1.hexdigest()
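# Illustrative note: because params and data are sorted before hashing, two requests
# that differ only in argument order (e.g. ?a=1&b=2 vs ?b=2&a=1) produce the same
# fingerprint, so the second one is filtered out as a duplicate.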
def get_bytes_from_str(self, s):
if six.PY3:
# on py3, str objects need to be encoded
if isinstance(s, str):
return s.encode('utf8')
else:
return s
else:
# on py2, str objects can be returned directly
if isinstance(s, str):
return s
else:
# on py2, encode defaults to ascii, so the explicit 'utf8' here cannot be omitted
return s.encode('utf8')
``` |
{
"source": "9527567/R4ML",
"score": 3
} |
#### File: 9527567/R4ML/python4col.py
```python
def getCol():
colstr = []
for i in range(1,785):
tmpstr = "d"+str(i)
colstr.append(tmpstr)
return colstr
def write(filename,colstr):
with open(filename, 'a') as f:
for i in colstr:
f.write(i+"\n")
if __name__ == '__main__':
colstr = getCol();
write("a.txt",colstr)
``` |
{
"source": "9527567/rdkir4Pharmacophore",
"score": 3
} |
#### File: 9527567/rdkir4Pharmacophore/no_value.py
```python
from rdkit import Chem
from rdkit.Chem import AllChem
import os
from rdkit.Chem import MACCSkeys
from rdkit import DataStructs
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprint
from rdkit import DataStructs
from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker
# read the small-molecule library
def readNsdf(filename):
mols_suppl = Chem.SDMolSupplier(filename)
mols_free = [x for x in mols_suppl if x is not None]
print(mols_free[0])
return mols_free
# read a known active small molecule and build the reference pharmacophore fingerprint
def read1sdf(filename):
mol = Chem.SDMolSupplier(filename)
if len(mol) > 1:
raise Exception("can't read more than one molecule for reference!")
else:
return mol
# generate 3D conformers; how should this be used?
def get3D(mol):
if len(mol) == 1:
m2 = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol)
m3 = Chem.RemoveHs(m2)
return m3
else:
result_3D = []
for x in mol:
temp = Chem.AddHs(x)
AllChem.EmbedMolecule(x)
temp3 = Chem.RemoveHs(temp)
result_3D.append(temp3)
return result_3D
# clustering algorithm: use an external library
# write operation: output results
def writesdf(writename):
pass
# get fingerprints
def getFinger(mols_suppl, method):
fps = []
if method == "top":
for x in mols_suppl:
try:
fps.append(Chem.RDKFingerprint(x))
except:
continue
elif method == "MACCS":
for x in mols_suppl:
try:
fps.append(MACCSkeys.GenMACCSKeys(x))
except:
continue
elif method == "Atoms_pairs":
for x in mols_suppl:
try:
fps.append(Chem.AtomPairs.Pairs.GetAtomPairFingerprint(x))
except:
continue
elif method == "top_torsions":
for x in mols_suppl:
try:
fps.append(Chem.AtomPairs.Torsions.GetTopologicalTorsionFingerprintAsIntVect(x))
except:
continue
else:
raise Exception("Method is bad!")
return fps
# compute similarity; the similarity metric is not independent of the fingerprint method, they depend on each other
def FingerSimilarity(fps, ref_fps, method):
if method == "Tanimoto_top":
similarity = [DataStructs.FingerprintSimilarity(ref_fps, i) for i in fps]
elif method == "Dice_MACCS":
similarity = [DataStructs.FingerprintSimilarity(ref_fps, i,
metric=DataStructs.DiceSimilarity) for i in fps]
elif method == "other":
raise Exception("support bad!")
return similarity
# main; more of this logic should be factored out into separate functions
def __main__():
if len(os.sys.argv) == 1:
raise Exception("please input molecule library!")
elif len(os.sys.argv) == 2:
if os.sys.argv[1] == "--help" or os.sys.argv[1] == "-h":
print("Using it is easy. chack source code then you can run it!")
else:
raise Exception("please input reference molecule!")
else:
mols_suppl = readNsdf(os.sys.argv[1])
print("get", len(mols_suppl), "mol!")
# fps = getFinger(mols_suppl,method = "MACCS")
# FingerSimilarity(fps, fps[0],method = "Dice_MACCS")
__main__()
``` |
{
"source": "9527A/practice",
"score": 3
} |
#### File: 9527A/practice/crawler_gui.py
```python
from tkinter import *
# from tkinter import ttk
from tkinter import scrolledtext
import crawler_downloader
from PIL import Image,ImageTk
import tkinter.messagebox as messagebox
import crawler_mongo
import requests
import os
downloader = []
def button_download():
# t.delete(0.0,END)
# global downloader
# number = crawler_downloader.download()
# # t.insert(INSERT, downloader)
# t.insert(INSERT, '爬取完成\n')
# t.insert(INSERT,'共取得'+str(number)+'页')
win_download = Toplevel(window)
win_download.title('爬虫')
def hitBegin():
if page.get() == '':
messagebox.showerror(title='错误', message='请输入要爬取的页数')
else:
crawler_downloader.download(int(page.get()))
t.insert(INSERT, '爬取完成\n')
t.insert(INSERT,'共取得'+str(page.get())+'页')
win_download.destroy()
page = StringVar()
Label(win_download, text='要爬取的页数:').grid(row=0, column=0)
Entry(win_download, textvariable=page).grid(row=1, column=0)
Button(win_download, text='开始', command=hitBegin).grid(row=1, column=1)
Text(win_download, width=30, height=7).grid(row=2, column=0 ,columnspan=2)
def button_show():
t.delete(0.0,END)
data = crawler_mongo.data_get()
for d in data:
data_name=d['name']
data_src=d['src']
data_intro = d['intro']
t.insert(INSERT, data_name)
t.insert(INSERT, data_intro+'\n')
t.insert(INSERT, data_src)
def button_img(k):
global Photo
t.delete(0.0,END)
data = crawler_mongo.data_get()
d = data[k]
data_name = d['name']
data_src = d['src']
data_intro = d['intro']
t.insert(INSERT, data_name)
t.insert(INSERT, data_intro)
t.insert(INSERT, data_src)
img_r = requests.get(d['src'])
imgData = img_r.content
imgPath = 'a.jpeg'
with open(imgPath, 'wb') as f:
f.write(imgData)
img = Image.open('a.jpeg')
Photo =ImageTk.PhotoImage(image=img)
c.create_image(0, 0, anchor='nw', image=Photo)
def button_next():
global i
i = i+1
button_img(i)
def button_delete():
crawler_mongo.delete()
window = Tk()
window.title("获取小说图片")
win_s = window.winfo_screenwidth()
win_h = window.winfo_screenheight()
root_s = win_s/2
root_h = win_h/2-100
window.geometry('%dx%d+%d+%d' % (root_s,root_h,400,250))  # roughly center the window on the screen
# scroll = Scrollbar()
t = scrolledtext.ScrolledText(window)
t.grid(row=4, column=0, columnspan=2, padx=1, pady=2)
c = Canvas(window, width=240, height=320, bg='gray')
try:
img = Image.open('a.jpeg')
Photo =ImageTk.PhotoImage(image=img)
c.create_image(0, 0, anchor='nw', image=Photo)
except:
pass
c.grid(row=4, column=2, columnspan=2, padx=1, pady=2)
i = 0
Button(window, text='开始爬取', command=lambda:button_download()).grid(row=3, column=0, sticky=W, pady=3)
Button(window, text='显示数据', command=lambda:button_show()).grid(row=3, column=1, sticky=W, pady=3)
Button(window, text='显示图片', command=lambda:button_img(i)).grid(row=3, column=2, sticky=W, pady=3)
Button(window, text='下一张图片', command=lambda:button_next()).grid(row=3, column=3, sticky=W, pady=3)
Button(window, text='清除数据', command=lambda:button_delete()).grid(row=3, column=4, sticky=W, pady=3)
window.mainloop()
```
#### File: 9527A/practice/crawler_mongo.py
```python
import pymongo
def data_seve(mydict):
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["cralwer"]
mycol = mydb["data"]
    for m in mydict:
        # insert each record only if an identical document is not already stored
        if mycol.find_one(m) is None:
            mycol.insert_one(m)
def data_get():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["cralwer"]
mycol = mydb["data"]
m = mycol.find()
return m
def delete():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["cralwer"]
mycol = mydb["data"]
x = mycol.delete_many({})
``` |
{
"source": "954324919/TestInAppium",
"score": 3
} |
#### File: script/py/get_cpu_mem_info.py
```python
import string
import utils
from pychartdir import *
import time
import os
PATH = lambda p: os.path.abspath(p)
# Open the app under test, then run this script. "times" defaults to 20 samples
# (edit it manually to change the count). The script charts the app's CPU and
# memory usage and saves the figure to the chart directory.
# number of top samples
# times = 2
times = 50
# package name of the app currently running on the device
pkg_name = 'com.cmic.mmnes'
# path of the current file
pwd = os.getcwd()
# parent path of the current file; when testing locally it points to classPath:/target
#targetDir = os.path.abspath(os.path.join(os.getcwd(), "../../../../../../../../"))
#print targetDir
# actual output path
chartPath = pwd+os.path.sep+'chart'
# collect CPU and memory usage
def top():
cpu = []
mem = []
    # -i: time interval between samples
top_info = utils.shell("top -n %s | %s %s$" %(str(times), utils.find_util, pkg_name)).stdout.readlines()
for info in top_info:
temp_list = info.split()
cpu.append(temp_list[4])
mem.append(temp_list[8])
return (cpu, mem)
# Draw the line chart; see the ChartDirector documentation for details of these APIs.
def line_chart():
data = top()
cpu_data = []
mem_data = []
    # strip the percent sign from each CPU reading and convert it to an int
for cpu in data[0]:
cpu_data.append(string.atoi(cpu.split("%")[0]))
    # strip the trailing "K" from each memory reading and convert it to MB
for mem in data[1]:
mem_data.append(string.atof(mem.split("K")[0])/1024/10)
    # x-axis labels
labels = []
for i in range(1, times + 1):
labels.append(str(i))
    # automatically set the chart area width
if times <= 50:
xArea = times * 40
elif 50 < times <= 90:
xArea = times * 20
else:
xArea = 1800
c = XYChart(xArea, 800, 0xCCEEFF, 0x000000, 1)
c.setPlotArea(60, 100, xArea - 100, 650)
c.addLegend(50, 30, 0, "arialbd.ttf", 15).setBackground(Transparent)
c.addTitle("cpu and memery info(%s)" %pkg_name, "timesbi.ttf", 15).setBackground(0xCCEEFF, 0x000000, glassEffect())
c.yAxis().setTitle("The numerical", "arialbd.ttf", 12)
c.xAxis().setTitle("Times", "arialbd.ttf", 12)
c.xAxis().setLabels(labels)
    # automatically set the x-axis label step
if times <= 50:
step = 1
else:
step = times / 50 + 1
c.xAxis().setLabelStep(step)
layer = c.addLineLayer()
layer.setLineWidth(2)
layer.addDataSet(cpu_data, 0xff0000, "CPU(%)")
layer.addDataSet(mem_data, 0x008800, "Memory(M)")
# os.chdir("../../../../../../../target") #由于本项目路径的特俗性
# mPath=PATH("%s/chart" %os.getcwd())
if not os.path.isdir(chartPath):
os.makedirs(chartPath)
    # the chart image is saved to the chart directory next to this script
c.makeChart(PATH("%s/%s.png" %(chartPath, utils.timestamp())))
if __name__ == "__main__":
line_chart()
    print 'Analysis chart drawing finished.'
``` |
{
"source": "95616ARG/bazel_python",
"score": 2
} |
#### File: 95616ARG/bazel_python/bazel_python.bzl
```python
load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")
def bazel_python(venv_name = "bazel_python_venv"):
"""Workspace rule setting up bazel_python for a repository.
Arguments
=========
@venv_name should match the 'name' argument given to the
bazel_python_interpreter call in the BUILD file.
"""
native.register_toolchains("//:" + venv_name + "_toolchain")
def bazel_python_interpreter(
python_version,
name = "bazel_python_venv",
requirements_file = None,
**kwargs):
"""BUILD rule setting up a bazel_python interpreter (venv).
Arguments
=========
@python_version should be the Python version string to use (e.g. 3.7.4 is
the standard for DARG projects). You must run the setup_python.sh
script with this version number.
@name is your preferred Bazel name for referencing this. The default should
work unless you run into a name conflict.
@requirements_file should be the name of a file in the repository to use as
the pip requirements.
@kwargs are passed to bazel_python_venv.
"""
bazel_python_venv(
name = name,
python_version = python_version,
requirements_file = requirements_file,
**kwargs
)
# https://stackoverflow.com/questions/47036855
native.py_runtime(
name = name + "_runtime",
files = ["//:" + name],
interpreter = "@bazel_python//:pywrapper.sh",
python_version = "PY3",
)
# https://github.com/bazelbuild/rules_python/blob/master/proposals/2019-02-12-design-for-a-python-toolchain.md
native.constraint_value(
name = name + "_constraint",
constraint_setting = "@bazel_tools//tools/python:py3_interpreter_path",
)
native.platform(
name = name + "_platform",
constraint_values = [
":python3_constraint",
],
)
py_runtime_pair(
name = name + "_runtime_pair",
py3_runtime = name + "_runtime",
)
native.toolchain(
name = name + "_toolchain",
target_compatible_with = [],
toolchain = "//:" + name + "_runtime_pair",
toolchain_type = "@bazel_tools//tools/python:toolchain_type",
)
def _bazel_python_venv_impl(ctx):
"""A Bazel rule to set up a Python virtual environment.
Also installs requirements specified by @ctx.attr.requirements_file.
"""
python_version = ctx.attr.python_version
use_system = False
only_warn = ctx.var.get("BAZEL_PYTHON_ONLY_WARN", "false").lower() == "true"
if "BAZEL_PYTHON_DIR" not in ctx.var:
if only_warn:
print("A bazel-python installation was not found. Falling back to the system python. For reproducibility, please run setup_python.sh for " + python_version)
use_system = True
else:
fail("You must run setup_python.sh for " + python_version)
if use_system:
python_dir = ""
else:
python_parent_dir = ctx.var.get("BAZEL_PYTHON_DIR")
python_dir = python_parent_dir + "/" + python_version
# TODO: Fail if python_dir does not exist.
venv_dir = ctx.actions.declare_directory("bazel_python_venv_installed")
inputs = []
if use_system:
command = ""
else:
command = """
export PATH={py_dir}/bin:$PATH
export PATH={py_dir}/include:$PATH
export PATH={py_dir}/lib:$PATH
export PATH={py_dir}/share:$PATH
export PYTHON_PATH={py_dir}:{py_dir}/bin:{py_dir}/include:{py_dir}/lib:{py_dir}/share
"""
command += """
python3 -m venv {out_dir} || exit 1
source {out_dir}/bin/activate || exit 1
"""
if ctx.attr.requirements_file:
command += "pip3 install -r " + ctx.file.requirements_file.path + " || exit 1"
inputs.append(ctx.file.requirements_file)
for src in ctx.attr.run_after_pip_srcs:
inputs.extend(src.files.to_list())
command += ctx.attr.run_after_pip
command += """
REPLACEME=$PWD/'{out_dir}'
REPLACEWITH='$PWD/bazel_python_venv_installed'
# This prevents sed from trying to modify the directory. We may want to
# do a more targeted sed in the future.
rm -rf {out_dir}/bin/__pycache__ || exit 1
sed -i'' -e s:$REPLACEME:$REPLACEWITH:g {out_dir}/bin/* || exit 1
"""
ctx.actions.run_shell(
command = command.format(py_dir = python_dir, out_dir = venv_dir.path),
inputs = inputs,
outputs = [venv_dir],
)
return [DefaultInfo(files = depset([venv_dir]))]
bazel_python_venv = rule(
implementation = _bazel_python_venv_impl,
attrs = {
"python_version": attr.string(),
"requirements_file": attr.label(allow_single_file = True),
"run_after_pip": attr.string(),
"run_after_pip_srcs": attr.label_list(allow_files = True),
},
)
def bazel_python_coverage_report(name, test_paths, code_paths):
"""Adds a rule to build the coverage report.
@name is the name of the target which, when run, creates the coverage
report.
@test_paths should be a list of the py_test targets for which coverage
has been run. Bash wildcards are supported.
@code_paths should point to the Python code for which you want to compute
the coverage.
"""
test_paths = " ".join([
"bazel-out/*/testlogs/" + test_path + "/test.outputs/outputs.zip"
for test_path in test_paths])
code_paths = " ".join(code_paths)
if "'" in test_paths or "'" in code_paths:
fail("Quotation marks in paths names not yet supported.")
# For generating the coverage report.
native.sh_binary(
name = name,
srcs = ["@bazel_python//:coverage_report.sh"],
deps = [":_dummy_coverage_report"],
args = ["'" + test_paths + "'", "'" + code_paths + "'"],
)
# This is only to get bazel_python_venv as a data dependency for
# coverage_report above. For some reason, this doesn't work if we directly put
# it on the sh_binary. This is a known issue:
# https://github.com/bazelbuild/bazel/issues/1147#issuecomment-428698802
native.sh_library(
name = "_dummy_coverage_report",
srcs = ["@bazel_python//:coverage_report.sh"],
data = ["//:bazel_python_venv"],
)
``` |
{
"source": "95616ARG/PRDNN",
"score": 2
} |
#### File: PRDNN/experiments/acas_ft.py
```python
from timeit import default_timer as timer
import numpy as np
from prdnn import DDNN, FTRepair
from experiments.acas_repair import ACASRepair
class ACASFT(ACASRepair):
"""Experiment testing fine-tuning repair on an ACAS Xu model."""
def do_repair(self, train_regions, train_syrenn, syrenn_time):
n_unique = len(set({tuple(point)
for upolytope in train_syrenn
for pre_poly in upolytope
for point in pre_poly}))
samples_per_plane = [n_unique // len(train_regions) for _ in range(len(train_regions))]
patcher = FTRepair.from_spec_function(
self.network, train_regions, self.property,
samples_per_plane=samples_per_plane)
patcher.epochs = 1000
patcher.lr = 0.001
patcher.momentum = 0.9
patcher.batch_size = 16
patched = patcher.compute()
assert patched is not None
self.record_artifact(patched, f"patched", "network")
self.record_artifact(patcher.inputs, "train_inputs", "pickle")
self.record_artifact(patcher.labels, "train_labels", "pickle")
timing = patcher.timing.copy()
timing["syrenn"] = syrenn_time
self.record_artifact(timing, f"timing", "pickle")
def analyze(self):
"""Analyze the results."""
unpatched = self.read_artifact("unpatched")
self.network = unpatched
patched = self.read_artifact("patched")
timing = self.read_artifact("timing")
train_syrenn = self.read_artifact("train_syrenn")
test_syrenn = self.read_artifact("test_syrenn")
train_inputs = self.read_artifact("train_inputs")
train_labels = self.read_artifact("train_labels")
total_points = sum(len(pre_poly) for upolytope in train_syrenn
for pre_poly in upolytope)
print("Size of repair set:", total_points)
og_train_outputs = np.argmax(unpatched.compute(train_inputs), axis=1)
print("Number of repair set points originally buggy:",
np.sum(og_train_outputs != train_labels))
gen_set, drawdown_set = self.find_counterexamples(unpatched, test_syrenn)
print("Size of generalization, drawdown sets:", len(gen_set), len(drawdown_set))
print("Time (seconds):", timing["total"])
train_outputs = np.argmax(patched.compute(train_inputs), axis=1)
print("Number of repair set points buggy after repair:",
np.sum(train_outputs != train_labels))
dd_desired = self.property(drawdown_set)
dd_outputs = np.argmax(patched.compute(drawdown_set), axis=1)
print("Drawdown-set counterexamples after repair:", np.sum(dd_desired != dd_outputs))
gen_desired = self.property(gen_set)
gen_outputs = np.argmax(patched.compute(gen_set), axis=1)
print("Generalization-set counterexamples after repair:", np.sum(gen_desired != gen_outputs))
return True
if __name__ == "__main__":
ACASFT("acas_ft").main()
```
#### File: PRDNN/experiments/acas_mft.py
```python
from timeit import default_timer as timer
import numpy as np
from pysyrenn import FullyConnectedLayer
from prdnn import DDNN, FTRepair
from experiments.acas_repair import ACASRepair
class ACASMFT(ACASRepair):
"""Experiment testing patching performance on an ACAS Xu model."""
def do_repair(self, train_regions, train_syrenn, syrenn_time):
n_unique = len(set({tuple(point)
for upolytope in train_syrenn
for pre_poly in upolytope
for point in pre_poly}))
samples_per_plane = [n_unique // len(train_regions) for _ in range(len(train_regions))]
_patcher = FTRepair.from_spec_function(
self.network, train_regions, self.property,
samples_per_plane=samples_per_plane)
self.record_artifact(_patcher.inputs, "train_inputs", "pickle")
self.record_artifact(_patcher.labels, "train_labels", "pickle")
patchable = [i for i, layer in enumerate(self.network.layers)
if isinstance(layer, FullyConnectedLayer)]
for layer in patchable:
patcher = FTRepair(self.network, _patcher.inputs.copy(), _patcher.labels.copy())
patcher.layer = layer
patcher.norm_objective = True
patcher.auto_stop = False
patcher.make_holdout_set()
patcher.epochs = 1000
patcher.lr = 0.001
patcher.momentum = 0.9
patcher.batch_size = 16
patched = patcher.compute()
assert patched is not None
self.record_artifact(patched, f"{layer}_patched", "network")
timing = patcher.timing.copy()
timing["syrenn"] = syrenn_time
self.record_artifact(timing, f"{layer}_timing", "pickle")
def analyze(self):
unpatched = self.read_artifact("unpatched")
self.network = unpatched
train_syrenn = self.read_artifact("train_syrenn")
test_syrenn = self.read_artifact("test_syrenn")
train_inputs = self.read_artifact("train_inputs")
train_labels = self.read_artifact("train_labels")
patchable = [i for i, layer in enumerate(self.network.layers)
if isinstance(layer, FullyConnectedLayer)]
for layer in patchable:
print("~~~~~ Layer:", layer, "~~~~~")
patched = self.read_artifact(f"{layer}_patched")
timing = self.read_artifact(f"{layer}_timing")
total_points = sum(len(pre_poly) for upolytope in train_syrenn
for pre_poly in upolytope)
print("Size of repair set:", total_points)
og_train_outputs = np.argmax(unpatched.compute(train_inputs), axis=1)
print("Number of repair set points originally buggy:",
np.sum(og_train_outputs != train_labels))
gen_set, drawdown_set = self.find_counterexamples(unpatched, test_syrenn)
print("Size of generalization, drawdown sets:", len(gen_set), len(drawdown_set))
print("Time (seconds):", timing["total"])
train_outputs = np.argmax(patched.compute(train_inputs), axis=1)
print("Number of repair set points buggy after repair:",
np.sum(train_outputs != train_labels))
dd_desired = self.property(drawdown_set)
dd_outputs = np.argmax(patched.compute(drawdown_set), axis=1)
print("Drawdown-set counterexamples after repair:", np.sum(dd_desired != dd_outputs))
gen_desired = self.property(gen_set)
gen_outputs = np.argmax(patched.compute(gen_set), axis=1)
print("Generalization-set counterexamples after repair:", np.sum(gen_desired != gen_outputs))
return True
if __name__ == "__main__":
ACASMFT("acas_mft").main()
```
#### File: PRDNN/experiments/mnist_mft.py
```python
from collections import defaultdict
import random
from timeit import default_timer as timer
import numpy as np
from pysyrenn import Network
from pysyrenn import ReluLayer
from experiments.mnist_repair import MNISTRepair
from prdnn import FTRepair
class MNISTMFT(MNISTRepair):
"""Attempts to MFT networks to be resillient to corruptions."""
def run(self):
"""Runs the corruption-fine-tuning experiment."""
network = self.load_network("mnist_relu_3_100")
assert isinstance(network.layers[-1], ReluLayer)
network = Network(network.layers[:-1])
self.record_artifact(network, "original", "network")
self.which_params = int(input("Which fine-tuning params? (1 or 2): "))
assert self.which_params in {1, 2}
n_rows = int(input("How many rows of Table 2 to generate (1, 2, 3, or 4): "))
for n_lines in [10, 25, 50, 100][:n_rows]:
print(f"Running with {n_lines} lines")
self.run_for(network, n_lines)
def run_for(self, network, n_lines):
"""Runs experiments for a particular # of lines."""
experiment = f"{n_lines}_lines"
# Get the training lines. Only use lines where the original image is
# correctly classified.
train_lines, train_labels = self.get_corrupted(
"train", n_lines, only_correct_on=network, corruption=self.corruption)
# Compute SyReNN for each line.
start = timer()
train_syrenn = network.exactlines(
train_lines, compute_preimages=True, include_post=False)
syrenn_time = timer() - start
# Record the SyReNNs and the labels.
self.record_artifact(
train_syrenn, f"{experiment}/train_syrenn", "pickle")
self.record_artifact(
train_labels, f"{experiment}/train_labels", "pickle")
# Unpack the SyReNNs and associated labels to points for the patcher.
points, labels = self.sample_like_syrenn(train_syrenn, train_labels)
self.record_artifact(
len(points), f"{experiment}/n_repair_points", "pickle")
for layer in [2, 4]:
print("::::", "Layer:", layer, "::::")
patcher = FTRepair(network, points, labels)
patcher.layer = layer
patcher.norm_objective = True
# Don't stop when full repair-set accuracy is reached, only when
# holdout accuracy gets worse.
patcher.auto_stop = False
patcher.make_holdout_set()
patcher.batch_size = 16
# This is just a maximum epoch timeout, it will stop once all
# constraints are met.
patcher.epochs = 1000
patcher.momentum = 0.9
if self.which_params == 1:
patcher.lr = 0.05
else:
patcher.lr = 0.01
patched = patcher.compute()
patcher.timing["syrenn_time"] = syrenn_time
self.record_artifact(
patcher.epoched_out, f"{experiment}/epoched_out_{layer}", "pickle")
self.record_artifact(
patched, f"{experiment}/patched_{layer}",
"pickle" if patched is None else "network")
self.record_artifact(
patcher.timing, f"{experiment}/timing_{layer}", "pickle")
self.record_artifact(
patcher.accuracy_on_repair_set(patched),
f"{experiment}/patched_efficacy_{layer}", "pickle")
def sample_like_syrenn(self, train_syrenn, train_labels):
points, labels = [], []
for line, label in zip(train_syrenn, train_labels):
start, end = line[0], line[-1]
points.extend([start, end])
# We always want to include the start/end
alphas = np.random.uniform(low=0.0, high=1.0, size=(len(line) - 2))
interpolated = start + np.outer(alphas, end - start)
points.extend(interpolated)
labels.extend(label for _ in range(len(interpolated) + 2))
return points, labels
def analyze(self):
"""Analyze the patched MNIST networks.
Reports: Time, Drawdown, and Generalization
"""
experiments = defaultdict(list)
for artifact in self.artifacts:
if "timing" not in artifact["key"]:
continue
# 10_lines/timing_2
n_lines, layer = artifact["key"].split("/")
experiments[n_lines].append(int(layer.split("_")[1]))
original_network = self.read_artifact("original")
test_lines, test_labels = self.get_corrupted("test", None, corruption=self.corruption)
test_images = list(map(np.array, zip(*test_lines)))
print("Size of drawdown, generalization sets:", len(test_lines))
timing_cols = ["layer", "total", "syrenn", "jacobian", "solver",
"did_timeout", "drawdown", "generalization"]
for experiment in sorted(experiments.keys(), key=lambda n: int(n.split("_")[0])):
print(f"~~~~ Analyzing: {experiment} ~~~~")
# Get the patched data.
train_syrenn = self.read_artifact(f"{experiment}/train_syrenn")
train_labels = self.read_artifact(f"{experiment}/train_labels")
n_repair_points = self.read_artifact(f"{experiment}/n_repair_points")
print("Size of repair set:", n_repair_points)
train_images = list(map(
np.array, zip(*[(l[0], l[-1]) for l in train_syrenn])))
print("Size of drawdown, generalization sets:", len(train_images))
print("Number of f-hat vertex points:",
sum((2*len(l)) - 2 for l in train_syrenn))
before = self.compute_accuracies(original_network,
train_images, train_labels, test_images, test_labels)
results = self.begin_csv(f"{experiment}/analyzed", timing_cols)
for layer in sorted(experiments[experiment]):
print("Layer:", layer)
timing = self.read_artifact(f"{experiment}/timing_{layer}")
patched = self.read_artifact(f"{experiment}/patched_{layer}")
efficacy = 100 * self.read_artifact(f"{experiment}/patched_efficacy_{layer}")
epoched_out = self.read_artifact(f"{experiment}/epoched_out_{layer}")
record = timing.copy()
record["layer"] = layer
record["syrenn"] = record["syrenn_time"]
del record["syrenn_time"]
after = self.compute_accuracies(patched,
train_images, train_labels, test_images, test_labels)
print("\tTime (seconds):", timing["total"])
if epoched_out:
print("\t(Timed Out)")
record["drawdown"] = (before["test_identity"]
- after["test_identity"])
record["generalization"] = (after["test_corrupted"]
- before["test_corrupted"])
print("\tDrawdown:", record["drawdown"])
print("\tGeneralization:", record["generalization"])
print("\tEfficacy:", efficacy, "%")
self.write_csv(results, record)
return True
if __name__ == "__main__":
np.random.seed(24)
random.seed(24)
MNISTMFT("mnist_mft").main()
```
#### File: PRDNN/experiments/mnist_repair.py
```python
from collections import defaultdict
import random
from timeit import default_timer as timer
import numpy as np
from pysyrenn import Network
from pysyrenn import ReluLayer
from experiments.experiment import Experiment
from prdnn import ProvableRepair
class MNISTRepair(Experiment):
"""Attempts to patch networks to be resillient to corruptions."""
corruption = "fog"
def run(self):
"""Runs the corruption-patching experiment."""
network = self.load_network("mnist_relu_3_100")
assert isinstance(network.layers[-1], ReluLayer)
network = Network(network.layers[:-1])
self.record_artifact(network, "original", "network")
n_rows = int(input("How many rows of Table 2 to generate (1, 2, 3, or 4): "))
for n_lines in [10, 25, 50, 100][:n_rows]:
print(f"Running with {n_lines} lines")
self.run_for(network, n_lines)
def run_for(self, network, n_lines):
"""Runs experiments for a particular # of lines."""
experiment = f"{n_lines}_lines"
# Get the training lines. Only use lines where the original image is
# correctly classified.
train_lines, train_labels = self.get_corrupted(
"train", n_lines, only_correct_on=network)
# Compute SyReNN for each line.
start = timer()
train_syrenn = network.exactlines(
train_lines, compute_preimages=True, include_post=False)
syrenn_time = timer() - start
# Record the SyReNNs and the labels.
self.record_artifact(
train_syrenn, f"{experiment}/train_syrenn", "pickle")
self.record_artifact(
train_labels, f"{experiment}/train_labels", "pickle")
# Unpack the SyReNNs and associated labels to points for the patcher.
points, representatives, labels = self.syrenn_to_points(
train_syrenn, train_labels)
for layer in [2, 4]:
print("::::", "Layer:", layer, "::::")
patcher = ProvableRepair(network, layer, points, labels,
representatives=representatives)
patcher.constraint_bufer = 0.001
patcher.gurobi_crossover = 0
patcher.gurobi_timelimit = 90 * 60
patched = patcher.compute()
patcher.timing["syrenn_time"] = syrenn_time
patcher.timing["total"] += syrenn_time
self.record_artifact(
patched, f"{experiment}/patched_{layer}",
"pickle" if patched is None else "ddnn")
self.record_artifact(
patcher.timing, f"{experiment}/timing_{layer}", "pickle")
def analyze(self):
"""Analyze the patched MNIST networks.
Reports: Time, Drawdown, and Generalization
"""
experiments = defaultdict(list)
for artifact in self.artifacts:
if "timing" not in artifact["key"]:
continue
# 10_lines/timing_2
n_lines, layer = artifact["key"].split("/")
experiments[n_lines].append(int(layer.split("_")[1]))
original_network = self.read_artifact("original")
test_lines, test_labels = self.get_corrupted("test", None)
test_images = list(map(np.array, zip(*test_lines)))
print("Size of drawdown, generalization sets:", len(test_lines))
timing_cols = ["layer", "total", "syrenn", "jacobian", "solver",
"did_timeout", "drawdown", "generalization"]
for experiment in sorted(experiments.keys(), key=lambda n: int(n.split("_")[0])):
print(f"~~~~ Analyzing: {experiment} ~~~~")
# Get the patched data.
train_syrenn = self.read_artifact(f"{experiment}/train_syrenn")
train_labels = self.read_artifact(f"{experiment}/train_labels")
train_images = list(map(
np.array, zip(*[(l[0], l[-1]) for l in train_syrenn])))
print("Size of repair set:", len(train_images[0]))
print("Number of f-hat vertex points:",
sum((2*len(l)) - 2 for l in train_syrenn))
before = self.compute_accuracies(original_network,
train_images, train_labels, test_images, test_labels)
results = self.begin_csv(f"{experiment}/analyzed", timing_cols)
for layer in sorted(experiments[experiment]):
timing = self.read_artifact(f"{experiment}/timing_{layer}")
patched = self.read_artifact(f"{experiment}/patched_{layer}")
record = timing.copy()
record["layer"] = layer
record["syrenn"] = record["syrenn_time"]
del record["syrenn_time"]
if patched is None:
record["drawdown"], record["generalization"] = "", ""
else:
after = self.compute_accuracies(patched,
train_images, train_labels, test_images, test_labels)
print("Layer:", layer)
print("\tTime (seconds):", timing["total"])
assert after["train_identity"] == 100.
assert after["train_corrupted"] == 100.
record["drawdown"] = (before["test_identity"]
- after["test_identity"])
record["generalization"] = (after["test_corrupted"]
- before["test_corrupted"])
print("\tDrawdown:", record["drawdown"])
print("\tGeneralization:", record["generalization"])
self.write_csv(results, record)
return True
def compute_accuracies(self, network, train, train_labels, test,
test_labels):
"""Compture train, test accuracy for a network."""
return dict({
"train_identity": self.accuracy(network, train[0], train_labels),
"train_corrupted": self.accuracy(network, train[1], train_labels),
"test_identity": self.accuracy(network, test[0], test_labels),
"test_corrupted": self.accuracy(network, test[1], test_labels),
})
@staticmethod
def accuracy(network, inputs, labels):
"""Measures network accuracy."""
net_labels = np.argmax(network.compute(inputs), axis=1)
return 100. * (np.count_nonzero(np.equal(net_labels, labels))
/ len(labels))
@classmethod
def syrenn_to_points(cls, syrenn, line_labels):
"""Lists all endpoints in an ExactLine/SyReNN representation.
Returns (points, representatives, labels). Representatives are
non-vertex points which should have the same activation pattern in the
network as the corresponding point.
"""
points, representatives, labels = [], [], []
for line, label in zip(syrenn, line_labels):
for start, end in zip(line, line[1:]):
points.extend([start, end])
labels.extend([label, label])
representative = (start + end) / 2.
representatives.extend([representative, representative])
return points, representatives, labels
@staticmethod
def get_corrupted(split, max_count, only_correct_on=None, corruption="fog"):
"""Returns the desired dataset."""
random.seed(24)
np.random.seed(24)
all_images = [
np
.load(f"external/mnist_c/{corruption}/{split}_images.npy")
.reshape((-1, 28 * 28))
for corruption in ("identity", corruption)
]
labels = np.load(f"external/mnist_c/identity/{split}_labels.npy")
indices = list(range(len(labels)))
random.shuffle(indices)
labels = labels[indices]
all_images = [images[indices] / 255. for images in all_images]
if only_correct_on is not None:
outputs = only_correct_on.compute(all_images[0])
outputs = np.argmax(outputs, axis=1)
correctly_labelled = (outputs == labels)
all_images = [images[correctly_labelled] for images in all_images]
labels = labels[correctly_labelled]
lines = list(zip(*all_images))
if max_count is not None:
lines = lines[:max_count]
labels = labels[:max_count]
return lines, labels
if __name__ == "__main__":
np.random.seed(24)
random.seed(24)
MNISTRepair("mnist_repair").main()
```
#### File: PRDNN/prdnn/ddnn.py
```python
import numpy as np
from pysyrenn.frontend.network import Network
from pysyrenn.frontend import FullyConnectedLayer, Conv2DLayer
from pysyrenn.frontend import ReluLayer, HardTanhLayer
from pysyrenn.frontend import ConcatLayer, AveragePoolLayer
from pysyrenn.frontend import MaxPoolLayer, NormalizeLayer
import syrenn_proto.syrenn_pb2 as transformer_pb
import torch
# NOTE: We currently only have limited support for concat layers, namely when
# the intermediate layers are all linear.
LINEAR_LAYERS = (FullyConnectedLayer, Conv2DLayer, ConcatLayer,
AveragePoolLayer, NormalizeLayer)
class DDNN:
"""Implements a DDNN.
Currently supports:
- Arbitrary layers as long as the activation and values parameters are
equal up to that layer.
- Once the activation and values parameters differ, only linear (see
above), ReLU, HardTanh, and MaxPool layers are supported. Support for
other layer types can be added by modifying the compute(...) method.
"""
def __init__(self, activation_layers, value_layers):
"""Constructs the new DDNN.
@activation_layers is a list of layers defining the values of the
activation vectors.
@value_layers is a list of layers defining the values of the value
vectors. Non-linear layers here will be re-interpreted using the
corresponding decoupled value-network layer.
Note that the number, types, and output sizes of the layers in
        @activation_layers and @value_layers should match.
"""
self.activation_layers = activation_layers
self.value_layers = value_layers
self.n_layers = len(activation_layers)
assert self.n_layers == len(value_layers)
try:
self.differ_index = next(
l for l in range(self.n_layers)
if activation_layers[l] is not value_layers[l])
except StopIteration:
self.differ_index = len(value_layers)
def compute(self, inputs, representatives=None):
"""Computes the output of the Decoupled Network on @inputs.
@inputs should be a Numpy array of inputs.
"""
differ_index = self.differ_index
if representatives is not None:
differ_index = 0
# Up to differ_index, the values and activation vectors are the same.
pre_network = Network(self.activation_layers[:differ_index])
mid_inputs = pre_network.compute(inputs)
# Now we have to actually separately handle the masking when
# activations != values.
activation_vector = mid_inputs
if representatives is not None:
activation_vector = pre_network.compute(representatives)
value_vector = mid_inputs
for layer_index in range(differ_index, self.n_layers):
activation_layer = self.activation_layers[layer_index]
value_layer = self.value_layers[layer_index]
if isinstance(activation_layer, LINEAR_LAYERS):
if isinstance(activation_layer, ConcatLayer):
assert not any(
isinstance(input_layer, ConcatLayer)
for input_layer in activation_layer.input_layers)
assert all(
isinstance(input_layer, LINEAR_LAYERS)
for input_layer in activation_layer.input_layers)
activation_vector = activation_layer.compute(activation_vector)
value_vector = value_layer.compute(value_vector)
elif isinstance(activation_layer, ReluLayer):
mask = np.maximum(np.sign(activation_vector), 0.0)
if isinstance(value_vector, np.ndarray):
value_vector *= mask
else:
# NOTE: Originally this was torch.tensor(mask,
# dtype=torch.float). I changed to this to silence a
# warning from Pytorch. I don't think there will be, but it
# might be worth testing for a performance regression.
value_vector *= mask.clone().detach().float()
activation_vector *= mask
elif isinstance(activation_layer, HardTanhLayer):
mask = np.ones_like(value_vector)
value_vector[activation_vector >= 1.0] = 1.0
value_vector[activation_vector <= -1.0] = -1.0
np.clip(activation_vector, -1.0, 1.0, out=activation_vector)
elif isinstance(activation_layer, MaxPoolLayer):
activation_vector, indices = activation_layer.compute(
activation_vector, return_indices=True)
value_vector = value_layer.from_indices(value_vector, indices)
else:
raise NotImplementedError
return value_vector
def serialize(self):
"""Serializes the DDNN to the Protobuf format.
Notably, the value_net only includes layers after differ_index.
"""
serialized = transformer_pb.MaskingNetwork()
serialized.activation_layers.extend([
layer.serialize() for layer in self.activation_layers
])
serialized.value_layers.extend([
layer.serialize()
for layer in self.value_layers[self.differ_index:]
])
serialized.differ_index = self.differ_index
return serialized
@classmethod
def deserialize(cls, serialized):
"""Deserializes the DDNN from the Protobuf format."""
activation_layers = serialized.activation_layers
activation_layers = Network.deserialize_layers(activation_layers)
value_layers = serialized.value_layers
value_layers = Network.deserialize_layers(value_layers)
differ_index = serialized.differ_index
value_layers = activation_layers[:differ_index] + value_layers
return cls(activation_layers, value_layers)
```
#### File: PRDNN/prdnn/ft_repair.py
```python
import random
import sys
import os
from timeit import default_timer as timer
import torch
import numpy as np
from scipy import sparse
from tqdm import tqdm
from pysyrenn.frontend import Network, FullyConnectedLayer
from pysyrenn.frontend import Conv2DLayer, ReluLayer
from pysyrenn.frontend import ConcatLayer, HardTanhLayer
from prdnn.ddnn import DDNN, LINEAR_LAYERS
from prdnn.provable_repair import ProvableRepair
class FTRepair(ProvableRepair):
"""Helper for patching a DDNN.
"""
def __init__(self, network, inputs, labels):
super().__init__(network, -1, inputs, labels)
self.epochs = 100
self.batch_size = 16
self.lr = 0.01
self.momentum = 0.9
self.auto_stop = True
self.norm_objective = False
self.layer = None
self.holdout_set = None
self.verbose = False
def maybe_print(self, *messages):
if self.verbose:
print(*messages)
def compute(self):
network = Network.deserialize(self.network.serialize())
if self.layer is not None:
for param in self.get_parameters(network):
param.requires_grad = False
parameters = self.get_parameters(network, self.layer)
for param in parameters:
param.requires_grad = True
if self.norm_objective:
original_parameters = [param.detach().clone() for param in parameters]
for param in original_parameters:
# Do not train these, they're just for reference.
param.requires_grad = False
start = timer()
optimizer = torch.optim.SGD(parameters, lr=self.lr, momentum=self.momentum)
indices = list(range(len(self.inputs)))
random.seed(24)
self.epoched_out = None
holdout_n_correct = self.holdout_n_correct(network)
for epoch in range(self.epochs):
# NOTE: In the paper, we checked this _after_ the inner loop. It
# should only make a difference in the case where the network
# already met the specification, so should make no difference to
# the results.
if self.auto_stop and self.is_done(network):
self.maybe_print("100% training accuracy!")
self.epoched_out = False
break
random.shuffle(indices)
losses = []
for batch_start in range(0, len(self.inputs), self.batch_size):
batch = slice(batch_start, batch_start + self.batch_size)
inputs = torch.tensor([self.inputs[i] for i in indices[batch]])
labels = torch.tensor([self.labels[i] for i in indices[batch]])
# representatives = [self.representatives[i] for i in indices[batch]]
optimizer.zero_grad()
output = network.compute(inputs)
loss = torch.nn.functional.cross_entropy(output, labels)
if self.norm_objective:
for curr_param, og_param in zip(parameters, original_parameters):
delta = (curr_param - og_param).flatten()
loss += torch.linalg.norm(delta, ord=2)
loss += torch.linalg.norm(delta, ord=float("inf"))
loss.backward()
losses.append(loss)
optimizer.step()
self.maybe_print("Average Loss:", torch.mean(torch.tensor(losses)))
if self.holdout_set is not None:
new_holdout_n_correct = self.holdout_n_correct(network)
self.maybe_print("New holdout n correct:", new_holdout_n_correct, "/", len(self.holdout_set))
if new_holdout_n_correct < holdout_n_correct:
self.maybe_print("Holdout accuracy dropped, ending!")
break
holdout_n_correct = new_holdout_n_correct
else:
self.epoched_out = True
for param in parameters:
param.requires_grad = False
self.timing = dict({
"total": timer() - start,
})
return network
def is_done(self, network):
for batch_start in range(0, len(self.inputs), self.batch_size):
batch = slice(batch_start, batch_start + self.batch_size)
inputs = torch.tensor(self.inputs[batch])
labels = torch.tensor(self.labels[batch])
output = torch.argmax(network.compute(inputs), axis=1)
if not torch.all(output == labels):
return False
return True
def accuracy_on_repair_set(self, network):
n_correct = 0
for batch_start in range(0, len(self.inputs), self.batch_size):
batch = slice(batch_start, batch_start + self.batch_size)
inputs = torch.tensor(self.inputs[batch])
labels = torch.tensor(self.labels[batch])
output = torch.argmax(network.compute(inputs), axis=1)
n_correct += torch.sum(output == labels)
return n_correct / len(self.inputs)
def holdout_n_correct(self, network):
if self.holdout_set is None:
return None
n_correct = 0
for batch_start in range(0, len(self.holdout_set), self.batch_size):
batch = slice(batch_start, batch_start + self.batch_size)
inputs = torch.tensor(self.holdout_set[batch])
labels = torch.tensor(self.holdout_labels[batch])
output = torch.argmax(network.compute(inputs), axis=1)
n_correct += torch.sum(output == labels)
return n_correct
def make_holdout_set(self):
assert self.holdout_set is None
indices = list(range(len(self.inputs)))
random.shuffle(indices)
holdout_indices = indices[:len(indices)//4]
self.holdout_set = self.inputs[holdout_indices]
self.holdout_labels = self.labels[holdout_indices]
self.inputs = [x for i, x in enumerate(self.inputs)
if i not in holdout_indices]
self.labels = [x for i, x in enumerate(self.labels)
if i not in holdout_indices]
@classmethod
def from_planes(cls, network, planes, labels,
samples_per_plane, label_fn=None):
"""Constructs a ProvableRepair to patch 2D regions.
@planes should be a list of input 2D planes (Numpy arrays of their
vertices in counter-clockwise order).
@labels a list of the corresponding desired labels (integers).
"""
points = []
point_labels = []
if labels is None:
labels = [0 for i in planes]
for vertices, label, samples in zip(planes, labels, samples_per_plane):
coefficients = np.random.uniform(
0., 1., size=(samples, len(vertices)))
coefficients = (coefficients.T / np.sum(coefficients, axis=1)).T
points.extend(list(np.matmul(coefficients, vertices)))
if not label_fn:
point_labels.extend(label for _ in range(samples))
if label_fn:
point_labels = label_fn(points)
return cls(network, np.array(points), np.array(point_labels))
@classmethod
def from_spec_function(cls, network, region_plane,
spec_function, samples_per_plane):
"""Constructs a ProvableRepair for an input region and "Spec Function."
@region_plane should be a single plane (Numpy array of
counter-clockwise vertices) that defines the "region of interest"
to patch over.
@spec_function should take a set of input points (Numpy array) and
return the desired corresponding labels (list/Numpy array of ints).
"""
if len(np.asarray(region_plane).shape) == 2:
region_plane = [region_plane]
assert len(np.asarray(region_plane).shape) == 3
return cls.from_planes(network, region_plane, None,
samples_per_plane, label_fn=spec_function)
@classmethod
def get_parameters(cls, network, layer=None):
if layer is not None:
return cls.get_parameters_layer(network.layers[layer])
params = []
for layer in network.layers:
params.extend(cls.get_parameters_layer(layer))
return params
@classmethod
def get_parameters_layer(cls, layer):
if isinstance(layer, FullyConnectedLayer):
return [layer.weights, layer.biases]
if isinstance(layer, Conv2DLayer):
return [layer.filter_weights, layer.biases]
if isinstance(layer, ConcatLayer):
return [param for in_layer in layer.input_layers
for param in cls.get_parameters_layer(in_layer)]
return []
```
#### File: prdnn/tests/test_provable_repair.py
```python
import numpy as np
import torch
import pytest
from pysyrenn.frontend import Network, ReluLayer, FullyConnectedLayer
try:
from external.bazel_python.pytest_helper import main
IN_BAZEL = True
except ImportError:
IN_BAZEL = False
from prdnn.provable_repair import ProvableRepair
def test_already_good():
"""Point-wise test case where all constraints are already met.
We want to make sure that it doesn't change any weights unnecessarily.
"""
network = Network([
FullyConnectedLayer(np.eye(2), np.zeros(shape=(2,))),
ReluLayer(),
])
layer_index = 0
points = [[1.0, 0.5], [2.0, -0.5], [4.0, 5.0]]
labels = [0, 0, 1]
patcher = ProvableRepair(network, layer_index, points, labels)
patched = patcher.compute()
patched_layer = patched.value_layers[0]
assert np.allclose(patched_layer.weights.numpy(), np.eye(2))
assert np.allclose(patched_layer.biases.numpy(), np.zeros(shape=(2,)))
def test_one_point():
"""Point-wise test case with only one constraint.
This leads to weight bounds that are unbounded-on-one-side.
"""
# Where the weight is too big.
network = Network([
FullyConnectedLayer(np.eye(2), np.zeros(shape=(2,))),
ReluLayer(),
FullyConnectedLayer(np.array([[1.0, 0.0], [1.0, 0.0]]),
np.array([0.0, 0.0])),
])
layer_index = 0
points = [[1.0, 0.5]]
labels = [1]
patcher = ProvableRepair(network, layer_index, points, labels)
patched = patcher.compute()
assert np.argmax(patched.compute(points)) == 1
# Where the weight is too small.
network = Network([
FullyConnectedLayer(np.eye(2), np.array([0.0, 0.0])),
ReluLayer(),
FullyConnectedLayer(np.array([[1.0, 0.0], [1.0, 0.0]]), np.array([0.0, 2.0])),
])
layer_index = 0
points = [[1.0, 0.0]]
labels = [0]
patcher = ProvableRepair(network, layer_index, points, labels)
patched = patcher.compute()
assert np.argmax(patched.compute(points)) == 0
def test_optimal():
"""Point-wise test case that the greedy algorithm can solve in 1 step.
All it needs to do is triple the second component.
"""
network = Network([
FullyConnectedLayer(np.eye(2), np.zeros(shape=(2,))),
ReluLayer(),
])
layer_index = 0
points = [[1.0, 0.5], [2.0, -0.5], [5.0, 4.0]]
labels = [1, 0, 1]
patcher = ProvableRepair(network, layer_index, points, labels)
patched = patcher.compute()
assert patched.differ_index == 0
assert np.count_nonzero(
np.argmax(patched.compute(points), axis=1) == labels) == 3
def test_from_planes():
"""Test case to load key points from a set of labeled 2D polytopes.
"""
if not Network.has_connection():
pytest.skip("No server connected.")
network = Network([
ReluLayer(),
FullyConnectedLayer(np.eye(2), np.zeros(shape=(2,)))
])
layer_index = 1
planes = [
np.array([[-1.0, -3.0], [-0.5, -3.0], [-0.5, 9.0], [-1.0, 9.0]]),
np.array([[8.0, -2.0], [16.0, -2.0], [16.0, 6.0], [8.0, 6.0]]),
]
labels = [1, 0]
patcher = ProvableRepair.from_planes(network, layer_index, planes, labels)
assert patcher.network is network
assert patcher.layer_index is layer_index
assert len(patcher.inputs) == (4 + 2) + (4 + 2)
true_key_points = list(planes[0])
true_key_points += [np.array([-1.0, 0.0]), np.array([-0.5, 0.0])]
true_key_points += list(planes[1])
true_key_points += [np.array([8.0, 0.0]), np.array([16.0, 0.0])]
true_labels = ([1] * 6) + ([0] * 6)
for true_point, true_label in zip(true_key_points, true_labels):
try:
i = next(i for i, point in enumerate(patcher.inputs)
if np.allclose(point, true_point))
except StopIteration:
assert False
assert true_label == patcher.labels[i]
def test_from_spec():
"""Test case to load key points from a spec function.
"""
if not Network.has_connection():
pytest.skip("No server connected.")
network = Network([
ReluLayer(),
FullyConnectedLayer(np.eye(2), np.zeros(shape=(2,)))
])
layer_index = 1
region_of_interest = np.array([
[0.5, -3.0],
[1.0, -3.0],
[1.0, 9.0],
[0.5, 9.0],
])
spec_fn = lambda i: np.isclose(i[:, 0], 1.0).astype(np.float32)
patcher = ProvableRepair.from_spec_function(network, layer_index,
region_of_interest, spec_fn)
assert patcher.network is network
assert patcher.layer_index is layer_index
assert len(patcher.inputs) == (4 + 2)
true_key_points = list(region_of_interest)
true_key_points += [np.array([0.5, 0.0]), np.array([1.0, 0.0])]
true_labels = [0, 1, 1, 0, 0, 1]
for true_point, true_label in zip(true_key_points, true_labels):
try:
i = next(i for i, point in enumerate(patcher.inputs)
if np.allclose(point, true_point))
except StopIteration:
assert False
assert true_label == patcher.labels[i]
if IN_BAZEL:
main(__name__, __file__)
``` |
{
"source": "95616ARG/symbolic-abstraction",
"score": 3
} |
#### File: symbolic-abstraction/core/domain.py
```python
class ConjunctiveDomain:
"""Represents a conjunctive domain.
See Definition 3.5 in:
<NAME>. (2014, August). Symbolic Abstraction: Algorithms and
Applications (Ph.D. dissertation). Computer Sciences Department, University
of Wisconsin, Madison.
"""
def model(self, phi):
"""Returns a solution to logical formula phi.
The satisfying model is returned as a ConcreteElement, or None if the
model is unsatisfyable.
"""
raise NotImplementedError
def model_and(self, phi1, phi2):
"""Returns a solution to logical formula phi1 && phi2.
The satisfying model is returned as a ConcreteElement, or None if the
model is unsatisfyable.
This is used in Z3 domains to automatically use the iterative solver
whenever phi1 is the same between calls.
"""
return self.model(self.logic_and([phi1, phi2]))
def gamma_hat(self, alpha):
"""Translates abstract element alpha into a logical formula.
See Definition 3.10 in:
<NAME>. (2014, August). Symbolic Abstraction: Algorithms and
Applications (Ph.D. dissertation). Computer Sciences Department,
University of Wisconsin, Madison.
"""
raise NotImplementedError
def logic_and(self, formulas):
"""Returns the logical and of the given formulas.
"""
raise NotImplementedError
def logic_not(self, formula):
"""Returns the logical negation of the given formula.
"""
raise NotImplementedError
def join(self, elements):
"""Returns the least-upper-bound for elements.
Elements should be a list of AbstractElements. The existence of such an
upper bound is guaranteed by Definition 3.1 for a complete lattice.
"""
raise NotImplementedError
def meet(self, elements):
"""Returns the greatest-lower-bound for elements.
Elements should be a list of AbstractElements. The existence of such a
lower bound is guaranteed by Definition 3.1 for a complete lattice.
"""
raise NotImplementedError
def abstract_consequence(self, lower, upper):
"""Returns the "abstract consequence" of lower and upper.
The abstract consequence must be a superset of lower and *NOT* a
superset of upper.
"""
raise NotImplementedError
def beta(self, sigma):
"""Returns the least abstract state which describes sigma.
Sigma should be a ConcreteElement. See Definition 3.4 in:
<NAME>. (2014, August). Symbolic Abstraction: Algorithms and
Applications (Ph.D. dissertation). Computer Sciences Department,
University of Wisconsin, Madison.
"""
raise NotImplementedError
@property
def top(self):
"""Returns the least-upper-bound of the entire abstract space.
Guaranteed by Definition 3.1
"""
raise NotImplementedError
@property
def bottom(self):
"""Returns the greatest-lower-bound of the entire abstract space.
Guaranteed by Definition 3.1
"""
raise NotImplementedError
def translate(self, translation):
"""Rename variables in the abstract space definition.
Used in frontend/program.py to deal with "primes." We might encode x +=
y as x' = x + y, y' = y, but the user will give us a domain in terms of
x and y (which implicitly should refer to x' and y' at the end of the
program snippet). So we translate the abstract domain given by the user
in terms of x, y by the translation dictionary {"x": "x'", "y": "y'"}.
Note that we use AbstractState.translate(...) to translate back to the
user's preferred naming (x, y).
"""
raise NotImplementedError
```
#### File: domains/interval/domain.py
```python
import z3
from domains.z3_variables import Z3VariablesDomain
from .abstract import Interval, IntervalAbstractState
class IntervalDomain(Z3VariablesDomain):
"""Represents an abstract space over the intervals of variables.
"""
def __init__(self, variables):
"""Constructs a new IntervalDomain, with variables named in variables.
variables should be a list of variable names
"""
Z3VariablesDomain.__init__(self, variables, z3.Int)
def gamma_hat(self, alpha):
"""Returns a formula describing the same states as alpha
"""
conjunctions = []
for name in self.variables:
interval = alpha.interval_of(name)
if isinstance(interval.lower, int):
conjunctions.append(interval.lower <= self.z3_variable(name))
elif interval.lower == float("inf"):
conjunctions.append(False)
if isinstance(interval.upper, int):
conjunctions.append(self.z3_variable(name) <= interval.upper)
elif interval.upper == float("-inf"):
conjunctions.append(False)
return z3.And(*conjunctions)
def join(self, elements):
"""Returns the join of a set of abstract states.
join([ alpha_1, alpha_2, ..., alpha_n ]) is the smallest alpha
containing all alpha_1, ..., alpha_n. It may not be in elements.
"""
joined = self.bottom
for state in elements:
for name in self.variables:
joined_interval = joined.interval_of(name)
state_interval = state.interval_of(name)
union = joined_interval.union(state_interval)
joined.set_interval(name, union)
return joined
def meet(self, elements):
"""Returns the meet of a set of abstract states.
        meet([ alpha_1, alpha_2, ..., alpha_n ]) is the greatest alpha
contained by all alpha_1, ..., alpha_n. It may not be in elements.
"""
met = self.top
for state in elements:
for name in self.variables:
met_interval = met.interval_of(name)
state_interval = state.interval_of(name)
intersection = met_interval.intersection(state_interval)
met.set_interval(name, intersection)
return met
def abstract_consequence(self, lower, upper):
"""Returns the "abstract consequence" of lower and upper.
The abstract consequence must be a superset of lower and *NOT* a
superset of upper.
Note that this is a fairly "simple" abstract consequence, in that it
sets only one variable to a non-top interval. This improves performance
of the SMT solver in many cases. In certain cases, other choices for
the abstract consequence will lead to better algorithm performance.
"""
for variable in self.variables:
proposed = self.top.copy()
proposed.set_interval(variable, lower.interval_of(variable))
if not proposed >= upper:
return proposed
return lower.copy()
# Converts one concrete set of variables into an abstract element
def beta(self, sigma):
"""Returns the least abstract state describing sigma.
Sigma should be an Z3VariablesState. See Definition 3.4 in:
<NAME>. (2014, August). Symbolic Abstraction: Algorithms and
Applications (Ph.D. dissertation). Computer Sciences Department,
University of Wisconsin, Madison.
"""
return IntervalAbstractState(
dict({name: Interval(sigma.value_of(name), sigma.value_of(name))
for name in self.variables}))
@property
def top(self):
"""Returns the least upper bound of the entire abstract space.
"""
top_interval = Interval(float("-inf"), float("inf"))
return IntervalAbstractState(
dict({name: top_interval for name in self.variables}))
@property
def bottom(self):
"""Returns the greatest lower bound of the entire abstract space.
"""
bottom_interval = Interval(float("inf"), float("-inf"))
return IntervalAbstractState(
dict({name: bottom_interval for name in self.variables}))
```
#### File: domains/reduced_product/abstract.py
```python
from core.abstract import AbstractState
# TODO(masotoud): find a different naming scheme that makes this clearer, if
# possible.
# pylint: disable=invalid-name
class ReducedProductAbstractState(AbstractState):
"""Abstract state describing the signs of a collection of variables.
"""
def __init__(self, state_A, state_B):
"""Construct a new ReducedProductAbstractState.
@state_A should be an AbstractState in the first domain while @state_B
should be a state in the second domain. They should both describe the
same set of variables.
"""
self.state_A = state_A
self.state_B = state_B
def copy(self):
"""A new ReducedProductAbstractState representing the same state.
"""
return ReducedProductAbstractState(self.state_A.copy(),
self.state_B.copy())
def __le__(self, rhs):
"""True if self represents a subset of rhs.
Note that this definition means (not (a <= b)) does NOT imply a > b.
Perhaps we should raise an exception when elements are uncomparable.
This assumes that both have the exact same set of variables, and does
not check that condition.
"""
return self.state_A <= rhs.state_A and self.state_B <= rhs.state_B
def translate(self, translation):
"""Rename the variables in the abstract state.
"""
return ReducedProductAbstractState(self.state_A.translate(translation),
self.state_B.translate(translation))
def __str__(self):
"""Human-readable form of the abstract state.
"""
return str(self.state_A) + "\n" + str(self.state_B)
```
#### File: domains/reduced_product/domain.py
```python
import z3
from algorithms import bilateral
from domains.z3_variables import Z3VariablesDomain
from .abstract import ReducedProductAbstractState
# TODO(masotoud): find a different naming scheme that makes this clearer, if
# possible.
# pylint: disable=invalid-name
class ReducedProductDomain(Z3VariablesDomain):
"""Represents an abstract space combining information from two others.
For example, you may track both the interval and parity of a set of integer
variables and want to use information from one analysis to improve the
information in another.
"""
def __init__(self, variables, domain_A, domain_B):
"""Construct a ReducedProductDomain with given variables, sub-domains.
@domain_A, @domain_B should be instantiated Z3VariablesDomains with the
same variables as @variables.
"""
Z3VariablesDomain.__init__(self, variables, z3.Int)
self.domain_A = domain_A
self.domain_B = domain_B
def gamma_hat(self, alpha):
"""Returns a formula describing the same states as alpha.
"""
return z3.And(self.domain_A.gamma_hat(alpha.state_A),
self.domain_B.gamma_hat(alpha.state_B))
def join(self, elements):
"""Returns the join of a set of abstract states.
join([ alpha_1, alpha_2, ..., alpha_n ]) is the smallest alpha
containing all alpha_1, ..., alpha_n. It may not be in elements.
This method DOES reduce after joining.
"""
elements_A = [element.state_A for element in elements]
elements_B = [element.state_B for element in elements]
joined_A = self.domain_A.join(elements_A)
joined_B = self.domain_B.join(elements_B)
joined = ReducedProductAbstractState(joined_A, joined_B)
return self.reduce(joined)
def meet(self, elements):
"""Returns the meet of a set of abstract states.
        meet([ alpha_1, alpha_2, ..., alpha_n ]) is the greatest alpha
contained by all alpha_1, ..., alpha_n. It may not be in elements.
This method DOES NOT reduce after meeting.
TODO(masotoud): We do not reduce here because it can cause issues when
used inside of bilateral (essentially, we'll often meet with Top or
something close to Top, so we end up with infinite ascending chains
when we try to reduce). In the future we should clarify this, perhaps
having separate meet() and meet_reduce() operations, and making sure
join() has similar behavior.
"""
elements_A = [element.state_A for element in elements]
elements_B = [element.state_B for element in elements]
met_A = self.domain_A.meet(elements_A)
met_B = self.domain_B.meet(elements_B)
met = ReducedProductAbstractState(met_A, met_B)
return met
def abstract_consequence(self, lower, upper):
"""Returns the "abstract consequence" of lower and upper.
The abstract consequence must be a superset of lower and *NOT* a
superset of upper.
TODO(masotoud): ensure this is correct.
"""
consequence_A = self.domain_A.abstract_consequence(
lower.state_A, upper.state_A)
consequence_B = self.domain_B.abstract_consequence(
lower.state_B, upper.state_B)
return ReducedProductAbstractState(consequence_A, consequence_B)
# Converts one concrete set of variables into an abstract element
def beta(self, sigma):
"""Returns the least abstract state describing sigma.
Sigma should be an Z3VariablesState. See Definition 3.4 in:
<NAME>. (2014, August). Symbolic Abstraction: Algorithms and
Applications (Ph.D. dissertation). Computer Sciences Department,
University of Wisconsin, Madison.
"""
beta_A = self.domain_A.beta(sigma)
beta_B = self.domain_B.beta(sigma)
return ReducedProductAbstractState(beta_A, beta_B)
@property
def top(self):
"""Returns the least upper bound of the entire abstract space.
"""
top_A = self.domain_A.top
top_B = self.domain_B.top
return ReducedProductAbstractState(top_A, top_B)
@property
def bottom(self):
"""Returns the greatest lower bound of the entire abstract space.
"""
bottom_A = self.domain_A.bottom
bottom_B = self.domain_B.bottom
return ReducedProductAbstractState(bottom_A, bottom_B)
def reduce(self, alpha):
"""'Tightens' the consitutient states as much as possible.
For example, given ReducedProduct<Sign, Interval>, calling
Reduce(Positive, [-5, 5]) should give (Positive, [1, 5]).
"""
reduced_A = bilateral(self.domain_A, self.gamma_hat(alpha))
reduced_B = bilateral(self.domain_B, self.gamma_hat(alpha))
return ReducedProductAbstractState(reduced_A, reduced_B)
def translate(self, translation):
"""Returns a new domain with the variable names translated.
"""
variables = list(map(translation.__getitem__, self.variables))
domain_A = self.domain_A.translate(translation)
domain_B = self.domain_B.translate(translation)
return ReducedProductDomain(variables, domain_A, domain_B)
```
#### File: domains/z3_variables/domain.py
```python
import z3
from core import ConjunctiveDomain
from domains.z3_variables.concrete import Z3VariablesState
# pylint: disable=abstract-method
class Z3VariablesDomain(ConjunctiveDomain):
"""Represents an abstract space modelable by Z3.
"""
def __init__(self, variables, variable_type=z3.Int,
build_z3_variables=True):
"""Constructs a new Z3VariablesDomain, with variables @variables.
Arguments
=========
- @variables should be a list of variable names.
- @variable_type is the z3 variable type that should be used.
"""
self.variables = variables
self.variable_type = variable_type
if build_z3_variables:
self.z3_variables = dict((name, variable_type(name))
for name in self.variables)
else:
self.z3_variables = None
self.concrete_type = Z3VariablesState
self.iterative_solvers = {}
def z3_variable(self, name):
"""Returns the Z3 variable associated with name.
"""
return self.z3_variables[name]
def model(self, phi):
"""Returns a solution to phi.
If none exists (i.e. phi is unsatisfiable), None is returned.
"""
solver = z3.Solver()
solver.add(phi)
if solver.check() == z3.sat:
model = solver.model()
if self.variable_type == z3.Int:
solution = dict((d.name(), model.eval(d()).as_long())
for d in model.decls())
else:
solution = dict((d.name(), model.eval(d()).as_fraction())
for d in model.decls())
for name in self.variables:
if name not in solution:
solution[name] = 0
return Z3VariablesState(solution, self.variable_type)
return None
def model_and(self, phi1, phi2):
"""Returns a solution to phi and phi2.
If none exists (i.e. phi is unsatisfiable), None is returned. This
method will use the iterative solver as long as phi1 has been seen
before.
"""
if id(phi1) not in self.iterative_solvers:
self.iterative_solvers[id(phi1)] = z3.Solver()
self.iterative_solvers[id(phi1)].add(phi1)
solver = self.iterative_solvers[id(phi1)]
solver.push()
solver.add(phi2)
if solver.check() == z3.sat:
model = solver.model()
if self.variable_type == z3.Int:
solution = dict((d.name(), model.eval(d()).as_long())
for d in model.decls())
else:
solution = dict((d.name(), model.eval(d()).as_fraction())
for d in model.decls())
for name in self.variables:
if name not in solution:
solution[name] = 0
solver.pop()
return Z3VariablesState(solution, self.variable_type)
solver.pop()
return None
def logic_and(self, formulas):
"""Returns the logical and of the given formulas.
"""
return z3.And(formulas)
def logic_not(self, formula):
"""Returns the logical negation of the given formula.
"""
return z3.Not(formula)
def translate(self, translation):
return type(self)(list(map(translation.__getitem__, self.variables)))
```
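For readers unfamiliar with the Z3 Python bindings used by `model()` and `model_and()` above, here is a minimal standalone sketch of the same pattern (it needs the `z3-solver` package): build a solver, check satisfiability, and read concrete values out of the model, using `push`/`pop` for the incremental case.
```python
import z3

x, y = z3.Int("x"), z3.Int("y")
solver = z3.Solver()
solver.add(x > 3, x + y == 10)

# Mirror model_and(): push a scope, add a temporary constraint, read the
# model, then pop back to the base constraints.
solver.push()
solver.add(y > 5)
if solver.check() == z3.sat:
    model = solver.model()
    # decls() lists the assigned symbols; as_long() converts Z3 integers.
    values = {d.name(): model.eval(d()).as_long() for d in model.decls()}
    print(values)  # the only solution here is {'x': 4, 'y': 6}
solver.pop()
```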
#### File: tests/test_interval_domain/interval_abstract_test.py
```python
from domains.interval import Interval, IntervalAbstractState
import random
def test_single_interval_comparisons():
random_intervals = []
for _ in range(100):
# These bounds are chosen arbitrarily.
lower = random.randint(-100000, +100000)
upper = random.randint(lower, +100000)
random_intervals.append(Interval(lower, upper))
random_intervals.append(Interval(lower, float("inf")))
random_intervals.append(Interval(float("-inf"), upper))
# First, we test that Top is greater than everything else and Bottom is
# less than everything else.
top = Interval(float("-inf"), float("inf"))
bottom = Interval(float("inf"), float("-inf"))
assert bottom <= top
for interval in random_intervals:
assert bottom <= interval <= top
# Next, we test that nothing else is greater than Top or less than Bottom
for interval in random_intervals:
assert not interval >= top
assert not interval <= bottom
# Non-containing intervals should be incomparable.
assert not (Interval(5, 100) <= Interval(6, 101))
assert not (Interval(5, 100) >= Interval(6, 101))
def test_interval_state_creation_query():
state1 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 5),
"c": Interval(100, 200),
"d": Interval(6, float("inf")),
})
assert state1.interval_of("a") == Interval(-100, 50)
assert state1.interval_of("b") == Interval(float("-inf"), 5)
assert state1.interval_of("c") == Interval(100, 200)
assert state1.interval_of("d") == Interval(6, float("inf"))
def test_interval_state_creation_change_query():
state1 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 5),
"c": Interval(100, 200),
"d": Interval(6, float("inf")),
})
state1.set_interval("a", Interval(-99, 50))
assert state1.interval_of("a") == Interval(-99, 50)
assert state1.interval_of("b") == Interval(float("-inf"), 5)
assert state1.interval_of("c") == Interval(100, 200)
assert state1.interval_of("d") == Interval(6, float("inf"))
def test_interval_state_equality():
state1 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 5),
"c": Interval(100, 200),
"d": Interval(6, float("inf")),
})
state2 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 5),
"c": Interval(100, 200),
"d": Interval(6, float("inf")),
})
assert state1 == state2
state2.set_interval("a", Interval(-99, 50))
assert state1 != state2
def test_interval_state_ineq():
state1 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 5),
"c": Interval(100, 200),
"d": Interval(float("inf"), float("-inf")),
})
state2 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), float("inf")),
"c": Interval(100, 201),
"d": Interval(6, float("inf")),
})
state3 = IntervalAbstractState({
"a": Interval(-100, 50),
"b": Interval(float("-inf"), 4),
"c": Interval(100, 201),
"d": Interval(7, float("inf")),
})
assert state1 <= state2
assert not (state2 <= state1)
assert not (state1 <= state3)
assert not (state3 <= state1)
assert not (state2 <= state3)
assert state3 <= state2
assert state2 >= state1
assert not (state1 >= state2)
assert not (state3 >= state1)
assert not (state1 >= state3)
assert not (state3 >= state2)
assert state2 >= state3
```
#### File: tests/test_sign_domain/sign_concrete_test.py
```python
from domains.z3_variables import Z3VariablesState
def test_sign_concrete_state_creation_query():
state1 = Z3VariablesState({
"a": 1,
"b": -1000,
"c": 50,
"d": 0
})
assert state1.value_of("a") == 1
assert state1.value_of("b") == -1000
assert state1.value_of("c") == 50
assert state1.value_of("d") == 0
def test_sign_concrete_repr():
state1 = Z3VariablesState({
"a": 1,
"b": -1000,
"c": 50,
"d": 0
})
string_repr = repr(state1)
for name, value in state1.variable_values.items():
assert f"{name}: {value}" in string_repr
assert len(string_repr.split(",")) == 4
```
#### File: tests/test_sign_domain/sign_domain_join_test.py
```python
from domains.sign import SignDomain
from domains.sign import Sign, SignAbstractState
def test_join_bottom_top():
domain = SignDomain([ "a", "b", "c" ])
joined = domain.join([ domain.bottom, domain.top ])
assert joined == domain.top
def test_join_three_states():
domain = SignDomain([ "a", "b", "c" ])
state1 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Negative, "c": Sign.Bottom })
state2 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Positive, "c": Sign.Negative })
state3 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Positive, "c": Sign.Negative })
joined = domain.join([ state1, state2, state3 ])
assert joined.sign_of("a") == Sign.Positive
assert joined.sign_of("b") == Sign.Top
assert joined.sign_of("c") == Sign.Negative
def test_meet_three_states():
domain = SignDomain([ "a", "b", "c" ])
state1 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Negative, "c": Sign.Top })
state2 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Positive, "c": Sign.Top })
state3 = SignAbstractState({ "a": Sign.Positive, "b": Sign.Positive, "c": Sign.Top })
met = domain.meet([ state1, state2, state3 ])
assert met.sign_of("a") == Sign.Positive
assert met.sign_of("b") == Sign.Bottom
assert met.sign_of("c") == Sign.Top
def test_abstract_consequence_low_best():
domain = SignDomain([ "a", "b", "c" ])
lower = SignAbstractState({ "a": Sign.Positive, "b": Sign.Top, "c": Sign.Top })
upper = SignAbstractState({ "a": Sign.Positive, "b": Sign.Top, "c": Sign.Top })
consequence = domain.abstract_consequence(lower, upper)
assert consequence == lower
``` |
{
"source": "95616ARG/SyReNN",
"score": 4
} |
#### File: SyReNN/experiments/polar_image.py
```python
import numpy as np
import tqdm
class PolarImage:
"""Polar coordinates plotter.
"""
def __init__(self, plot_shape, png_shape, plot_origin=None, silent=False):
"""Initalizes a new PolarImage with the given shapes.
@plot_shape should be the (Cartesian) bounds on the data to be plotted.
@png_shape should be the shape of the PNG in (height, width)-pixels.
@plot_origin should be (Cartesian) coordinate to place at the center of
the PNG image. If None (the default), the center of the plot region
(@plot_shape) will be used.
"""
self.image = np.full(tuple(png_shape) + (3,), 255).astype(np.uint8)
self.plot_shape = plot_shape
self.png_shape = png_shape
if plot_origin is None:
plot_origin = [self.plot_shape[0] / 2, self.plot_shape[1] / 2]
self.plot_origin = plot_origin
self.silent = silent
def place_rgba(self, image, png_center):
"""Places an RGBA image @image on the plot centered at @png_center.
"""
png_center_y, png_center_x = png_center
image_height, image_width = image.shape[:2]
png_start_y = png_center_y - (image_height // 2)
png_start_x = png_center_x - (image_width // 2)
alphas = np.expand_dims(image[:, :, 3], 3).astype(np.float32) / 255.0
existing = self.image[png_start_y:(png_start_y + image_height),
png_start_x:(png_start_x + image_width)]
new = ((1.0 - alphas) * existing) + (alphas * image[:, :, :3])
self.image[png_start_y:(png_start_y + image_height),
png_start_x:(png_start_x + image_width)] = new
@staticmethod
def hex_to_int(hex_color):
"""Converts a hex color to an integer array.
Only supports strings of the form "#123456"
"""
hex_color = hex_color.replace("#", "")
red = int(hex_color[:2], 16)
green = int(hex_color[2:4], 16)
blue = int(hex_color[4:6], 16)
return [red, green, blue]
def plot_to_png(self, plot_y, plot_x):
"""Converts a plot-coordinate to a png-coordinate (pixel).
Returns (png_y, png_x) with the idea that self.image[png_y, png_x] is
the pixel corresponding to Cartesian point (plot_x, plot_y).
NOTE: The inputs are (plot_y, plot_x), not (plot_x, plot_y).
NOTE: The return value is a float; the caller should decide how to
round to the nearest pixel.
NOTE: Plot y-coordinates are ordered bottom-to-top while PNG
y-coordinates are ordered top-to-bottom!
"""
plot_height, plot_width = self.plot_shape
png_height, png_width = self.png_shape
plot_y += self.plot_origin[0]
plot_x += self.plot_origin[1]
png_y = plot_y * (png_height / plot_height)
png_y = (png_height - 1) - png_y
png_x = plot_x * (png_width / plot_width)
return (png_y, png_x)
def png_to_plot(self, png_y, png_x):
"""Converts a png-coordinate (pixel) to a plot-coordinate.
This is the inverse of @plot_to_png.
NOTE: Plot y-coordinates are ordered bottom-to-top while PNG
y-coordinates are ordered top-to-bottom!
"""
plot_height, plot_width = self.plot_shape
png_height, png_width = self.png_shape
plot_y = (png_height - 1) - png_y
plot_y = plot_y * (plot_height / png_height)
plot_x = png_x * (plot_width / png_width)
return (plot_y - self.plot_origin[0], plot_x - self.plot_origin[1])
@staticmethod
def max_cosine(start, end):
"""Returns max(cos(theta)) for -pi <= start <= theta <= end <= pi.
"""
assert start <= end
if start <= 0.0 <= end:
return +1
return max(np.cos(start), np.cos(end))
@staticmethod
def min_cosine(start, end):
"""Returns min(cos(theta)) for -pi <= start <= theta <= end <= pi.
"""
return min(np.cos(start), np.cos(end))
@staticmethod
def max_sine(start, end):
"""Returns max(sin(theta)) for -pi <= start <= theta <= end <= pi.
"""
if start <= (np.pi / 2.0) <= end:
return +1.0
return max(np.sin(start), np.sin(end))
@staticmethod
def min_sine(start, end):
"""Returns min(sin(theta)) for -pi <= start <= theta <= end <= pi.
"""
assert start <= end
if start <= (-np.pi / 2.0) <= end:
return -1.0
return min(np.sin(start), np.sin(end))
@classmethod
def polar_cartesian_box(cls, vertices):
"""Given a *polar*-polytope, return a *Cartesian* box containing it.
Our main algorithm (below) basically guess-and-checks each pixel, so we
use this to limit the region of guessing.
The basic idea is to decompose the problem into finding Cartesian boxes
around each of the edges, then joining them together.
"""
if len(vertices) > 2:
global_x_box = [np.Infinity, -np.Infinity]
global_y_box = [np.Infinity, -np.Infinity]
# Decompose into edges and join back up.
for start, end in zip(vertices[:-1], vertices[1:]):
x_box, y_box = cls.polar_cartesian_box([start, end])
global_x_box[0] = min(x_box[0], global_x_box[0])
global_y_box[0] = min(y_box[0], global_y_box[0])
global_x_box[1] = max(x_box[1], global_x_box[1])
global_y_box[1] = max(y_box[1], global_y_box[1])
return global_x_box, global_y_box
# This is just a line segment. We effectively find the min/max rho and
# min/max cos/sin on the segment. Multiplying these gives us safe
# upper/lower bounds.
start, end = vertices
start_rho, start_theta = start
end_rho, end_theta = end
min_rho, max_rho = sorted((start_rho, end_rho))
min_theta, max_theta = sorted((start_theta, end_theta))
max_cos = cls.max_cosine(min_theta, max_theta)
min_cos = cls.min_cosine(min_theta, max_theta)
max_sin = cls.max_sine(min_theta, max_theta)
min_sin = cls.min_sine(min_theta, max_theta)
return np.array([
[min(np.floor((min_rho * min_cos, max_rho * min_cos))),
max(np.ceil((min_rho * max_cos, max_rho * max_cos)))],
[min(np.floor((min_rho * min_sin, max_rho * min_sin))),
max(np.ceil((min_rho * max_sin, max_rho * max_sin)))]])
@staticmethod
def polygon_contains(polygon, point):
"""True if @point is inside of @polygon.
NOTE: This uses code from softSurfer, see below this class for
reference. @polygon should be in V-representation (i.e., a Numpy array
of counter-clockwise vertices).
"""
return polyline_contains(polygon, point)
def plot_polygon(self, box, polygon, color):
"""Plots a @polygon given its Cartesian @box, and corresponding @color.
@box should be computed with .polar_cartesian_box(@polygon).
@polygon should be a Numpy array of counter-clockwise vertices
(V-representation polytope) describing a polygon in Polar
space. Note that I think _technically_ any polygon would work, but
I haven't tested it.
@color should be a string hex color to plot, compatible with
.hex_to_int.
"""
x_box, y_box = box
png_y_start, png_x_start = self.plot_to_png(y_box[0], x_box[0])
png_y_start, png_x_start = int(png_y_start), int(png_x_start)
png_y_end, png_x_end = self.plot_to_png(y_box[1], x_box[1])
png_y_end, png_x_end = int(np.ceil(png_y_end)), int(np.ceil(png_x_end))
# These get inverted when we switch to PNG.
png_y_start, png_y_end = sorted((png_y_start, png_y_end))
png_y_start = max(png_y_start - 1, 0)
png_y_end = min(png_y_end + 1, self.image.shape[0])
png_x_start = max(png_x_start - 1, 0)
png_x_end = min(png_x_end + 1, self.image.shape[1])
color = self.hex_to_int(color)
for png_y in range(png_y_start, png_y_end):
if np.array_equiv(self.image[png_y, png_x_start:png_x_end, :],
color):
continue
for png_x in range(png_x_start, png_x_end):
plot_y, plot_x = self.png_to_plot(png_y, png_x)
rho = np.linalg.norm((plot_x, plot_y))
theta = np.arctan2(plot_y, plot_x)
if self.polygon_contains(polygon, [rho, theta]):
self.image[png_y, png_x, :] = color
def window_plot(self, polygons, colors, n_splits):
"""Plots @polygons when possible by quantizing the space.
The basic idea is that, in many plots, there exist Cartesian boxes
("windows") that contain many polygons and all of the same color. This
method plots (a subset of) those boxes by:
1. Slide a window over the PNG image; for each window:
1a. Find (a superset of) all polygons that overlap with the window,
along with their corresponding colors.
1b. If they all have the same color, plot the window as that color
and continue to 1c. Otherwise, go on to the next window.
1c. Find (a subset of) all polygons that lie entirely within the
window, and delete them and their corresponding colors from
@polygons and @colors.
The super/subset parenthesized remarks above refer to the fact that we
over-approximate each polytope with a box to make the steps feasible.
However, when including the super/subset, everything still holds
correctly.
We use tuples of (start, end) for intervals and tuples of (y_interval,
x_interval) for boxes.
"""
def interval_overlaps(interval1, interval2):
"""True if interval1 has an intersection with interval2.
"""
return not (interval1[1] < interval2[0] or
interval2[1] < interval1[0])
def box_overlaps(box1, box2):
"""True if box1 has an overlap with box2.
"""
return (interval_overlaps(box1[0], box2[0]) and
interval_overlaps(box1[1], box2[1]))
def interval_contains(big, small):
"""True if interval "big" entirely contains interval "small."
"""
return big[0] <= small[0] and small[1] <= big[1]
def box_contains(big, small):
"""True if box "big" entirely contains box "small."
"""
return (interval_contains(big[0], small[0]) and
interval_contains(big[1], small[1]))
# Precompute the boxes of all @polygons; after this, we won't use
# @polygons at all, just the boxes.
boxes = list(map(self.polar_cartesian_box, polygons))
y_step = self.image.shape[0] // n_splits
x_step = self.image.shape[1] // n_splits
for y_start in range(0, self.image.shape[0], y_step):
for x_start in range(0, self.image.shape[1], x_step):
y_min, x_min = self.png_to_plot(y_start, x_start)
y_end = min(y_start + y_step, self.image.shape[0])
x_end = min(x_start + x_step, self.image.shape[1])
y_max, x_max = self.png_to_plot(y_end, x_end)
# Note that png_to_plot flips the order of y_coordinates
# (because PNG pixels are indexed top-to-bottom instead of
# bottom-to-top).
window_box = [(x_min, x_max), sorted((y_min, y_max))]
# (1a) Find all overlapping polytopes.
overlapping_i = [i for i, box in enumerate(boxes)
if box_overlaps(window_box, box)]
window_colors = set(colors[i] for i in overlapping_i)
if len(window_colors) != 1:
# (1b) Multiple polytopes (possibly) in this window have
# different colors --- we can't safely plot this window in
# a single color.
continue
# (1b) They all have the same color, so plot it!
window_color = self.hex_to_int(next(iter(window_colors)))
self.image[y_start:y_end, x_start:x_end, :] = window_color
# (1c) Remove entirely-contained polytopes.
contained_i = [i for i in overlapping_i
if box_contains(window_box, boxes[i])]
for i in contained_i[::-1]:
del polygons[i]
del boxes[i]
del colors[i]
def circle_frame(self, rho, color):
"""Plots a circular frame around plot-(0, 0) with min-radius @rho.
The circular frame includes the @rho-circle about the origin and all
circles with radius greater than @rho.
We use this because .window_plot will "color outside the lines," so we
need to clean its mess up before getting a final plot.
The implementation essentially looks for the intersection points in
plot-coordinates then converts them to png-coordinates and corrects
them.
NOTE: This method is untested and probably shouldn't be used in cases
where the @rho-circle extends beyond the bounds of the plot.
"""
for png_y in range(self.image.shape[0]):
plot_y, _ = self.png_to_plot(png_y, 0)
# Now we need to find where the rho-circle intersects with y =
# plot_y. We have y = rho*sin(theta), so the intersection is
# theta = arcsin(y / rho).
theta = np.arcsin(plot_y / rho)
plot_x_intersection = rho * np.abs(np.cos(theta))
_, png_x_intersection = self.plot_to_png(0, -plot_x_intersection)
png_x_intersection = int(png_x_intersection)
self.image[png_y, 0:png_x_intersection, :] = self.hex_to_int(color)
_, png_x_intersection = self.plot_to_png(0, +plot_x_intersection)
png_x_intersection = int(png_x_intersection)
self.image[png_y, png_x_intersection:, :] = self.hex_to_int(color)
def plot_polygons(self, polygons, colors, plot_windows=0):
"""Plots a set of polar polygons on the image.
@polygons should be a list of Numpy arrays, each one a set of vertices
for a particular polygon with shape (n_vertices, 2={rho, theta})
@colors should be a list of string hex colors (prepended with "#")
associated with each polygon in @polygons.
@plot_windows controls whether a first-pass plotting is performed. If
@plot_windows <= 0, no first-pass plotting is performed. If
@plot_windows > 0, then @plot_windows**2 windows will be used for
the first-pass plotting. Note that, while this can significantly
speed up the plotting, it may color pixels outside of @polygons, so
two considerations should be made:
1. @plot_windows should *NOT* be used if you intend to call
@plot_polygons multiple times.
2. Points not covered by @polygons may be colored in when
@plot_windows is used; if necessary, you should manually clear
such points using, eg., .circle_frame if @polygons form a
circle about 0.0 (i.e., partition a rectangle in polar-space).
"""
if plot_windows > 0:
# window_plot modifies polygons/colors, so let's make sure "the buck
# stops here" (callers shouldn't have to worry about it).
polygons = polygons.copy()
colors = colors.copy()
self.window_plot(polygons, colors, plot_windows)
boxes = list(map(self.polar_cartesian_box, polygons))
box_sizes = [(box[0][1] - box[0][0])*(box[1][1] - box[1][0])
for box in boxes]
ordered = sorted(range(len(boxes)), key=lambda i: box_sizes[i],
reverse=True)
for i in tqdm.tqdm(ordered, disable=self.silent):
self.plot_polygon(boxes[i], polygons[i], colors[i])
# Copyright 2001, softSurfer (www.softsurfer.com)
# This code may be freely used and modified for any purpose
# providing that this copyright notice is included with it.
# SoftSurfer makes no warranty for this code, and cannot be held
# liable for any real or imagined damage resulting from its use.
# Users of this code must verify correctness for their application.
# Translated to Python by <NAME> <<EMAIL>>.
# https://www.dgp.toronto.edu/~mac/e-stuff/point_in_polygon.py
# is_left(): tests if a point is Left|On|Right of an infinite line.
# Input: three points P0, P1, and P2
# Return: >0 for P2 left of the line through P0 and P1
# =0 for P2 on the line
# <0 for P2 right of the line
# See: the January 2001 Algorithm "Area of 2D and 3D Triangles and Polygons"
def is_left(P0, P1, P2):
return (P1[0] - P0[0]) * (P2[1] - P0[1]) - (P2[0] - P0[0]) * (P1[1] - P0[1])
# wn_PnPoly(): winding number test for a point in a polygon
# Input: P = a point,
# V[] = vertex points of a polygon
# Return: wn = the winding number (=0 only if P is outside V[])
def polyline_contains(V, P):
wn = 0 # the winding number counter
# repeat the first vertex at end
V = tuple(V[:]) + (V[0],)
# loop through all edges of the polygon
for i in range(len(V)-1): # edge from V[i] to V[i+1]
if V[i][1] <= P[1]: # start y <= P[1]
if V[i+1][1] > P[1]: # an upward crossing
if is_left(V[i], V[i+1], P) > 0: # P left of edge
wn += 1 # have a valid up intersect
else: # start y > P[1] (no test needed)
if V[i+1][1] < P[1]: # a downward crossing
if is_left(V[i], V[i+1], P) < 0: # P right of edge
wn -= 1 # have a valid down intersect
return wn != 0
```
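The winding-number test at the end of the file is easy to sanity-check on a small polygon. A usage sketch (the import path is an assumption; adjust it to wherever `polar_image.py` lives):
```python
# Hypothetical import path for the module above.
from polar_image import polyline_contains

# Counter-clockwise unit square.
square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]

print(polyline_contains(square, (0.5, 0.5)))  # True: winding number 1
print(polyline_contains(square, (2.0, 2.0)))  # False: winding number 0
```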
#### File: SyReNN/models/translate_acas_model.py
```python
import numpy as np
import sys
from_file = sys.argv[1]
values = []
with open(from_file) as nnet:
for line in nnet.readlines():
if line.startswith("//"):
continue
values.extend(line.split(",")[:-1])
assert len(values) == (4 + 8 + 1 + (2*5) +
(2*6) +
(5 * 50) + 50 +
(5 * ((50 * 50) + 50)) +
(50 * 5) + 5)
values = values[(4 + 8 + 1 + (2*5) + (2*6)):]
def read_values(number):
results = values[:number]
assert len(results) == number
del values[:number]
return results
n_inputs = [5, 50, 50, 50, 50, 50, 50]
layers = [(read_values(5 * 50), read_values(50))]
for inner_layer in range(5):
layers.append((read_values(50 * 50), read_values(50)))
layers.append((read_values(50 * 5), read_values(5)))
assert len(values) == 0
to_file = from_file.replace(".nnet", ".eran")
to_file = to_file.replace("ACASXU_run2a_", "")
to_file = to_file.replace("_batch_2000", "")
assert to_file != from_file
with open(to_file, "w") as to_file:
for i, layer in enumerate(layers):
inputs = n_inputs[i]
weights, biases = layer
to_file.write("ReLU\n")
weight_str = "["
for i, weight in enumerate(weights):
if i % inputs == 0 and weight_str != "[":
weight_str = weight_str[:-2]
weight_str += "], ["
weight_str += ("%s, " % weight)
weight_str = "[" + weight_str[:-2] + "]]\n"
to_file.write(weight_str)
bias_str = "["
for bias in biases:
bias_str += ("%s, " % bias)
bias_str += "]\n"
to_file.write(bias_str)
```
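The string-building loop above emits each weight matrix as `outputs` rows of `inputs` values. A small illustrative sketch (not part of the script) showing the equivalent reshape in numpy:
```python
import numpy as np

# Stand-in for read_values(5 * 50) on the first layer, where inputs = 5.
inputs = 5
flat = [str(float(v)) for v in range(5 * 50)]

# Every `inputs` consecutive values form one row, so the matrix written by
# the loop is the same as:
matrix = np.array(flat, dtype=float).reshape(-1, inputs)
print(matrix.shape)  # (50, 5): one row per output neuron
```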
#### File: SyReNN/pip_info/setup.py
```python
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "pysyrenn"
PACKAGES = [
"syrenn_proto",
"pysyrenn",
"pysyrenn.frontend",
"pysyrenn.helpers",
]
META_PATH = "__metadata__.py"
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["torch"]
with open("requirements.txt") as requirements:
reading = False
for line in requirements.readlines():
if line.startswith("# PYSYRENN"):
reading = True
elif line.startswith("# END"):
reading = False
elif line.startswith("#"):
pass
elif reading:
INSTALL_REQUIRES.append(line.strip().split("==")[0])
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "."},
package_data={"": ["pysyrenn/**/*.py"]},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
```
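`find_meta` pulls dunder metadata out of `__metadata__.py` with a multiline regex. A tiny sketch of what that regex matches:
```python
import re

META_FILE = '__version__ = "1.2.3"\n__author__ = "Someone"\n'
match = re.search(r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta="version"),
                  META_FILE, re.M)
print(match.group(1))  # 1.2.3
```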
#### File: pysyrenn/frontend/fullyconnected_layer.py
```python
import numpy as np
import torch
from pysyrenn.frontend.layer import NetworkLayer
import syrenn_proto.syrenn_pb2 as transformer_pb
class FullyConnectedLayer(NetworkLayer):
"""Represents a fully-connected (arbitrary affine) layer in a network.
"""
def __init__(self, weights, biases):
"""Constructs a new FullyConnectedLayer.
"""
if weights is not None:
self.weights = torch.tensor(weights, dtype=torch.float32)
if biases is not None:
self.biases = torch.tensor(biases, dtype=torch.float32)
def compute(self, inputs, jacobian=False):
"""Returns the output of the layer on @inputs.
If @jacobian=True, it only computes the homogeneous portion (i.e., does
not add biases). This can be used to compute the product of the
Jacobian of the layer with its inputs.
"""
is_np = isinstance(inputs, np.ndarray)
if is_np:
inputs = torch.tensor(inputs, dtype=torch.float32)
output = torch.mm(inputs, self.weights)
if not jacobian:
output += self.biases
if is_np:
return output.numpy()
return output
def serialize(self):
"""Serializes the layer for use with the transformer server.
"""
serialized = transformer_pb.Layer()
serialized.fullyconnected_data.weights.extend(
list(self.weights.numpy().flatten()))
serialized.fullyconnected_data.biases.extend(
list(self.biases.numpy().flatten()))
return serialized
@classmethod
def deserialize(cls, serialized):
"""Deserializes from the Protobuf format.
"""
if serialized.WhichOneof("layer_data") == "fullyconnected_data":
weights = np.array(serialized.fullyconnected_data.weights)
biases = np.array(serialized.fullyconnected_data.biases)
weights = weights.reshape((-1, len(biases)))
return cls(weights, biases)
return None
```
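A brief usage sketch of the layer above (this assumes the `pysyrenn` package is importable; the shapes are arbitrary):
```python
import numpy as np
from pysyrenn.frontend.fullyconnected_layer import FullyConnectedLayer

weights = np.random.uniform(size=(3, 2))   # (in_dims, out_dims)
biases = np.random.uniform(size=(2,))
layer = FullyConnectedLayer(weights, biases)

inputs = np.random.uniform(size=(5, 3))    # batch of 5 points
outputs = layer.compute(inputs)            # inputs @ weights + biases
homogeneous = layer.compute(inputs, jacobian=True)  # bias term omitted

assert np.allclose(outputs, inputs @ weights + biases, atol=1e-5)
assert np.allclose(homogeneous, inputs @ weights, atol=1e-5)
```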
#### File: pysyrenn/frontend/normalize_layer.py
```python
import numpy as np
import torch
from pysyrenn.frontend.layer import NetworkLayer
import syrenn_proto.syrenn_pb2 as transformer_pb
class NormalizeLayer(NetworkLayer):
"""Represents a normalization layer in a network.
"""
def __init__(self, means, standard_deviations):
"""Constructs a new NormalizeLayer.
"""
self.means = torch.tensor(means, dtype=torch.float32)
self.standard_deviations = torch.tensor(standard_deviations, dtype=torch.float32)
def compute(self, inputs):
"""Returns the normalized form of @inputs.
"""
is_np = isinstance(inputs, np.ndarray)
if is_np:
inputs = torch.tensor(inputs, dtype=torch.float32)
# Here we assume channels-last ordering, as in ERAN
n_inputs = inputs.shape[0]
inputs = inputs.reshape((-1, len(self.means)))
outputs = (inputs - self.means) / self.standard_deviations
outputs = outputs.reshape((n_inputs, -1))
if is_np:
return outputs.numpy()
return outputs
def serialize(self):
"""Serializes the layer for use with the transformer server.
"""
serialized = transformer_pb.Layer()
serialized.normalize_data.means.extend(
list(self.means.numpy().flatten()))
serialized.normalize_data.standard_deviations.extend(
list(self.standard_deviations.numpy().flatten()))
return serialized
@classmethod
def deserialize(cls, serialized):
"""Deserializes the layer from the Protobuf format.
"""
if serialized.WhichOneof("layer_data") == "normalize_data":
means = np.array(serialized.normalize_data.means)
stds = np.array(serialized.normalize_data.standard_deviations)
return cls(means, stds)
return None
```
#### File: frontend/tests/conv2d_layer.py
```python
import numpy as np
import torch
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend.strided_window_data import StridedWindowData
from pysyrenn.frontend.conv2d_layer import Conv2DLayer
def test_compute():
"""Tests that the Conv2D layer correctly computes a Conv2D.
"""
batch = 101
width = 32
height = 32
channels = 3
stride = (2, 2)
pad = (0, 0)
filter_height = 4
filter_width = 4
out_channels = 5
inputs = np.random.uniform(size=(101, height * width * channels))
# TODO(masotoud): use actual numbers for the filters and actually compute
# true_outputs.
filters = np.zeros(shape=(filter_height, filter_width, channels, out_channels))
biases = np.ones(shape=(out_channels))
# out height/width = (32 - 2) / 2 = 15
true_outputs = np.ones(shape=(batch, 15 * 15 * out_channels))
window_data = StridedWindowData((height, width, channels),
(filter_height, filter_width),
stride, pad, out_channels)
conv2d_layer = Conv2DLayer(window_data, filters, biases)
assert np.allclose(conv2d_layer.compute(inputs), true_outputs)
assert np.allclose(conv2d_layer.compute(inputs, jacobian=True),
np.zeros_like(true_outputs))
torch_inputs = torch.FloatTensor(inputs)
torch_outputs = conv2d_layer.compute(torch_inputs).numpy()
assert np.allclose(torch_outputs, true_outputs)
def test_serialize():
"""Tests Conv2D.{serialize, deserialize}.py.
"""
height, width, channels, out_channels = np.random.choice(
[8, 16, 32, 64, 128], size=4)
window_height, window_width = np.random.choice([2, 4, 8], size=2)
pad = (0, 0)
window_data = StridedWindowData((height, width, channels),
(window_height, window_width),
(window_height, window_width),
pad, out_channels)
filters = np.random.uniform(size=(window_height, window_width,
channels, out_channels))
biases = np.random.uniform(size=(out_channels))
serialized = Conv2DLayer(window_data, filters, biases).serialize()
assert serialized.WhichOneof("layer_data") == "conv2d_data"
serialized_window_data = serialized.conv2d_data.window_data
assert serialized_window_data.in_height == height
assert serialized_window_data.in_width == width
assert serialized_window_data.in_channels == channels
assert serialized_window_data.window_height == window_height
assert serialized_window_data.window_width == window_width
assert serialized_window_data.stride_height == window_height
assert serialized_window_data.stride_width == window_width
assert serialized_window_data.pad_height == 0
assert serialized_window_data.pad_width == 0
assert serialized_window_data.out_channels == out_channels
serialized_filters = np.array(serialized.conv2d_data.filters)
assert np.allclose(serialized_filters.flatten(), filters.flatten())
serialized_biases = np.array(serialized.conv2d_data.biases)
assert np.allclose(serialized_biases.flatten(), biases.flatten())
deserialized = Conv2DLayer.deserialize(serialized)
assert deserialized.serialize() == serialized
serialized.relu_data.SetInParent()
assert Conv2DLayer.deserialize(serialized) is None
main(__name__, __file__)
```
#### File: frontend/tests/network.py
```python
import numpy as np
import torch
import pytest
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend.network import Network
from pysyrenn.frontend.conv2d_layer import Conv2DLayer
from pysyrenn.frontend.fullyconnected_layer import FullyConnectedLayer
from pysyrenn.frontend.relu_layer import ReluLayer
from pysyrenn.frontend.hard_tanh_layer import HardTanhLayer
from pysyrenn.frontend.normalize_layer import NormalizeLayer
from pysyrenn.frontend.argmax_layer import ArgMaxLayer
def test_compute_and_gradients():
"""Tests the Network's compute and compute_gradients methods.
"""
batch = np.random.randint(1, 128)
input_dims = np.random.randint(1, 256)
output_dims = np.random.randint(1, 512)
inputs = np.random.uniform(size=(batch, input_dims))
weights = np.random.uniform(size=(input_dims, output_dims))
biases = np.random.uniform(size=(output_dims))
fullyconnected_layer = FullyConnectedLayer(weights, biases)
relu_layer = ReluLayer()
fullyconnected_outputs = fullyconnected_layer.compute(inputs)
relu_outputs = relu_layer.compute(fullyconnected_outputs)
network = Network([fullyconnected_layer, relu_layer])
network_outputs = network.compute(inputs)
assert np.allclose(network_outputs, relu_outputs)
assert np.allclose(network_outputs, network.compute(list(inputs)))
assert np.allclose(network_outputs[0], network.compute(list(inputs)[0]))
for label in range(output_dims):
gradients = network.compute_gradients(inputs, label)
for i in range(batch):
if fullyconnected_outputs[i, label] <= 0.0:
assert np.allclose(gradients[i], 0.0)
else:
assert np.allclose(gradients[i], weights[:, label])
def test_serialize():
"""Tests the Network's serialize and deserialize methods.
"""
input_dims = np.random.randint(1, 32)
output_dims = np.random.randint(1, 64)
weights = np.random.uniform(size=(input_dims, output_dims))
biases = np.random.uniform(size=(output_dims))
fullyconnected_layer = FullyConnectedLayer(weights, biases)
relu_layer = ReluLayer()
network = Network([fullyconnected_layer, relu_layer])
serialized = network.serialize()
assert len(serialized.layers) == 2
assert serialized.layers[0] == fullyconnected_layer.serialize()
assert serialized.layers[1] == relu_layer.serialize()
deserialized = Network.deserialize(serialized)
assert deserialized.serialize() == serialized
def test_exactlines():
import pysyrenn.frontend.transformer_client
transform_lines_ = pysyrenn.frontend.transformer_client.transform_lines
input_dims = np.random.randint(1, 32)
output_dims = np.random.randint(1, 64)
weights = np.random.uniform(size=(input_dims, output_dims))
biases = np.random.uniform(size=(output_dims))
fullyconnected_layer = FullyConnectedLayer(weights, biases)
relu_layer = ReluLayer()
network = Network([fullyconnected_layer, relu_layer])
lines = list(np.random.uniform(size=(100, 2, input_dims)))
def transform_lines_mock(query_network, query_lines,
query_include_post=False):
assert query_network.serialize() == network.serialize()
if len(query_lines) == 1:
assert np.allclose(query_lines, lines[:1])
else:
assert np.allclose(query_lines, lines)
output_lines = []
for i, line in enumerate(query_lines):
output_lines.append((np.array([0.0, 1.0 / float(i + 1), 1.0]),
np.array([float(2.0 * i)])))
return output_lines
pysyrenn.frontend.transformer_client.transform_lines = transform_lines_mock
ratios = network.exactlines(lines, compute_preimages=False,
include_post=False)
assert np.allclose(ratios, np.array([[0.0, 1.0 / float(i + 1), 1.0]
for i in range(100)]))
ratio = network.exactline(*lines[0], compute_preimages=False,
include_post=False)
assert np.allclose(ratio, ratios[0])
def interpolate(line_i, ratio):
start, end = lines[line_i]
return start + (ratio * (end - start))
preimages = network.exactlines(lines, compute_preimages=True,
include_post=False)
assert np.allclose(preimages, np.array([[interpolate(i, 0.0),
interpolate(i, 1.0 / float(i + 1)),
interpolate(i, 1.0)]
for i in range(100)]))
preimage = network.exactline(*lines[0], compute_preimages=True,
include_post=False)
assert np.allclose(preimage, preimages[0])
transformed = network.exactlines(lines, compute_preimages=True,
include_post=True)
pre, post = zip(*transformed)
assert np.allclose(pre, np.array([[interpolate(i, 0.0),
interpolate(i, 1.0 / float(i + 1)),
interpolate(i, 1.0)]
for i in range(100)]))
assert np.allclose(post, np.array([[float(2.0 * i)] for i in range(100)]))
transformed_single = network.exactline(*lines[0], compute_preimages=True,
include_post=True)
assert np.allclose(transformed_single[0], transformed[0][0])
assert np.allclose(transformed_single[1], transformed[0][1])
def test_fcn_from_eran():
"""Tests loading a fully-connected Network from ERAN format.
"""
path = "fcn_test.eran"
with open(path, "w") as netfile:
netfile.write("ReLU\n[[-1, 2, -3], [-4, 5, -6]]\n[7, 8]\n")
netfile.write("Normalize mean=[1, 2] std=[3, 4]\n")
netfile.write("HardTanh\n[[-8, 7, -6], [-5, 4, -3]]\n[2, 1]\n")
network = Network.from_file(path)
assert len(network.layers) == 5
assert isinstance(network.layers[0], FullyConnectedLayer)
assert np.allclose(network.layers[0].weights,
np.array([[-1, -4], [2, 5], [-3, -6]]))
assert np.allclose(network.layers[0].biases, np.array([[7, 8]]))
assert isinstance(network.layers[1], ReluLayer)
assert isinstance(network.layers[2], NormalizeLayer)
assert np.allclose(network.layers[2].means, np.array([[1, 2]]))
assert np.allclose(network.layers[2].standard_deviations,
np.array([[3, 4]]))
assert isinstance(network.layers[3], FullyConnectedLayer)
assert np.allclose(network.layers[3].weights,
np.array([[-8, -5], [7, 4], [-6, -3]]))
assert np.allclose(network.layers[3].biases, np.array([[2, 1]]))
assert isinstance(network.layers[4], HardTanhLayer)
def test_conv_from_eran():
"""Tests loading a convolutional Network from ERAN format.
"""
path = "conv_test.eran"
with open(path, "w") as netfile:
netfile.write("Conv2D\nReLU, filters=2, kernel_size=[2, 2], ")
netfile.write("input_shape=[16, 16, 2], stride=[10, 10], padding=2\n")
netfile.write("[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],")
netfile.write(" [[[8, 7], [6, 5]], [[4, 3], [2, 1]]]]\n")
netfile.write("[-1, -2]\n")
netfile.write("Affine\n[[1, 2, 3, 4, 5, 6, 7, 8], ")
netfile.write("[5, 6, 7, 8, 9, 10, 11, 12]]\n[-1, -2]\n")
netfile.write("Conv2D\nHardTanh, filters=1, kernel_size=[1, 1], ")
netfile.write("input_shape=[1, 1, 2], stride=[1, 1], padding=0\n")
netfile.write("[[[[1], [2]]]]\n")
netfile.write("[-10]\n")
network = Network.from_file(path)
assert len(network.layers) == 5
assert isinstance(network.layers[0], Conv2DLayer)
assert np.allclose(network.layers[0].filter_weights,
np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[8, 7], [6, 5]], [[4, 3], [2, 1]]]]))
assert np.allclose(network.layers[0].biases, np.array([-1, -2]))
assert network.layers[0].window_data.input_shape == (16, 16, 2)
assert network.layers[0].window_data.window_shape == (2, 2)
assert network.layers[0].window_data.strides == (10, 10)
assert network.layers[0].window_data.padding == (2, 2)
assert network.layers[0].window_data.out_channels == 2
assert isinstance(network.layers[1], ReluLayer)
assert isinstance(network.layers[2], FullyConnectedLayer)
assert np.allclose(network.layers[2].weights,
np.array([[1, 5, 2, 6, 3, 7, 4, 8],
[5, 9, 6, 10, 7, 11, 8, 12]]).T)
assert np.allclose(network.layers[2].biases, np.array([[-1, -2]]))
assert isinstance(network.layers[3], Conv2DLayer)
assert np.allclose(network.layers[3].filter_weights,
np.array([[[[1], [2]]]]))
assert np.allclose(network.layers[3].biases, np.array([-10]))
assert network.layers[3].window_data.input_shape == (1, 1, 2)
assert network.layers[3].window_data.window_shape == (1, 1)
assert network.layers[3].window_data.strides == (1, 1)
assert network.layers[3].window_data.padding == (0, 0)
assert network.layers[3].window_data.out_channels == 1
assert isinstance(network.layers[4], HardTanhLayer)
def test_eran_unimplemented():
"""Tests loading a convolutional Network from ERAN format.
"""
path = "eran_unimplemented.eran"
with open(path, "w") as netfile:
netfile.write("Sin\n")
netfile.write("[[1, 2], [3, 4]]")
try:
network = Network.from_file(path)
assert False
except NotImplementedError:
assert True
with open(path, "w") as netfile:
netfile.write("Conv2D\nSin, filters=1, kernel_size=[1, 1], ")
netfile.write("input_shape=[1, 1, 2], stride=[1, 1], padding=0\n")
netfile.write("[[[[1], [2]]]]\n")
netfile.write("[-10]\n")
try:
network = Network.from_file(path)
assert False
except NotImplementedError:
assert True
def test_squeezenet_from_onnx():
"""Tests loading a SqueezeNet Network from ONNX format.
"""
network = Network.from_file("external/onnx_squeezenet/squeezenet1.1.onnx")
assert len(network.layers) == 40
main(__name__, __file__)
```
#### File: frontend/tests/normalize_layer.py
```python
import numpy as np
import torch
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend.normalize_layer import NormalizeLayer
def test_compute():
"""Tests that the Normalize layer correctly computes.
"""
dims = 1025
batch = 15
inputs = np.random.uniform(size=(batch, dims)).astype(np.float32)
means = np.random.uniform(size=(dims)).astype(np.float32)
stds = np.random.uniform(size=(dims)).astype(np.float32)
true_outputs = (inputs - means) / stds
normalize_layer = NormalizeLayer(means, stds)
assert np.allclose(normalize_layer.compute(inputs), true_outputs)
torch_inputs = torch.FloatTensor(inputs)
torch_outputs = normalize_layer.compute(torch_inputs).numpy()
assert np.allclose(torch_outputs, true_outputs)
def test_serialize():
"""Tests that the Normalize layer correctly [de]serializes itself.
"""
n_dims = 129
means = np.random.uniform(size=(n_dims))
stds = np.random.uniform(size=(n_dims))
serialized = NormalizeLayer(means, stds).serialize()
assert serialized.WhichOneof("layer_data") == "normalize_data"
serialized_means = np.array(serialized.normalize_data.means)
assert np.allclose(serialized_means.flatten(), means.flatten())
serialized_stds = np.array(serialized.normalize_data.standard_deviations)
assert np.allclose(serialized_stds.flatten(), stds.flatten())
deserialized = NormalizeLayer.deserialize(serialized)
assert deserialized.serialize() == serialized
serialized.relu_data.SetInParent()
assert NormalizeLayer.deserialize(serialized) is None
main(__name__, __file__)
```
#### File: frontend/tests/relu_layer.py
```python
import numpy as np
import torch
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend.relu_layer import ReluLayer
def test_compute():
"""Tests that the ReLU layer correctly computes a ReLU.
"""
inputs = np.random.uniform(size=(101, 1025))
true_relu = np.maximum(inputs, 0.0)
relu_layer = ReluLayer()
assert np.allclose(relu_layer.compute(inputs), true_relu)
torch_inputs = torch.FloatTensor(inputs)
torch_outputs = relu_layer.compute(torch_inputs).numpy()
assert np.allclose(torch_outputs, true_relu)
def test_serialize():
"""Tests that the ReLU layer correctly [de]serializes itself.
"""
serialized = ReluLayer().serialize()
assert serialized.WhichOneof("layer_data") == "relu_data"
deserialized = ReluLayer.deserialize(serialized)
assert deserialized.serialize() == serialized
serialized.normalize_data.SetInParent()
assert ReluLayer.deserialize(serialized) is None
main(__name__, __file__)
```
#### File: pysyrenn/helpers/integrated_gradients.py
```python
import numpy as np
import gc
class IntegratedGradients:
"""Class to orchestrate computation of Integrated Gradients.
"""
def __init__(self, network, lines, batch_size=1024):
"""Initializes a new Integrated Gradients computer class.
@batch_size is the maximum number of points to compute gradients for at
a time, used to control memory usage.
"""
self.network = network
self.lines = lines
self.batch_size = batch_size
self.partially_computed = False
self.exactlines = None
self.n_samples = None
self.attributions = dict()
def partial_compute(self):
"""Computes the sampling regions needed to get an exact IG computation.
"""
if self.partially_computed:
return
self.exactlines = self.network.exactlines(
self.lines, compute_preimages=False, include_post=False)
self.n_samples = [len(endpoints) - 1 for endpoints in self.exactlines]
self.partially_computed = True
def compute_attributions(self, label):
"""Computes IG attributions for output label @label.
"""
if label in self.attributions:
return self.attributions[label]
self.partial_compute()
self.attributions[label] = []
for i, (start, end) in enumerate(self.lines):
delta = end - start
endpoints = self.exactlines[i]
attributions = np.zeros_like(start)
for batch_start in range(0, self.n_samples[i], self.batch_size):
batch_end = batch_start + self.batch_size
batch_endpoints = endpoints[batch_start:batch_end]
sample_points = (batch_endpoints[:-1] + batch_endpoints[1:])
sample_points /= 2.0
sample_points = start + np.outer(sample_points, delta)
gradients = self.network.compute_gradients(sample_points,
label)
for j, region_gradient in enumerate(gradients):
region_start, region_end = batch_endpoints[j:(j + 2)]
region_size = region_end - region_start
attributions += region_size * region_gradient
del gradients
gc.collect()
attributions *= delta
self.attributions[label].append(attributions)
return self.attributions[label]
```
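For context, integrated gradients along a line from `start` to `end` for output `label` is `(end - start) * integral_0^1 grad F_label(start + alpha * (end - start)) d(alpha)`, taken componentwise. Most implementations approximate the integral with a fixed number of samples; the class above instead uses SyReNN's exact line decomposition, so every linear region contributes exactly its measure. A minimal sketch of the usual sampled (midpoint-rule) approximation, for comparison; the gradient callback is a toy stand-in:
```python
import numpy as np

def riemann_integrated_gradients(gradient_fn, start, end, n_samples=256):
    """Midpoint-rule approximation of integrated gradients along start->end.

    gradient_fn(points) must return the gradient of the chosen output at
    each point, with the same shape as points.
    """
    delta = end - start
    alphas = (np.arange(n_samples) + 0.5) / n_samples   # midpoints in (0, 1)
    points = start + np.outer(alphas, delta)             # (n_samples, dims)
    average_gradient = gradient_fn(points).mean(axis=0)
    return delta * average_gradient

# Toy check: F(x) = sum(x^2) has gradient 2x, and the exact IG from 0 to x
# is x * x per dimension.
print(riemann_integrated_gradients(lambda p: 2.0 * p,
                                   np.zeros(3), np.array([1.0, 2.0, 3.0])))
# -> approximately [1. 4. 9.]
```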
#### File: helpers/tests/integrated_gradients.py
```python
import numpy as np
import torch
import pytest
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend import Network, ReluLayer
from pysyrenn.helpers.integrated_gradients import IntegratedGradients
def test_compute_from_network():
"""Tests the it works given an arbitrary network and lines.
"""
if not Network.has_connection():
pytest.skip("No server connected.")
network = Network([ReluLayer()])
lines = [(np.array([0.0, 1.0]), np.array([0.0, -1.0])),
(np.array([2.0, 3.0]), np.array([4.0, 3.0]))]
helper = IntegratedGradients(network, lines)
helper.partial_compute()
assert len(helper.exactlines) == len(lines)
assert np.allclose(helper.exactlines[0], [0.0, 0.5, 1.0])
assert np.allclose(helper.exactlines[1], [0.0, 1.0])
assert helper.n_samples == [2, 1]
attributions_0 = helper.compute_attributions(0)
assert len(attributions_0) == len(lines)
# The second component doesn't affect the 0-label at all, and the first
# component is 0 everywhere, so we have int_0^0 0.0dx = 0.0
assert np.allclose(attributions_0[0], [0.0, 0.0])
# Gradient of the 0-label is (1.0, 0.0) everywhere since it's in the first
# orthant, and the partition has a size of (2.0, 0.0), so the IG is (2.0,
# 0.0).
assert np.allclose(attributions_0[1], [2.0, 0.0])
attributions_1 = helper.compute_attributions(1)
assert len(attributions_1) == len(lines)
# The gradient in the first partition is (0.0, 1.0) with a size of (0.0,
# -1.0) -> contribution of (0.0, -1.0). In the second partition, (0.0,
# 0.0)*(0.0, -1.0) = (0.0, 0.0).
assert np.allclose(attributions_1[0], [0.0, -1.0])
# Gradient is (0, 1) and the size is (2, 0) so IG is (0, 0).
assert np.allclose(attributions_1[1], [0.0, 0.0])
attributions_1_re = helper.compute_attributions(1)
# Ensure it doesn't re-compute the attributions.
assert attributions_1 is attributions_1_re
main(__name__, __file__)
```
#### File: SyReNN/scripts/keras_to_syrenn.py
```python
import tensorflow.keras as keras
import numpy as np
import pysyrenn
def keras_to_syrenn(model):
"""Converts a sequential Keras model to a SyReNN Network.
Note that this conversion code makes a number of not-always-valid
assumptions about the model; you should *always* manually verify that the
returned SyReNN network has the same (within a small epsilon) output as the
Keras model.
"""
syrenn_layers = []
def append_activation(function):
"""Adds activation function @function to the SyReNN layers.
"""
if function is None or function is keras.activations.linear:
# Identity: https://github.com/keras-team/keras/blob/bd024a1fc1cd6d88e8bc5da148968ff5e079caeb/keras/activations.py#L187
pass
elif function is keras.activations.relu:
syrenn_layers.append(pysyrenn.ReluLayer())
else:
print(function)
raise NotImplementedError
for layer in model.layers:
if isinstance(layer, keras.layers.InputLayer):
continue
elif isinstance(layer, keras.layers.Conv2D):
# filters: Height, Width, InChannels, OutChannels
# biases: OutChannels
filters, biases = map(to_numpy, layer.weights)
if layer.padding == "same":
pad_height = compute_same_padding(
filters.shape[0], layer.input_shape[1], layer.strides[0])
pad_width = compute_same_padding(
filters.shape[1], layer.input_shape[2], layer.strides[1])
assert pad_height % 2 == 0
assert pad_width % 2 == 0
padding = [pad_height // 2, pad_width // 2]
elif layer.padding == "valid":
padding = [0, 0]
else:
raise NotImplementedError
window_data = pysyrenn.StridedWindowData(
layer.input_shape[1:], # HWC
filters.shape[:2], # HW
layer.strides, # HW
padding, # HW
filters.shape[3])
# Note that SyReNN *assumes* the HWIO format and transforms it
# internally to the Pytorch OIHW format.
syrenn_layers.append(
pysyrenn.Conv2DLayer(window_data, filters, biases))
append_activation(layer.activation)
elif isinstance(layer, keras.layers.Activation):
append_activation(layer.activation)
elif isinstance(layer, keras.layers.BatchNormalization):
gamma, beta, mean, var = map(to_numpy, layer.weights)
# See https://github.com/keras-team/keras/blob/cb96315a291a8515544c6dd807500073958f8928/keras/backend/numpy_backend.py#L531
# ((x - mean) / sqrt(var + epsilon)) * gamma + beta
# = ((x - (mean - (d*beta))) / d) where
# d := sqrt(var + epsilon) / gamma
std = np.sqrt(var + 0.001) / gamma
mean = mean - (std * beta)
syrenn_layers.append(pysyrenn.NormalizeLayer(mean, std))
elif isinstance(layer, keras.layers.MaxPooling2D):
assert layer.padding == "valid"
window_data = pysyrenn.StridedWindowData(
layer.input_shape[1:], # HWC
layer.pool_size, # HW
layer.strides, # HW
[0, 0], # HW
layer.input_shape[3])
# Note that SyReNN *assumes* the HWIO format and transforms it
# internally to the Pytorch OIHW format.
syrenn_layers.append(pysyrenn.MaxPoolLayer(window_data))
elif isinstance(layer, keras.layers.Dropout):
# Not needed for inference.
pass
elif isinstance(layer, keras.layers.Flatten):
# By default, SyReNN passes data around in NHWC format to match with
# ERAN/TF.
assert layer.data_format == "channels_last"
elif isinstance(layer, keras.layers.Dense):
# weights: (from, to)
# biases: (to,)
weights, biases = map(to_numpy, layer.weights)
syrenn_layers.append(pysyrenn.FullyConnectedLayer(weights, biases))
append_activation(layer.activation)
else:
raise NotImplementedError
return pysyrenn.Network(syrenn_layers)
def to_numpy(x):
"""Helper to convert TensorFlow tensors to Numpy.
"""
return x.numpy()
def compute_same_padding(filter_size, in_size, stride):
"""Helper to compute the amount of padding used by a convolution.
Computation based on https://stackoverflow.com/a/44242277
"""
out_size = (in_size + (stride - 1)) // stride
return max((out_size - 1) * stride + filter_size - in_size, 0)
```
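A quick worked example of `compute_same_padding`, which reproduces TensorFlow's "same" padding arithmetic (the import path is an assumption): with a 3-wide kernel, stride 1, and input width 32, the output width stays 32 and the total padding is 2, i.e. one pixel per side.
```python
# Hypothetical import path for the script above.
from keras_to_syrenn import compute_same_padding

total_pad = compute_same_padding(filter_size=3, in_size=32, stride=1)
print(total_pad)       # 2; out_size = (32 + 0) // 1 = 32
print(total_pad // 2)  # 1 pixel of padding on each side, as used above
```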
#### File: third_party/eran_bmc/experiment.py
```python
import sys
sys.path.insert(0, "../ELINA/python_interface/")
sys.path.insert(0, ".")
from PIL import Image
import math
import numpy as np
import matplotlib
import matplotlib.image
import os
from eran import ERAN
from fppoly import *
from elina_coeff import *
from elina_linexpr0 import *
from read_net_file import *
from analyzer import *
import tensorflow as tf
import csv
import time
import argparse
from timeit import default_timer as timer
from tqdm import tqdm
args = {
"complete": False,
"timeout_lp": 1,
"timeout_milp": 1,
"use_area_heuristic": True,
}
def main():
"""Runs the ERAN analysis on the pendulum_continuous model.
This happens in a few steps:
1. ERAN doesn't specifically support HardTanh layers, so we translate the
HardTanh into an equivalent set of ReLU layers using convert_htanh().
2. We then read the controller network into an ERAN model.
3. We use ERAN/DeepPoly to extract an abstract value describing the
network's behavior over the initial set. Module float imprecision
handling, this abstract value is basically two affine transform A and B
such that Ax <= f(x) <= Bx for all x in the initial set.
4. We compute Ax and Bx for a particular point (0.35, 0.35) right on the
edge of the initial set, and show that the range determined by DeepPoly
(even after applying a concrete HardTanh at the end) is wide enough to
mark that point as unsafe even on the first iteration.
"""
# (1) Translate the model into an equivalent one without HardTanh.
with_htanh_filename = sys.argv[1]
no_htanh_filename = "/ovol/pendulum_continuous.no_htanh.eran"
convert_htanh(with_htanh_filename, no_htanh_filename)
# (2) Read it into ERAN.
num_pixels = 2
model, _, _, _ = read_net(no_htanh_filename, num_pixels, False)
eran = ERAN(model)
# (3) Extract an abstract value over the initial set.
# (3a) Load model and init set into ERAN.
initLB = np.array([-0.35, -0.35])
initUB = np.array([0.35, 0.35])
nn = layers()
nn.specLB = initLB
nn.specUB = initUB
execute_list = eran.optimizer.get_deeppoly(initLB, initUB)
# NOTE: 9 is just a placeholder specnumber to tell it we're using
# ACAS.
analyzer = Analyzer(execute_list, nn, "deeppoly", args["timeout_lp"],
args["timeout_milp"], 9, args["use_area_heuristic"])
# (3b) Perform the analysis and extract the abstract values.
element, _, _ = analyzer.get_abstract0()
lexpr = get_lexpr_for_output_neuron(analyzer.man, element, 0)
uexpr = get_uexpr_for_output_neuron(analyzer.man, element, 0)
lexpr = np.array(extract_from_expr(lexpr))
uexpr = np.array(extract_from_expr(uexpr))
# (3c) Extract the output range for initLB based on the abstract value.
lower_bound, upper_bound = compute_output_range(initLB, lexpr, uexpr)
# Apply extra knowledge that -1 <= lower_bound <= upper_bound <= 1.
lower_bound = max(lower_bound, -1.)
upper_bound = min(upper_bound, 1.)
post_lower, post_upper = post_bounds(initLB, lower_bound, upper_bound)
post_lower, post_upper = post_lower.flatten(), post_upper.flatten()
lower_safe = np.min(post_lower) >= -0.35
upper_safe = np.max(post_upper) <= 0.35
is_safe = lower_safe and upper_safe
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
if not is_safe:
print("ERAN reported initLB unsafe after the first step.")
else:
print("Something changed; ERAN used to report initLB unsafe, but now"
"it says it's safe.")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
elina_abstract0_free(analyzer.man, element)
def convert_htanh(with_htanh_filename, no_htanh_filename):
"""Converts a network using HardTanh to one using only ReLUs.
@with_htanh_filename is the path to the ERAN file describing the network
using HardTanh, while @no_htanh_filename is the path to the ERAN file this
function should write the ReLU-only version to.
"""
# y = ReLU(x + 1)
# z = ReLU(-y + 2)
with open(with_htanh_filename, "r") as original_network:
with open(no_htanh_filename, "w") as no_htanh_network:
in_hard_tanh = False
weights = None
biases = None
for line in original_network:
if line.strip() == "HardTanh":
in_hard_tanh = True
no_htanh_network.write("ReLU\n")
elif in_hard_tanh and not weights:
weights = line
no_htanh_network.write(line)
elif in_hard_tanh and not biases:
# HTanh(x) = -ReLU(-ReLU(x + 1) + 2) + 1
assert "," not in line
bias = float(line.strip("\n[]"))
no_htanh_network.write("[{}]\n".format(bias + 1.0))
no_htanh_network.write("ReLU\n")
no_htanh_network.write("[[-1.0]]\n")
no_htanh_network.write("[2.0]\n")
no_htanh_network.write("Affine\n")
no_htanh_network.write("[[-1.0]]\n")
no_htanh_network.write("[1.0]\n")
in_hard_tanh = False
else:
no_htanh_network.write(line)
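# Sanity-check helper added for illustration (not part of the original tool):
# the rewrite above relies on the pointwise identity
#   HardTanh(x) = -ReLU(-ReLU(x + 1) + 2) + 1,
# which clamps x to [-1, 1] for every real x.
def _check_htanh_identity(xs=(-2.0, -1.0, -0.3, 0.0, 0.7, 1.0, 2.0)):
    relu = lambda v: max(v, 0.0)
    for x in xs:
        via_relus = -relu(-relu(x + 1.0) + 2.0) + 1.0
        clamped = max(-1.0, min(1.0, x))
        assert abs(via_relus - clamped) < 1e-12, (x, via_relus, clamped)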
def compute_output_range(point, lexpr, uexpr):
"""Computes the range of possible outputs at @point.
lexpr[:, 0] gives lower bounds on the values of A while lexpr[:, 1] gives
upper bounds on the values of A. Similarly for uexpr and B.
Together, they form A, B such that Ax <= f(x) <= Bx.
This function computes a lower bound on Ax and an upper bound on Bx.
"""
lower_bound = np.min(lexpr[-1, :])
for i in range(point.size):
assert np.sign(lexpr[i, 0]) == np.sign(lexpr[i, 1])
if np.sign(lexpr[i, 0]) == np.sign(point[i]):
lower_bound += point[i] * np.min(lexpr[i, :])
else:
lower_bound += point[i] * np.max(lexpr[i, :])
upper_bound = np.max(uexpr[-1, :])
for i in range(point.size):
assert np.sign(uexpr[i, 0]) == np.sign(uexpr[i, 1])
if np.sign(uexpr[i, 0]) == np.sign(point[i]):
upper_bound += point[i] * np.max(uexpr[i, :])
else:
upper_bound += point[i] * np.min(uexpr[i, :])
return lower_bound, upper_bound
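# Illustrative usage of compute_output_range (hypothetical numbers): lexpr and
# uexpr are (n + 1, 2) arrays whose rows are [inf, sup] intervals for each
# coefficient, with the last row holding the bias term.
def _example_output_range():
    point = np.array([-0.35, -0.35])
    lexpr = np.array([[0.5, 0.5], [0.2, 0.2], [-0.1, -0.1]])  # A coefficients + bias
    uexpr = np.array([[0.6, 0.6], [0.3, 0.3], [0.1, 0.1]])    # B coefficients + bias
    return compute_output_range(point, lexpr, uexpr)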
def post_bounds(original_state, action_lower_bound, action_upper_bound):
"""Finds the tightest bounds on the post-state given bounds on the action.
A, B are environment descriptions for the Pendulum model. See
../../experiments/vrl_models.py for more details. Notably, B is positive so
we get a lower bound by multiplying action_lower_bound and an upper bound
by multiplying action_upper_bound.
"""
A = 0.01 * np.array([[0., 1.], [10.0/1.0, 0.]])
B = 0.01 * 15.0 * np.array([[0.], [1.0]])
delta_B_lower = action_lower_bound * B
delta_B_upper = action_upper_bound * B
original_state = np.array([original_state]).transpose()
delta_lower = (np.matmul(A, original_state) + delta_B_lower)
delta_upper = (np.matmul(A, original_state) + delta_B_upper)
post_lower = original_state + delta_lower
post_upper = original_state + delta_upper
return post_lower, post_upper
def extract_from_expr(expr, coeffs=2):
"""Helper method to extract a vector from the ERAN internal representation.
It returns an array of shape (n + 1, 2), where the last row is the bias and
vec[:, 0] = inf, vec[:, 1] = sup for each coefficient (intervals are used to
handle floating point imprecision).
"""
coefficients = []
for i in range(coeffs):
coeff = elina_linexpr0_coeffref(expr, i)
assert coeff.contents.discr == 1
interval = coeff.contents.val.interval.contents
assert interval.inf.contents.discr == 0
assert interval.sup.contents.discr == 0
inf = interval.inf.contents.val.dbl
sup = interval.sup.contents.val.dbl
coefficients.append([inf, sup])
cst = elina_linexpr0_cstref(expr)
assert cst.contents.discr == 1
interval = cst.contents.val.interval.contents
assert interval.inf.contents.discr == 0
assert interval.sup.contents.discr == 0
inf = interval.inf.contents.val.dbl
sup = interval.sup.contents.val.dbl
coefficients.append([inf, sup])
return np.array(coefficients)
if __name__ == "__main__":
main()
``` |
{
"source": "956836421/Python",
"score": 3
} |
#### File: 956836421/Python/LogBlame.py
```python
import sys
import re
import os
piddic = {}
timedic = {}
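# Assumed (logcat-style) line format, split on whitespace:
#   date time.millis pid tid level tag: message
# so chunk[1] holds the timestamp, chunk[2] the pid and chunk[5] the tag
# (see statistic_file below); this is inferred from the parsing code.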
def statistic(time, pid, tag):
if timedic.has_key(time) == False:
timedic[time] = 1
else:
timedic[time] += 1
if piddic.has_key(pid) == False:
tagdic = {}
piddic[pid] = tagdic
else:
tagdic = piddic[pid]
if tagdic.has_key(tag) == False:
tagdic[tag] = 1
else:
tagdic[tag] += 1
def output():
countdic = {}
total = 0;
for key in piddic:
tagdic = piddic[key]
for key1 in tagdic:
if countdic.has_key(key1) == False:
countdic[key1] = 0
countdic[key1] += tagdic[key1]
for key in countdic:
total += countdic[key]
print ''
print '-----------total line of logs: ', total, '----------------'
for key, val in sorted(countdic.items(), lambda x, y: cmp(x[1], y[1]), reverse=True):
percent = float(val)/float(total)
if percent < 0.01:
break
print key, val, 'percent: {:.2%}'.format(percent)
total = 0
for key in timedic:
total += timedic[key]
print ''
print '----------- avg ', total/len(timedic), ' lines per second ------------'
i = 0
for key, val in sorted(timedic.items(), lambda x, y: cmp(x[1], y[1]), reverse=True):
percent = float(val)/float(total)
print key, val, 'percent: {:.2%}'.format(percent)
i += 1
if i >= 10:
break
def statistic_file(path):
print 'calculating ' + path
f = open(path)
for line in f:
chunk = re.sub(' +', ' ', line).split(' ')
if len(chunk) < 6:
continue
#print line
statistic(chunk[1].split('.')[0], chunk[2], chunk[5])
f.close()
def statistic_dir(path):
for f in os.listdir(path):
if os.path.isdir(path + '/' + f):
statistic_dir(path + '/' + f)
else:
statistic_file(path + '/' + f)
if len(sys.argv) != 2:
print "arguments error"
exit()
path = sys.argv[1]
if os.path.exists(path) == False:
print "file or dir does not exists"
exit()
if os.path.isdir(path):
statistic_dir(path)
elif os.path.isfile(path):
statistic_file(path)
output()
``` |
{
"source": "958099161/person_segmentation",
"score": 3
} |
#### File: person_segmentation/criterion/criterion.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None):
super().__init__()
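# NOTE: the NLLLoss2d instance created first is immediately overwritten; only
# the element-wise CrossEntropyLoss (reduce=False) is used, and forward()
# averages it over the mask of valid (target >= 0) pixels.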
self.loss = nn.NLLLoss2d(weight)
self.loss = nn.CrossEntropyLoss(size_average=False, reduce=False)
#self.loss=nn.MSEloss()
def forward(self, outputs, targets):
#torch version >0.2 F.log_softmax(input, dim=?)
#dim (int): A dimension along which log_softmax will be computed.
# try:
# out = F.log_softmax(outputs, dim=1)
out_loss= self.loss(outputs, targets.long())
mask = targets >= 0
out_l = torch.sum(torch.masked_select(out_loss, mask)) / torch.sum(mask.float())
return out_l
# except TypeError as t:
# return self.loss(F.log_softmax(outputs), targets) #else
``` |
{
"source": "958328814/dreamdota",
"score": 3
} |
#### File: DreamWarcraft/Build Tools/jass_lua_native.py
```python
def want_skip(str):
return False
# return (str.find('Trigger') == 0 or str.find('LoadTrigger') == 0 or 'Timer' in str or 'Group' in str or 'Dialog' in str)
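# parse_line below appears to expect lines of the form
#   <name> (<arg type codes>)<return type code>
# e.g. "SetUnitX (Hunit;R)V", where an 'H' argument is followed by its handle
# type name up to ';' and the single character after ')' is the return type.
# This format is inferred from the parsing code; the example line is invented
# and may not match the real JassNatives.txt exactly.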
def parse_line(line):
# if 'Hboolexpr' in line or 'condition' in line or 'action' in line:
# return False
jargs = ''
jrv = ''
jfun = line[0:line.find('(') - 1]
if want_skip(jfun):
return False
jarg_start = line.find('(') + 1
jarg_end = line.find(')')
jargs_str = line[jarg_start:jarg_end]
i, total = 0, len(jargs_str)
while i < total:
c = jargs_str[i]
if (c == 'H'):
i = jargs_str.find(';', i) + 1
jargs = jargs + 'H'
else:
i = i + 1
jargs = jargs + c
jrv = line[line.find(')') + 1]
alltypes = jrv + jargs
# if 'C' in alltypes:
# return False
return {"rv" : jrv, "fun" : jfun, "args" : jargs}
def parse_file(file):
lines = []
f = open(file)
for line in f.readlines():
line = parse_line(line)
if line:
lines.append(line)
f.close()
return lines
def save_lua_prototype_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
code = code + ('int JASS_%s(lua_State* L);\n' % (f['fun']))
i = i + 1
file.write(code)
file.close()
def get_lua_rv_code(t, call):
if t == 'V':
return call + ';'
if t == 'I':
return ('lua_pushinteger(L, %s);' % call)
if t == 'R':
return ('lua_pushnumber(L, %s);' % call)
if t == 'H':
return ('LUA_PUSHDWORD(L, %s);' % call)
if t == 'S':
return ('lua_pushstring(L, %s);' % call)
if t == 'B':
return ('lua_pushboolean(L, %s ? 1 : 0);' % call)
if t == 'C':
return ('LUA_PUSHDWORD(L, %s);' % call)
def get_lua_arg_code(t, i):
if t == 'I':
return ('int a%d = lua_tointeger(L, %d);' % (i, i + 1))
if t == 'R':
return ('lua_Number a%d = lua_tonumber(L, %d);' % (i, i + 1))
if t == 'H':
return ('DWORD a%d = LUA_TODWORD(L, %d);' % (i, i + 1))
if t == 'S':
return ('const char* a%d = lua_tostring(L, %d);' % (i, i + 1))
if t == 'B':
return ('bool a%d = lua_toboolean(L, %d) > 0;' % (i, i + 1))
if t == 'C':
return ('DWORD a%d = LUA_TODWORD(L, %d);' % (i, i + 1))
def save_lua_impl_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
code = code + ('int JASS_%s(lua_State* L) {' % (f['fun']))
code = code + ('if (lua_gettop(L) != %d) return 0;' % len(f['args']))
args_code = '';
if f['args']:
a = []
for ai in range(len(f['args'])):
if f['args'][ai] == 'R':
a.append('(float)a' + str(ai))
else:
a.append('a' + str(ai))
args_code = args_code + get_lua_arg_code(f['args'][ai], ai)
args = ', '.join(a)
else:
args = ''
code = code + args_code;
code = code + get_lua_rv_code(f['rv'], '%s(%s)' % ('JASS_WRAPPER_' + f['fun'], args))
if f['rv'] == 'V':
code = code + 'return 0;'
else:
code = code + 'return 1;'
code = code + '}\n'
i = i + 1
file.write(code)
file.close()
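# For illustration only: given a (hypothetical) entry {"rv": "I",
# "fun": "GetFoo", "args": "HR"}, the generator above emits (on one line):
#   int JASS_GetFoo(lua_State* L) {if (lua_gettop(L) != 2) return 0;
#   DWORD a0 = LUA_TODWORD(L, 1);lua_Number a1 = lua_tonumber(L, 2);
#   lua_pushinteger(L, JASS_WRAPPER_GetFoo(a0, (float)a1));return 1;}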
def save_lua_prototype_data_code(lines, filename):
file = open(filename, 'w')
code = 'static const struct luaL_reg reg [] = {\n'
i, total = 0, len(lines)
while i < total:
f = lines[i]
code = code + ('\t{"%s", JASS_%s},' % (f['fun'], f['fun']))
i = i + 1
code = code + '\t{NULL, NULL}\n};'
file.write(code)
file.close()
def save_lua_reg_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
code = code + ('lua_register(L, "%s", JASS_%s);\n' % (f['fun'], f['fun']))
i = i + 1
file.write(code)
file.close()
def get_rv_fullname(t):
if t == 'V':
return 'void'
if t == 'I':
return 'integer'
if t == 'R':
return 'real'
if t == 'H':
return 'handle'
if t == 'S':
return 'DWORD'
if t == 'B':
return 'bool'
def get_arg_fullname(t):
if t == 'I':
return 'integer'
if t == 'R':
return 'float*'
if t == 'H':
return 'handle'
if t == 'S':
return 'string'
if t == 'B':
return 'bool'
if t == 'C':
return 'DWORD'
def get_wrapper_fullname(t):
if t == 'V':
return 'void'
if t == 'I':
return 'int'
if t == 'R':
return 'float'
if t == 'H':
return 'DWORD'
if t == 'S':
return 'const char*'
if t == 'B':
return 'bool'
if t == 'C':
return 'DWORD'
def get_arg_code(t, i):
if t == 'V':
return ''
if t == 'I':
return 'a' + str(i)
if t == 'R':
return '&a' + str(i)
if t == 'H':
return 'a' + str(i)
if t == 'S':
return 'STR_TO_JASSSTR(a' + str(i) + ')'
if t == 'B':
return 'a' + str(i)
if t == 'C':
return 'a' + str(i)
def save_jass_prototype_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
if f['args']:
a = []
for ai in range(len(f['args'])):
a.append(get_arg_fullname(f['args'][ai]))
args = ', '.join(a)
else:
args = 'void'
code = code + ('typedef %s (*JASS_PROTOTYPE_%s)(%s);\n' % (get_rv_fullname(f['rv']), f['fun'], args))
code = code + ('extern JASS_PROTOTYPE_%s JASS_NATIVE_%s;\n' % (f['fun'], f['fun']))
i = i + 1
file.write(code)
file.close()
def save_jass_native_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
if f['args']:
a = []
for ai in range(len(f['args'])):
a.append(get_arg_fullname(f['args'][ai]))
args = ', '.join(a)
else:
args = 'void'
code = code + ('JASS_PROTOTYPE_%s JASS_NATIVE_%s;\n' % (f['fun'], f['fun']))
i = i + 1
file.write(code)
file.close()
def save_jass_wrapper_prototype(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
if f['args']:
a = []
for ai in range(len(f['args'])):
a.append(get_wrapper_fullname(f['args'][ai]))
args = ', '.join(a)
else:
args = 'void'
alltypes = f['rv'] + f['args']
if 'S' in alltypes or 'R' in alltypes:
code = code + ('%s JASS_WRAPPER_%s(%s);\n' % (get_wrapper_fullname(f['rv']), f['fun'], args))
else:
code = code + ('#define JASS_WRAPPER_%s JASS_NATIVE_%s\n' % (f['fun'], f['fun']))
i = i + 1
file.write(code)
file.close()
def save_jass_wrapper_impl(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
alltypes = f['rv'] + f['args']
if 'S' in alltypes or 'R' in alltypes:
argcode = ''
if f['args']:
a = []
a_call = []
for ai in range(len(f['args'])):
a.append(get_wrapper_fullname(f['args'][ai]) + ' a' + str(ai))
a_call.append(get_arg_code(f['args'][ai], ai))
args = ', '.join(a)
args_call = ', '.join(a_call)
else:
args = ''
args_call = ''
code = code + ('%s JASS_WRAPPER_%s(%s) {\n' % (get_wrapper_fullname(f['rv']), f['fun'], args))
rvtype = f['rv']
if 'S' == rvtype:
code = code + ('\t%s rv = JASSSTR_TO_STR(JASS_NATIVE_%s(%s));\n' % (get_wrapper_fullname(rvtype), f['fun'], args_call))
elif 'R' == rvtype:
code = code + ('\t%s rv = JASS_FLOAT_CAST(JASS_NATIVE_%s(%s));\n' % (get_wrapper_fullname(rvtype), f['fun'], args_call))
else:
if rvtype == 'V':
code = code + ('\tJASS_NATIVE_%s(%s);\n' % (f['fun'], args_call))
else:
code = code + ('\t%s rv = JASS_NATIVE_%s(%s);\n' % (get_wrapper_fullname(rvtype), f['fun'], args_call))
if 'S' in f['args']:
code = code + '\tJASS_STR_CLEANUP();\n';
if rvtype != 'V':
code = code + '\treturn rv;\n';
code = code + '}\n\n'
i = i + 1
file.write(code)
file.close()
def save_jass_bridge_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
args_def = str(f['args'])
if f['args']:
a = []
a_call = []
for ai in range(len(args_def)):
arg_item = (get_wrapper_fullname(args_def[ai]) + ' a' + str(ai))
a.append(arg_item)
a_call.append('a' + str(ai))
args = ', '.join(a)
args_call = ', '.join(a_call)
else:
args = 'void'
args_call = ''
code = code + ('inline %s %s(%s) {' % (get_wrapper_fullname(f['rv']), f['fun'], args))
if f['rv'] == 'V':
code = code + ('JASS_WRAPPER_%s(%s);' % (f['fun'], args_call))
else:
code = code + ('return JASS_WRAPPER_%s(%s);' % (f['fun'], args_call))
code = code + '}\n'
i = i + 1
file.write(code)
file.close()
def save_lua_code(lines, filename):
file = open(filename, 'w')
code = ''
i, total = 0, len(lines)
while i < total:
f = lines[i]
if f['args']:
a = []
for ai in range(len(f['args'])):
a.append(get_wrapper_fullname(f['args'][ai]) + ' a' + str(ai))
args = ', '.join(a)
else:
args = 'void'
code = code + ('inline %s %s(%s);\n' % (get_wrapper_fullname(f['rv']), f['fun'], args))
i = i + 1
file.write(code)
file.close()
if __name__ == '__main__':
lines = parse_file('JassNatives.txt')
save_lua_prototype_data_code(lines, '../JassLuaNativesData.h.inc')
save_lua_prototype_code(lines, '../JassLuaNatives.h.inc')
save_lua_reg_code(lines, '../JassLuaNatives.reg.inc')
save_lua_impl_code(lines, '../JassLuaNatives.impl.inc')
save_jass_prototype_code(lines, '../JassNatives.prototype.inc')
save_jass_wrapper_prototype(lines, '../JassNativesWrapper.h.inc')
save_jass_wrapper_impl(lines, '../JassNativesWrapper.impl.inc')
save_jass_bridge_code(lines, '../JassNativesBridge.inc')
save_jass_native_code(lines, '../JassNativesImpl.inc')
```
#### File: Build Tools/test/read_language_file.py
```python
import array
import os
FileName = '../../Strings.xml'
Root = None
Lang = {}
StringID = {}
StringOffset = {}
def XOR(arr):
#print("input size: " + str(len(arr)))
#RSA public key
f = open('../in/RSA_public_key.bin', 'rb')
b = f.read(128)
keyArr = array.array('L', b)
f.close();
for i in range(0, len(arr)):
old = arr[i]
arr[i] ^= keyArr[i % 32]
#print("old = " + hex(old) + " result = " + hex(arr[i]))
def GetArray(src, offset, size):
rv = array.array('L')
for i in range(0, size):
rv.append(src[i + offset])
return rv
if __name__ == '__main__':
# Output Format
# HEADER:
# dwTOTAL_ID | dwLength | dwLength .... | dwDATA_SIZE | DATA
lang = input('lang ID: ')
fileName = '../out/' + lang + '.bin'
fileSize = os.path.getsize(fileName)
print('file size(byte): ' + str(fileSize))
dwordCount = fileSize / 4
f = open(fileName, 'rb')
arr = array.array('L')
arr.fromfile(f, int(dwordCount))
XOR(arr)
p = print
totalIdCount = arr[0]
p('id count: ' + str(totalIdCount))
for i in range(0, totalIdCount):
p('[String ID %d] size = %d' % (i, arr[1 + i]))
dataSize = arr[1 + totalIdCount]
p('Data Size = %d' % dataSize)
``` |
{
"source": "95880-A3-SP18/web-analytics-group10",
"score": 3
} |
#### File: web-analytics-group10/src/web_scraper_yelp.py
```python
import os
import sys
import requests
import csv
import re
import time
from bs4 import BeautifulSoup
# Scrapes all restaurants in Pittsburgh by sending requests to the Yelp API
def yelp():
api = "<KEY>"
headers = {"authorization": 'Bearer %s' % api}
params = {"location": "Pittsburgh", "categories" : "restaurants"}
response = requests.get('https://api.yelp.com/v3/businesses/search', headers=headers, params=params)
result = response.json()
content = ['Name','Rating','Review_Count','Price','Cuisine','Latitude','Longitude','Address']
total = result['total']
ofile = open('links_yelp.csv', "w", encoding='utf-8', newline='')
wr = csv.writer(ofile, quoting=csv.QUOTE_ALL)
wr.writerow(content)
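# The search endpoint returns at most 20 businesses per request by default,
# so page through the full result set in steps of 20 via the "offset"
# parameter.  (The real Yelp Fusion API also caps offset + limit at 1000.)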
for i in range(0, total, 20):
time.sleep(0.25)
params = {"location": "Pittsburgh", "categories" : "restaurants", "offset" : i}
response = requests.get('https://api.yelp.com/v3/businesses/search', headers=headers, params=params)
result = response.json()
if 'businesses' in result:
for restaurant in result['businesses']:
content = []
content.append(restaurant['name'])
content.append(str(restaurant['rating']))
content.append(str(restaurant['review_count']))
if 'price' not in restaurant:
price = ''
else:
price = restaurant['price']
content.append(price)
content.append(restaurant['categories'][0]['title'])
content.append(str(restaurant['coordinates']['latitude']))
content.append(str(restaurant['coordinates']['longitude']))
content.append(','.join(restaurant['location']['display_address']))
wr.writerow(content)
# main driver
if __name__ == "__main__":
os.chdir(os.path.dirname(sys.argv[0]))
yelp()
``` |
{
"source": "95880-A3-SP18/web-analytics-group15",
"score": 2
} |
#### File: web-analytics-group15/Team_CourseAnalyses/savePictures.py
```python
import django,os
from django.conf import settings
import pandas as pd
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
settings.configure()
django.setup()
def course_relation_all_graphs(filepath, picpath):
course_df = pd.read_csv(filepath)
prerequisites_df = course_df[course_df.Prerequisites != 'None'][['Course_id', 'Prerequisites']]
corequisites_df = course_df[course_df.Corequisites != 'None'][['Course_id', 'Corequisites']]
cor_G = nx.Graph()
for index in range(len(corequisites_df)):
course = corequisites_df.iloc[index]['Course_id']
cor = corequisites_df.iloc[index]['Corequisites']
try:
for item in cor.split():
if item != ',':
cor_G.add_edge(course, item)
except:
pass
plt.figure(3, figsize=(16, 16))
nx.draw(cor_G, with_labels=True, node_color='lightskyblue', edge_color='turquoise', node_size=80, alpha=0.8)
plt.savefig("{}cor_all.png".format(picpath))
plt.close()
G_pre = nx.DiGraph()
for index in range(len(prerequisites_df)):
course = prerequisites_df.iloc[index]['Course_id']
pre = prerequisites_df.iloc[index]['Prerequisites']
try:
for pre_course in pre.replace("(", " ").replace(")", " ").replace("and", " ").replace("or", " ").split():
G_pre.add_edge(pre_course, course)
except:
pass
plt.figure(3, figsize=(16, 16))
nx.draw(G_pre, with_labels=True, node_color='lightskyblue', edge_color='turquoise', node_size=80, alpha=0.8)
plt.savefig("{}pre_all.png".format(picpath))
plt.close()
def bar_all_graphs(filepath, picpath):
all_spring_df = pd.read_csv(filepath)
all_spring_df = all_spring_df.drop('Unnamed: 0', axis=1)
department_groups = all_spring_df.groupby('Department')
dep_list, dep_count = [], []
for name, group in department_groups:
dep_list.append(name)
dep_count.append(len(group))
plt.figure(1, figsize=(20, 20))
y_pos = np.arange(len(dep_list))
plt.barh(y_pos, dep_count, align='center', color='turquoise', alpha=0.8)
plt.yticks(y_pos, dep_list, fontsize = 18)
for i, num in enumerate(dep_count):
plt.text(-5.0, i, "{}".format(num), color='blue', fontsize=18, fontweight='bold')
plt.savefig("{}department_all.png".format(picpath), bbox_inches='tight')
plt.close()
bldg_room_list = all_spring_df['Bldg/Room']
bldg_list = []
for bldg in bldg_room_list:
bldg_list.append(bldg.split()[0])
ctr = Counter(bldg_list)
bldg_list, bldg_count = [],[]
for key in ctr:
bldg_list.append(key)
bldg_count.append(ctr[key])
plt.figure(1, figsize=(20, 15))
y_pos = np.arange(len(bldg_list))
plt.barh(y_pos, bldg_count, align='center', color='turquoise', alpha=0.8)
plt.yticks(y_pos, bldg_list, fontsize = 15)
for i, num in enumerate(bldg_count):
plt.text(0.0, i, "{}".format(num), color='blue', fontsize=132, fontweight='bold')
plt.savefig("{}bldg_all.png".format(picpath), bbox_inches='tight')
plt.close()
mini_list = all_spring_df['Mini']
is_mini, not_mini = 0, 0
for mini in mini_list:
if mini == 'N':
is_mini += 1
else:
not_mini += 1
plt.figure(1, figsize=(8, 8))
labels = 'Mini Course', 'Not Mini Course'
sizes = [is_mini, not_mini]
colors = ['turquoise', 'lightskyblue']
explode = (0.1, 0) # explode 1st slice
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.savefig("{}mini_all.png".format(picpath), bbox_inches='tight')
plt.close()
# I don't know how to generate these pictures before running the system
#
#
# if __name__ == "__main__":
# picpath = '{}/courses/static/courses/images/'.format(os.getcwd())
# if os.path.exists('{}bldg_all.png'.format(picpath)) \
# and os.path.exists('{}mini_all.png'.format(picpath)) \
# and os.path.exists('{}department_all.png'.format(picpath)):
# print("pass bar")
# pass
# else:
# bar_all_graphs('../parser/all_spring_courses.csv', picpath)
#
# if os.path.exists('{}pre_all.png'.format(picpath)) \
# and os.path.exists('{}cor_all.png'.format(picpath)):
# print("pass cor and pre")
# pass
# else:
# course_relation_all_graphs('../parser/course_detail_csv/Spring_2018_description.csv', picpath)
``` |
{
"source": "9590/-StockTrading",
"score": 3
} |
#### File: -StockTrading/config/createUser.py
```python
from random import randint, choice
from tradingSystem.models import UserTable
import os
banks = ['山东银行',
'江苏银行',
'上海银行',
'浙江银行',
'安徽银行',
'福建银行',
'江西银行',
'广东银行',
'广西银行',
'海南银行',
'河南银行',
'湖南银行',
'湖北银行',
'北京银行',
'天津银行',
'河北银行',
'山西银行',
'内蒙古银行',
'宁夏银行',
'青海银行',
'陕西银行',
'重庆银行',
'吉林银行']
class GenUser(object):
city = [
'山东', '江苏', '上海', '浙江', '安徽', '福建', '江西', '广东', '广西',
'海南', '河南', '湖南', '湖北', '北京', '天津', '河北', '山西', '内蒙古', '宁夏',
'青海', '陕西', '重庆', '吉林'
]
def gen_code(self, l: int):
code = ""
for i in range(l):
code += str(randint(0, 9))
return code
def gen_user_id(self):
return self.gen_code(10)
def gen_user_password(self):
return self.gen_code(8)
def gen_account_number(self):
return self.gen_code(19)
def gen_account_type(self):
return choice(self.city) + '银行'
def gen_id_no(self):
return self.gen_code(18)
def make_password(self):
pass
def gen_user_name(self):
last_name = '赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜'
first_name = '豫章故郡洪都新府星分翼轸地接衡庐襟三江而带五湖潦水尽而寒潭清落霞与孤鹜齐飞秋水共长天一色爽籁发而清风生纤歌凝而白云遏访风景于崇阿'
return choice(last_name) + "".join(choice(first_name) for i in range(randint(1, 2)))
def gen_email(self):
mails = ['qq.com', '163.com', 'gmail.com', '126.com', 'mail.edu.cn']
return self.gen_code(10) + '@' + choice(mails)
def gen_sex(self):
return choice(['男', '女'])
def gen_phone(self):
return '1' + self.gen_code(10)
def gen_account_balance(self):
return float(randint(10000, 500000))
def get_user_pic_path():
pic1 = [img for img in os.listdir('static/img') if img.startswith('user') or img.startswith('ava')]
return pic1
pics = get_user_pic_path()
def gen_photo_url():
return '/static/img/' + choice(pics)
def main():
genUser = GenUser()
cnt = 0
while cnt < 100:
user = UserTable(
user_id=genUser.gen_user_id(),
id_no=genUser.gen_id_no(),
user_name=genUser.gen_user_name(),
password=genUser.gen_user_password(),
user_sex=genUser.gen_sex(),
phone_number=genUser.gen_phone(),
user_email=genUser.gen_email(),
photo_url=gen_photo_url(),
account_num=genUser.gen_account_number(),
account_type=genUser.gen_account_type(),
account_balance=genUser.gen_account_balance(),
freeze=False,
account_opened=True
)
user.save()
cnt += 1
print(cnt)
if __name__ == '__main__':
main()
```
#### File: -StockTrading/utils/cam_stock.py
```python
import tushare as ts
from tradingSystem.models import StockInfo
from random import randint, random, choice
my_token = '6f6ee0533e28c2bde19102cccfc95de79fe56fc16633f5267fa0985c'
pro = ts.pro_api(my_token)
# data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
data = pro.query('stock_basic', exchange='', list_status='L',
fields='ts_code,symbol,name,area,industry,list_date,market')
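# The module-level block below seeds StockInfo rows from the tushare listing:
# yesterday's close is drawn at random and today's open is derived from it
# with a random move of at most +/-10% (or +/-5%).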
cnt = 0
for row in data.iterrows():
# try:
exchange = ""
ts_code = row[1]['ts_code']
# str.find returns -1 (truthy) when 'SH' is absent, so compare explicitly
if row[1]['ts_code'].find('SH') != -1:
exchange = "上证"
else:
exchange = "深证"
close_y = randint(5, 200)
extend = choice([0.1, -0.1, 0.05, -0.05]) * random()
open_t = close_y + close_y * extend
stock = StockInfo(
stock_id=row[1]['symbol'],
stock_name=row[1]['name'],
block=row[1]['market'],
issuance_time=row[1]['list_date'],
stock_type=exchange,
closing_price_y=close_y,
open_price_t=open_t,
change_extent=extend
)
stock.save()
cnt += 1
print(cnt)
# except Exception:
# print(Exception)
for row in data.iterrows():
try:
stock = StockInfo(
stock_id=row[1]['symbol'],
stock_name=row[1]['name'],
stock_type=row[1]['market']
)
stock.save()
cnt += 1
print(cnt)
except Exception:
print(Exception)
for row in data.iterrows():
exchange = ""
ts_code = row[1]['ts_code']
if row[1]['ts_code'].find('SH') != -1:
exchange = "上证"
else:
exchange = "深证"
stock_id = row[1]['symbol']
stock_name = row[1]['name']
stock_type = row[1]['market']
# stock = StockInfo.objects.get(stock_id=stock_id)
print(stock_id, stock_name, ts_code, stock_type, exchange)
# stock.stock_type = exchange
# stock.block = stock_type
def main():
# print(ts.get_today_all())
df = ts.get_realtime_quotes('000581') # Single stock symbol
# data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
data = pro.query('stock_basic', exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
for ts_code, symbol, name, area, industry, list_date in zip(data['ts_code'], data['symbol'], data['name'], data['area'],
data['industry'], data['list_date']):
print(ts_code, symbol, name, area, industry, list_date)
print(data)
sh_data = pro.query('stock_basic', exchange='SSE', list_status='L',
fileds='ts_code,symbol,name,area,industry,list_date,market')
sz_data = pro.query('stock_basic', exchange='SZSE', list_status='L',
fileds='ts_code,symbol,name,area,industry,list_date,market')
cnt = 0
for row in sh_data.iterrows():
exchange = ""
ts_code = row[1]['ts_code']
if row[1]['ts_code'].find('SH') == -1:
exchange = "深证"
else:
exchange = "上证"
stock_id = row[1]['symbol']
stock_name = row[1]['name']
stock_type = row[1]['market']
try:
stock = StockInfo.objects.get(stock_id=stock_id)
stock.stock_type='上证'
stock.block = stock_type
stock.save()
cnt += 1
print(cnt)
except Exception:
print(Exception)
# stock = StockInfo.objects.get(stock_id=stock_id)
# print(stock_id, stock_name, ts_code, stock_type, exchange)
cnt = 0
for row in sz_data.iterrows():
exchange = ""
ts_code = row[1]['ts_code']
if row[1]['ts_code'].find('SH') == -1:
exchange = "深证"
else:
exchange = "上证"
stock_id = row[1]['symbol']
stock_name = row[1]['name']
stock_type = row[1]['market']
try:
stock = StockInfo.objects.get(stock_id=stock_id)
stock.stock_type='深证'
stock.block = stock_type
stock.save()
cnt += 1
print(cnt)
except Exception:
print(Exception)
# print(stock_id, stock_name, ts_code, stock_type, exchange)
if __name__ == '__main__':
main()
```
#### File: -StockTrading/utils/createStockTable.py
```python
import pymysql
import tushare as ts
import numpy as np
import time
from utils import getHistoryData
def createStockTable(t): # create a table, e.g. named 000001.SZ, with columns date, open, high, low, close
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = """CREATE TABLE `%s`(
TRADING_DAY VARCHAR(64) DEFAULT NULL,
OPEN_PRICE FLOAT DEFAULT NULL,
HIGHEST FLOAT DEFAULT NULL,
LOWEST FLOAT DEFAULT NULL,
CLOSE_PRICE FLOAT DEFAULT NULL)
"""
cursor.execute(sql, [t])
def createEvedayTable(t): # per-day real-time tick data
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = """CREATE TABLE `%s`(
DAILY_TICKS VARCHAR(64) DEFAULT NULL,
REAL_TIME_QUOTES FLOAT DEFAULT NULL
)
"""
cursor.execute(sql, ["dailyTicks_" + t])
cursor.close()
conn.close()
def InsertOldDay(t):
res = getHistoryData.getHistoryData(t)
print(res)
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = "INSERT INTO `%s`(TRADING_DAY,OPEN_PRICE,HIGHEST,LOWEST,CLOSE_PRICE) VALUES(%s, %s, %s, %s,%s)"
t = t.split(".")
for i in res:
i.insert(0, t[0] + "_" + t[1])
# print(i)
cursor.execute(sql, i)
conn.commit()
cursor.close()
conn.close()
def insertTodayTickData(t):
t = t.split(".")
res = ts.get_today_ticks(t[0])
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = "INSERT INTO `%s`(TRADING_DAY,OPEN_PRICE,HIGHEST,LOWEST,CLOSE_PRICE) VALUES(%s, %s, %s, %s,%s)"
for i in res:
i.insert(0, t[0] + "_" + t[1])
# print(i)
cursor.execute(sql, i)
conn.commit()
cursor.close()
conn.close()
def getTscode():
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = "select stock_id,stock_type from stock_info"
cursor.execute(sql)
stoinfo = cursor.fetchall()
for i in range(959,len(stoinfo)):
if(stoinfo[i][1] == "上证"):
tmp=stoinfo[i][0]+"_"+"SH"
# tmp = stoinfo[i][0] + "." + "SH"
else:
tmp = stoinfo[i][0] + "_" + "SZ"
# tmp = stoinfo[i][0] + "." + "SZ"
# createStockTable(tmp)
# createEvedayTable(tmp)
print(tmp)
InsertOldDay(tmp)
# time.sleep(1)
cursor.close()
conn.close()
# getTscode()
# InsertOldDay("000001.SZ")
```
#### File: -StockTrading/utils/getAstock.py
```python
import tushare as ts
import numpy as np
def getAstock(t):
# data = np.array([[1, 8, 3, 3, 4],
# [1, 8, 9, 9, 4],
# [1, 8, 3, 3, 4]])
# print(type(data))
# print(data)
# # 删除整个数组的重复元素
# uniques = np.unique(data)
# print(uniques)
# # array([1, 3, 4, 8, 9])
# # 删除重复行
# uniques = np.unique(data,axis = 0)
# print(uniques)
# # array([[1, 8, 3, 3, 4],
# # [1, 8, 9, 9, 4]])
# # 删除重复列
# uniques = np.unique(data,axis = 1)
# print(uniques)
pro = ts.pro_api('17649607a4e92be1fe38fb52b2ff2e044ac6301f665e98b278ab14a7')
# data = pro.stock_basic(exchange='', list_status='L', fileds='ts_code,symbol,name,area,industry,list_date')
df = pro.stk_rewards(ts_code=t)
m = df.dropna(axis=0)
m = m[~m['hold_vol'].isin([0])]
res = np.array(m)
res = res[0:5,[3,6]]
# print(res)
# print(res)
dic = {}
res = list(res)
# print(len(res))
# print(res)
for i in range(0,len(res)):
# print(res[i][0])
dic[res[i][0]]=res[i][1]
# print(dic)
res = []
for key in dic:
tmp = []
tmp.append(key)
print(key,dic[key])
tmp.append(dic[key])
res.append(tmp)
return res
# print(np.argmax(res[1],))
# res.append(list(m["hold_vol"]))
# res.append(list(m["name"]))
# print(res)
```
#### File: -StockTrading/utils/getRtQuotes.py
```python
import tushare as ts
import numpy as np
import time
from datetime import date, datetime
from chinese_calendar import is_workday,is_holiday
# pro = ts.pro_api('17649607a4e92be1fe38fb52b2ff2e044ac6301f665e98b278ab14a7')
# data = pro.stock_basic(exchange='SSE', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
def getworkday():
strt = time.strftime('%Y-%m-%d', time.localtime(time.time()))
strt = strt.split("-")
y = int(strt[0])
m = int(strt[1])
d = int(strt[2])
april_last = date(y , m , d)
print(april_last)
print(is_workday(april_last))
print("fuckshit",is_holiday(april_last))
print("dick",not is_holiday(april_last))
print("elecshit",is_workday(april_last) and (not is_holiday(april_last)))
return is_workday(april_last) and (not is_holiday(april_last))
def getRtQuotes(t):
f = getworkday()
df = "0"
resx="0"
res = "0"
resy="0"
print(t)
if(f):
print(f)
strt = time.strftime('%Y-%m-%d', time.localtime(time.time()))
print(strt)
df = ts.get_today_ticks(t)
print(df)
res = np.array(df)
resx = res[:, [0]]
resy = res[:, [1]]
resx = resx.reshape(-1)
resy = resy.reshape(-1)
resx = resx.tolist()
resy = resy.tolist()
return 1,resx,resy
else:
return 0,resx,resy
```
#### File: -StockTrading/utils/insertDayData.py
```python
import tushare as ts
import numpy as np
import time
import pymysql
def getTodayData(t):
pro = ts.pro_api('17649607a4e92be1fe38fb52b2ff2e044ac6301f665e98b278ab14a7')
strt = time.strftime('%Y-%m-%d', time.localtime(time.time()))
print(strt)
strt = strt.split("-")
print(t)
df = pro.daily(ts_code=t, start_date=strt[0] + strt[1] + strt[2], end_date=strt[0] + strt[1] + strt[2])
res = np.array(df)
res = res[:, [1, 2, 5, 4, 3]] # date, open, high, low, close
res = res.tolist()
return res
def InsertTodayDay(t):
res = getTodayData(t)
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = "INSERT INTO `%s`(TRADING_DAY,OPEN_PRICE,HIGHEST,LOWEST,CLOSE_PRICE) VALUES(%s, %s, %s, %s,%s)"
t = t.split(".")
print(t)
for i in res:
i.insert(0, t[0] + "_" + t[1])
print(i)
cursor.execute(sql, i)
conn.commit()
cursor.close()
conn.close()
def upHold():
conn = pymysql.connect(host="127.0.0.1", user="trading", password="<PASSWORD>", database="stocktrading")
cursor = conn.cursor()
sql = "select stock_id,stock_type from stock_info"
cursor.execute(sql)
stoinfo = cursor.fetchall()
for i in range(0, len(stoinfo)):
if (stoinfo[i][1] == "上证"):
tmp = stoinfo[i][0] + "." + "SH"
else:
tmp = stoinfo[i][0] + "." + "SZ"
InsertTodayDay(tmp)
cursor.close()
conn.close()
# Hook used at market close to maintain the daily data
# upHold()
``` |
{
"source": "9592/codegram",
"score": 2
} |
#### File: codegram/images/models.py
```python
from django.db import models
# In Django 2.0 it seems the separate Python 2 decorator is no longer needed....
#from django.utils.encoding import python_2_unicode_compatible
# Same name as the global models module, but since we need the models inside the users app, it is imported under a temporary name (as user_models)
from codegram.users import models as user_models
# Create your models here.
# The class below serves as the time base for likes, image creation, etc.
class TimeStampedModel(models.Model):
# created_at is set to the creation time when the row is first created
created_at = models.DateTimeField(auto_now_add=True)
# updated_at is refreshed on every save
updated_at = models.DateTimeField(auto_now=True)
# Meta: fields that are not mapped to the database
# Putting a Meta class inside TimeStampedModel keeps this class out of the DB tables and uses it only as an abstract base (inheritance of an abstract class)
class Meta:
# abstract model
abstract = True
# Image model (image field, location, caption)
class Image (TimeStampedModel):
file = models.ImageField()
location = models.CharField(max_length=140)
caption = models.TextField()
# on_delete specifies what happens when the referenced ForeignKey row is deleted
# PROTECT blocks the deletion; several other options are available
creator = models.ForeignKey(user_models.User, null=True, on_delete=models.PROTECT, related_name='images')
# @property does not become a model field / DB column, but lives on the model (works like a function)
@property
def like_count(self):
return self.likes.all().count()
# Magic method: __str__ controls how the object is displayed
def __str__(self):
return '{}-{}'.format(self.location, self.caption)
# Order the list fetched from the DB by creation date
class Meta:
ordering = ['-created_at']
# Comment model (message)
# @python_2_unicode_compatible
class Comment (TimeStampedModel):
message = models.TextField()
creator = models.ForeignKey(user_models.User, null=True, on_delete=models.PROTECT)
# Specifying related_name renames the default comment_set accessor
image = models.ForeignKey(Image ,null=True, on_delete=models.PROTECT,related_name='comments')
# Printing the object shows the contents of the message TextField
def __str__(self):
return self.message
# Like model (foreign keys)
#@python_2_unicode_compatible
class Like (TimeStampedModel):
creator = models.ForeignKey(user_models.User, null=True, on_delete=models.PROTECT)
image = models.ForeignKey(Image, null=True, on_delete=models.PROTECT,related_name='likes')
# Like has no message field, so the string is built from its foreign keys (username, image caption)
def __str__(self):
return 'User:{} - Image Caption{}'.format(self.creator.username, self.image.caption)
``` |
{
"source": "95e2/FriendlyBinaryDiff",
"score": 3
} |
#### File: 95e2/FriendlyBinaryDiff/bindiff.py
```python
import os
import sys
_EOF = b''
_USAGE_ERR = 1
_OPEN_ERR = 2
_SIZE_ERR = 3
_CHUNK_SIZE = 16
_FIRST_ONLY = False # or True
NONE = '\033[0m'
LIGHT_RED = '\033[31m'
LIGHT_GREEN = '\033[32;1m'
LIGHT_YELLOW = '\033[33;1m'
class DiffFile:
def __init__(self, fp):
self.__seek = 0
self.__addr = 0
self.__fp = fp
self.__chunk = None
def __nextChunk__(self, chunkSize):
# Read the chunk starting at the current offset, then advance the offset,
# so the first chunk of the file is included in the comparison.
self.__addr = self.__seek
self.__fp.seek(self.__seek)
self.__chunk = self.__fp.read(chunkSize)
self.__seek += chunkSize
def getChunk(self):
self.__nextChunk__(_CHUNK_SIZE)
if self.__chunk != _EOF:
return self.__chunk
def getAddress(self):
return self.__addr
def checkSize(oldSize, newSize):
if oldSize < newSize:
sys.stderr.write("Error: the old file size less than the new file!\n")
sys.exit(_SIZE_ERR)
elif oldSize > newSize:
sys.stderr.write("Error: the old file size more than the new file!\n")
sys.exit(_SIZE_ERR)
def chunkToHexStr(chunk):
if isinstance(chunk, str):
# Python 2.x
return ''.join(['%02X '%(ord(x)) for x in chunk])
else:
# Python 3.x
return ''.join(['%02X '%(x) for x in chunk])
"""
ADDRESS: 016D8590
OLD <<<========================================
7F 25 92 BB 0D 42 20 C2 16 6D A4 AC CF 89 61 37
===============^v=============^v===============
4F 25 92 BB 0D 82 20 C2 16 6D B9 AC CF 89 61 37
========================================>>> NEW
"""
def getDiffStr(diffOffsets):
seek = 0
diffStr = ""
for offset in diffOffsets:
diffStr += (offset - seek) * "==="
diffStr += LIGHT_GREEN + "^" + \
LIGHT_RED + "V" + NONE + "="
seek = offset + 1
diffStr += (_CHUNK_SIZE - diffOffsets[-1] - 1) * "==="
return diffStr
def diffStart(old, new):
while True:
oldChunk = old.getChunk()
newChunk = new.getChunk()
if (not oldChunk or
not newChunk):
break
diffOffsets = []
for i in range(min(len(oldChunk), len(newChunk))):
if oldChunk[i] != newChunk[i]:
diffOffsets.append(i) # record offset.
# FIXME: The result is not accurate.
DISPLAY_LEN = (_CHUNK_SIZE * 2 + _CHUNK_SIZE // 2 + 1)
if len(diffOffsets) > 0:
print("ADDRESS: %08X, COUNT(%d)"%(old.getAddress() or
new.getAddress(), len(diffOffsets) ) )
print(LIGHT_GREEN + "OLD <<<" + "="*DISPLAY_LEN + NONE)
print(chunkToHexStr(oldChunk))
print(getDiffStr(diffOffsets))
print(chunkToHexStr(newChunk))
print(LIGHT_YELLOW + "="*DISPLAY_LEN + ">>> NEW\n" + NONE)
if _FIRST_ONLY: break
def main(argv):
if len(argv) < 3:
sys.exit(_USAGE_ERR)
oldFile = argv[1]
newFile = argv[2]
if not os.path.exists(oldFile) or not os.path.exists(newFile):
sys.stderr.write("Error: can not found the old or the new file!\n")
sys.exit(_OPEN_ERR)
oldSzie = os.path.getsize(oldFile)
newSzie = os.path.getsize(newFile)
checkSize(oldSzie, newSzie)
with open(oldFile, "rb") as oldFp, open(newFile, "rb") as newFp:
old = DiffFile(oldFp)
new = DiffFile(newFp)
diffStart(old, new)
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "95hali74/acconeer-python-exploration",
"score": 2
} |
#### File: examples/processing/presence_detection_sparse.py
```python
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore
from acconeer_utils.clients.reg.client import RegClient, RegSPIClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
elif args.spi:
client = RegSPIClient()
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
sensor_config = get_sensor_config()
processing_config = get_processing_config()
sensor_config.sensor = args.sensors
client.setup_session(sensor_config)
pg_updater = PGUpdater(sensor_config, processing_config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = PresenceDetectionSparseProcessor(sensor_config, processing_config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_sensor_config():
config = configs.SparseServiceConfig()
config.range_interval = [0.5, 1.5]
config.sweep_rate = 60
config.gain = 0.65
config.number_of_subsweeps = 16
return config
def get_processing_config():
return {
"threshold": {
"name": "Threshold",
"value": 0.3,
"limits": [0, 1],
"type": float,
"text": None,
},
"upper_speed_limit": {
"name": "Max movement [mm/s]",
"value": 25,
"limits": [1, 500],
"type": float,
"text": None,
},
}
class PresenceDetectionSparseProcessor:
def __init__(self, sensor_config, processing_config):
self.threshold = processing_config["threshold"]["value"]
num_subsweeps = sensor_config.number_of_subsweeps
upper_speed_limit = processing_config["upper_speed_limit"]["value"]
f = int(round(sensor_config.sweep_rate))
self.movement_history = np.zeros(f * 5) # 5 seconds
self.a_fast_tau = 1.0 / (upper_speed_limit / 2.5)
self.a_slow_tau = 1.0
self.a_move_tau = 1.0
self.static_a_fast = self.alpha(self.a_fast_tau, 1.0 / (f * num_subsweeps))
self.static_a_slow = self.alpha(self.a_slow_tau, 1.0 / (f * num_subsweeps))
self.a_move = self.alpha(self.a_move_tau, 1.0 / (f * num_subsweeps))
self.sweep_lp_fast = None
self.sweep_lp_slow = None
self.subsweep_index = 0
self.movement_lp = 0
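# The detector keeps one fast and one slow exponential average of each sparse
# subsweep; the mean absolute difference between them acts as a motion score,
# which is low-pass filtered again, squashed with tanh and compared to the
# threshold to decide presence.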
def dynamic_filter_coefficient(self, static_alpha):
dynamic_alpha = 1.0 - 1.0 / (1 + self.subsweep_index)
return min(static_alpha, dynamic_alpha)
def process(self, sweep):
for subsweep in sweep:
if self.sweep_lp_fast is None:
self.sweep_lp_fast = subsweep.copy()
self.sweep_lp_slow = subsweep.copy()
else:
a_fast = self.dynamic_filter_coefficient(self.static_a_fast)
self.sweep_lp_fast = self.sweep_lp_fast * a_fast + subsweep * (1-a_fast)
a_slow = self.dynamic_filter_coefficient(self.static_a_slow)
self.sweep_lp_slow = self.sweep_lp_slow * a_slow + subsweep * (1-a_slow)
movement = np.mean(np.abs(self.sweep_lp_fast - self.sweep_lp_slow))
movement *= 0.01
self.movement_lp = self.movement_lp*self.a_move + movement*(1-self.a_move)
self.subsweep_index += 1
self.movement_history = np.roll(self.movement_history, -1)
self.movement_history[-1] = self.movement_lp
present = np.tanh(self.movement_history[-1]) > self.threshold
out_data = {
"movement": np.abs(self.sweep_lp_fast - self.sweep_lp_slow),
"movement_history": np.tanh(self.movement_history),
"present": present,
}
return out_data
def alpha(self, tau, dt):
return np.exp(-dt/tau)
class PGUpdater:
def __init__(self, sensor_config, processing_config):
self.config = sensor_config
self.movement_limit = processing_config["threshold"]["value"]
def setup(self, win):
win.setWindowTitle("Acconeer presence detection example")
self.move_plot = win.addPlot(title="Movement")
self.move_plot.showGrid(x=True, y=True)
self.move_plot.setLabel("bottom", "Depth (m)")
self.move_curve = self.move_plot.plot(pen=example_utils.pg_pen_cycler())
self.move_smooth_max = example_utils.SmoothMax(self.config.sweep_rate)
win.nextRow()
move_hist_plot = win.addPlot(title="Movement history")
move_hist_plot.showGrid(x=True, y=True)
move_hist_plot.setLabel("bottom", "Time (s)")
move_hist_plot.setXRange(-5, 0)
move_hist_plot.setYRange(0, 1)
self.move_hist_curve = move_hist_plot.plot(pen=example_utils.pg_pen_cycler())
limit_pen = pg.mkPen("k", width=2.5, style=QtCore.Qt.DashLine)
limit_line = pg.InfiniteLine(self.movement_limit, angle=0, pen=limit_pen)
move_hist_plot.addItem(limit_line)
present_text = '<div style="text-align: center">' \
'<span style="color: #FFFFFF;font-size:16pt;">' \
'{}</span></div>'.format("Presence detected!")
not_present_text = '<div style="text-align: center">' \
'<span style="color: #FFFFFF;font-size:16pt;">' \
'{}</span></div>'.format("No presence detected")
self.present_text_item = pg.TextItem(
html=present_text,
fill=pg.mkColor(255, 140, 0),
anchor=(0.5, 0),
)
self.not_present_text_item = pg.TextItem(
html=not_present_text,
fill=pg.mkColor("b"),
anchor=(0.5, 0),
)
self.present_text_item.setPos(-2.5, 0.95)
self.not_present_text_item.setPos(-2.5, 0.95)
move_hist_plot.addItem(self.present_text_item)
move_hist_plot.addItem(self.not_present_text_item)
self.present_text_item.hide()
def update(self, data):
move_ys = data["movement"]
move_xs = np.linspace(*self.config.range_interval, len(move_ys))
self.move_curve.setData(move_xs, move_ys)
self.move_plot.setYRange(0, self.move_smooth_max.update(np.max(move_ys)))
move_hist_ys = data["movement_history"]
move_hist_xs = np.linspace(-5, 0, len(move_hist_ys))
self.move_hist_curve.setData(move_hist_xs, move_hist_ys)
if data["present"]:
self.present_text_item.show()
self.not_present_text_item.hide()
else:
self.present_text_item.hide()
self.not_present_text_item.show()
if __name__ == "__main__":
main()
```
#### File: examples/utils/ping.py
```python
from acconeer_utils.clients.reg.client import RegClient, RegSPIClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
def main():
args = example_utils.ExampleArgumentParser().parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
elif args.spi:
client = RegSPIClient()
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = configs.EnvelopeServiceConfig()
config.sensor = args.sensors
config.range_interval = [0.2, 0.6]
config.sweep_rate = 10
client.start_streaming(config)
client.get_next()
client.disconnect()
if __name__ == "__main__":
main()
```
#### File: clients/json/client.py
```python
from time import time
from copy import deepcopy
import logging
from distutils.version import StrictVersion
from acconeer_utils.clients.base import BaseClient, ClientError
from acconeer_utils.clients import links
from acconeer_utils.clients.json import protocol
log = logging.getLogger(__name__)
MIN_VERSION = StrictVersion("1.5.2")
DEV_VERSION = StrictVersion("1.8.1")
class JSONClient(BaseClient):
def __init__(self, host, **kwargs):
super().__init__(**kwargs)
self._link = links.SocketLink(host)
self._session_cmd = None
self._session_ready = False
self._num_subsweeps = None
def _connect(self):
self._link.connect()
cmd = {"cmd": "get_version"}
self._send_cmd(cmd)
try:
header, _ = self._recv_frame()
except links.LinkError as e:
raise ClientError("no response from server") from e
log.debug("connected and got a response")
if header["status"] != "ok":
raise ClientError("server error while connecting")
msg = header["message"].lower()
log.debug("version msg: {}".format(msg))
startstr = "server version v"
if not msg.startswith(startstr):
log.warn("server version unknown")
return
server_version_str = msg[len(startstr):].strip()
try:
server_version = StrictVersion(server_version_str)
except ValueError:
log.warn("server version unknown")
return
if server_version < MIN_VERSION:
log.warn("server version is not supported (too old)")
elif server_version != DEV_VERSION:
log.warn("server version might not be fully supported")
def _setup_session(self, config):
if isinstance(config, dict):
cmd = deepcopy(config)
log.warn("setup with raw dict config - you're on your own")
else:
cmd = protocol.get_dict_for_config(config)
cmd["output_format"] = "json+binary"
self._session_cmd = cmd
info = self._init_session()
log.debug("setup session")
return info
def _start_streaming(self):
if not self._session_ready:
self._init_session()
cmd = {"cmd": "start_streaming"}
self._send_cmd(cmd)
header, _ = self._recv_frame()
if header["status"] != "start":
raise ClientError
log.debug("started streaming")
def _get_next(self):
header, payload = self._recv_frame()
status = header["status"]
if status == "end":
raise ClientError("session ended")
elif status != "ok":
raise ClientError("server error")
return protocol.decode_stream_frame(header, payload, self.squeeze, self._num_subsweeps)
def _stop_streaming(self):
cmd = {"cmd": "stop_streaming"}
self._send_cmd(cmd)
t0 = time()
while time() - t0 < self._link._timeout:
header, _ = self._recv_frame()
status = header["status"]
if status == "end":
break
elif status == "ok": # got streaming data
continue
else:
raise ClientError
else:
raise ClientError
self._session_ready = False
log.debug("stopped streaming")
def _disconnect(self):
self._link.disconnect()
self._session_cmd = None
self._session_ready = False
log.debug("disconnected")
def _init_session(self):
if self._session_cmd is None:
raise ClientError
self._send_cmd(self._session_cmd)
header, _ = self._recv_frame()
if header["status"] == "error":
raise ClientError("server error while initializing session")
elif header["status"] != "ok":
raise ClientError("got unexpected header")
log.debug("session initialized")
self._session_ready = True
info = protocol.get_session_info_for_header(header)
self._num_subsweeps = info.get("number_of_subsweeps")
return info
def _send_cmd(self, cmd_dict):
cmd_dict["api_version"] = 2
packed = protocol.pack(cmd_dict)
self._link.send(packed)
def _recv_frame(self):
packed_header = self._link.recv_until(b'\n')
header = protocol.unpack(packed_header)
payload_len = header["payload_size"]
if payload_len > 0:
payload = self._link.recv(payload_len)
else:
payload = None
return header, payload
```
#### File: clients/reg/protocol.py
```python
from collections import namedtuple
import numpy as np
Reg = namedtuple(
"Reg",
[
"name",
"mode",
"addr",
"rw",
"type",
"val_map",
"info_type",
"config_attr",
],
)
EncFuns = namedtuple("EncFuns", ["encode_fun", "decode_fun"])
UnpackedRegVal = namedtuple("UnpackedRegVal", ["addr", "val"])
UnpackedRegReadRequest = namedtuple("UnpackedRegReadRequest", ["addr"])
UnpackedRegWriteRequest = namedtuple("UnpackedRegWriteRequest", ["reg_val"])
UnpackedRegReadResponse = namedtuple("UnpackedRegReadResponse", ["reg_val"])
UnpackedRegWriteResponse = namedtuple("UnpackedRegWriteResponse", ["reg_val"])
UnpackedStreamData = namedtuple("UnpackedStreamData", ["result_info", "buffer"])
class ProtocolError(Exception):
pass
class PackError(ProtocolError):
pass
class UnpackError(ProtocolError):
pass
ADDR_SIZE = 1
REG_SIZE = 4
LEN_FIELD_SIZE = 2
MIN_FRAME_SIZE = 1 + LEN_FIELD_SIZE + 1 + 1
BYTEORDER = "little"
BO = BYTEORDER
EXPECTED_ID = 0xACC0
MIN_VERSION = 1
DEV_VERSION = 1
START_MARKER = 0xCC
END_MARKER = 0xCD
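# Frame layout, as implemented by insert_packet_into_frame/extract_packet_from_frame:
#   [START_MARKER][packet length, 2 bytes LE][packet type][packet payload][END_MARKER]
# where the length field counts the payload only (i.e. the packet minus its type byte).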
REG_READ_REQUEST = 0xF8
REG_READ_RESPONSE = 0xF6
REG_WRITE_REQUEST = 0xF9
REG_WRITE_RESPONSE = 0xF5
STREAM_PACKET = 0xFE
STREAM_RESULT_INFO = 0xFD
STREAM_BUFFER = 0xFE
BUF_READ_REQUEST = 0xFA
MAIN_BUFFER_ADDR = 232
STATUS_DATA_READY_MASK = 1 << 0
STATUS_ERROR_MASK = 1 << 2
NO_MODE = "none"
MODES = {
"power_bin": 1,
"envelope": 2,
"iq": 3,
"sparse": 4,
"distance_peak_fix_threshold": 0x100,
}
FIXED_BUF_SIZE = {
"power_bin": True,
"envelope": True,
"iq": True,
"distance_peak_fix_threshold": False,
}
BYTE_PER_POINT = {
"envelope": 2,
"iq": 4,
"sparse": 2,
}
float_to_milli_enc_funs = EncFuns(
lambda v: int(round(v * 1000)),
lambda v: v / 1000.0
)
REGS = [
Reg(
"mode_selection",
NO_MODE,
2,
"rw",
"u",
MODES,
None,
None,
),
Reg(
"main_control",
NO_MODE,
3,
"w",
"u",
{
"stop": 0,
"create": 1,
"activate": 2,
"create_and_activate": 3,
"clear_status": 4,
},
None,
None,
),
Reg(
"streaming_control",
NO_MODE,
5,
"w",
"u",
{
"disable": 0,
"uart": 1,
},
None,
None,
),
Reg(
"status",
NO_MODE,
6,
"r",
"u",
None,
None,
None,
),
Reg(
"uart_baudrate",
NO_MODE,
7,
"rw",
"u",
None,
None,
None,
),
Reg(
"profile_selection",
NO_MODE,
11,
"w",
"u",
None,
None,
"session_profile",
),
Reg(
"product_id",
NO_MODE,
16,
"r",
"u",
None,
None,
None,
),
Reg(
"product_version",
NO_MODE,
17,
"r",
"u",
None,
None,
None,
),
Reg(
"range_start",
NO_MODE,
32,
"rw",
"i",
float_to_milli_enc_funs,
None,
"range_start",
),
Reg(
"range_length",
NO_MODE,
33,
"rw",
"i",
float_to_milli_enc_funs,
None,
"range_length",
),
Reg(
"repetition_mode",
NO_MODE,
34,
"rw",
"u",
{
"fixed": 1,
"max": 2,
},
None,
None,
),
Reg(
"frequency",
NO_MODE,
35,
"rw",
"u",
float_to_milli_enc_funs,
"session",
"sweep_rate",
),
Reg(
"gain",
NO_MODE,
36,
"rw",
"u",
float_to_milli_enc_funs,
None,
"gain",
),
Reg(
"sensor_power_mode",
NO_MODE,
37,
"rw",
"u",
{
"a": 0,
"b": 1,
"c": 2,
"d": 3,
},
None,
None,
),
Reg(
"actual_range_start",
NO_MODE,
129,
"r",
"i",
float_to_milli_enc_funs,
"session",
None,
),
Reg(
"actual_range_length",
NO_MODE,
130,
"r",
"i",
float_to_milli_enc_funs,
"session",
None,
),
Reg(
"output_data_buffer_length",
NO_MODE,
233,
"r",
"u",
None,
None,
None,
),
Reg(
"data_saturated",
NO_MODE,
0x9F,
"r",
"b",
None,
"sweep",
None,
),
Reg(
"sequence_number",
NO_MODE,
0xA0,
"r",
"u",
None,
"sweep",
None,
),
Reg(
"requested_bin_count",
"power_bin",
64,
"rw",
"u",
None,
None,
"bin_count",
),
Reg(
"actual_bin_count",
"power_bin",
131,
"r",
"u",
None,
"session",
None,
),
Reg(
"running_average_factor",
"envelope",
64,
"rw",
"u",
float_to_milli_enc_funs,
None,
"running_average_factor",
),
Reg(
"compensate_phase",
"envelope",
65,
"rw",
"b",
None,
None,
"compensate_phase",
),
Reg(
"data_length",
"envelope",
131,
"r",
"u",
None,
"session",
None,
),
Reg(
"running_average_factor",
"iq",
64,
"rw",
"u",
float_to_milli_enc_funs,
None,
"running_average_factor",
),
Reg(
"output_data_compression",
"iq",
65,
"rw",
"u",
None,
None,
None,
),
Reg(
"data_length",
"iq",
131,
"r",
"u",
None,
"session",
None,
),
Reg(
"number_of_subsweeps",
"sparse",
64,
"rw",
"u",
None,
"session",
"number_of_subsweeps",
),
Reg(
"data_length",
"sparse",
131,
"r",
"u",
None,
"session",
None,
),
Reg(
"peak_count",
"distance_peak_fix_threshold",
161,
"r",
"u",
None,
None,
None,
),
]
MODE_LOOKUP = {v: k for k, v in MODES.items()}
REG_LOOKUP = {k: {} for k in MODES.keys()}
REG_LOOKUP[NO_MODE] = {}
for reg in REGS:
REG_LOOKUP[reg.mode][reg.name] = reg
REG_LOOKUP[reg.mode][reg.addr] = reg
def get_mode(mode):
if isinstance(mode, str):
return mode
if mode is None:
return NO_MODE
try:
return MODE_LOOKUP[mode]
except KeyError:
raise ProtocolError("unknown mode")
def get_reg(x, mode=None):
if isinstance(x, Reg):
return x
try:
return REG_LOOKUP[NO_MODE][x]
except KeyError:
pass
mode = get_mode(mode)
if mode != NO_MODE:
try:
return REG_LOOKUP[mode][x]
except KeyError:
pass
raise ProtocolError("unknown register")
def get_addr_for_reg(x, mode=None):
if isinstance(x, int):
return x
return get_reg(x, mode).addr
def encode_reg_val(reg, val, mode=None):
reg = get_reg(reg, mode)
if isinstance(reg.val_map, EncFuns):
x = reg.val_map.encode_fun(val)
elif isinstance(reg.val_map, dict):
try:
x = reg.val_map[val]
except KeyError:
raise ProtocolError("could not encode register value (value not in map)")
else:
x = val
if reg.type == "u":
return x.to_bytes(REG_SIZE, BO)
elif reg.type == "i":
return x.to_bytes(REG_SIZE, BO, signed=True)
elif reg.type == "b":
return int(x).to_bytes(REG_SIZE, BO)
else:
return val
def decode_reg_val(reg, enc_val, mode=None):
reg = get_reg(reg, mode)
if reg.type == "u":
x = int.from_bytes(enc_val, BO)
elif reg.type == "i":
x = int.from_bytes(enc_val, BO, signed=True)
elif reg.type == "b":
x = any(enc_val)
else:
x = enc_val
if isinstance(reg.val_map, EncFuns):
return reg.val_map.decode_fun(x)
elif isinstance(reg.val_map, dict):
for k, v in reg.val_map.items():
if v == x:
return k
else:
raise ProtocolError("could not decode register value (value not in map)")
else:
return x
def unpack_packet(packet):
if len(packet) < 1:
raise UnpackError("package is too short")
packet_type = packet[0]
segment = packet[1:]
if packet_type == REG_READ_RESPONSE:
return unpack_reg_read_res_segment(segment)
elif packet_type == REG_WRITE_RESPONSE:
return unpack_reg_write_res_segment(segment)
elif packet_type == STREAM_PACKET:
return unpack_stream_data_segment(segment)
else:
raise UnpackError("unknown packet type")
def unpack_reg_val(packed):
if len(packed) != ADDR_SIZE + REG_SIZE:
raise UnpackError("unexpected package length")
reg_addr = packed[0]
enc_val = packed[1:]
return UnpackedRegVal(reg_addr, enc_val)
def unpack_reg_read_res_segment(segment):
rv = unpack_reg_val(segment)
return UnpackedRegReadResponse(rv)
def unpack_reg_write_res_segment(segment):
rv = unpack_reg_val(segment)
return UnpackedRegWriteResponse(rv)
def unpack_stream_data_segment(segment):
result_info = None
buffer = None
rest = segment
while len(rest) > 0:
if len(rest) < 1 + LEN_FIELD_SIZE:
raise UnpackError("invalid package length")
part_type = rest[0]
data_start_index = 1+LEN_FIELD_SIZE
part_len = int.from_bytes(rest[1:data_start_index], BO)
data_end_index = data_start_index + part_len
part_data = rest[data_start_index:data_end_index]
rest = rest[data_end_index:]
if part_type == STREAM_RESULT_INFO:
s = ADDR_SIZE + REG_SIZE
if part_len % s != 0:
raise UnpackError("invalid package length")
result_info = []
num_regs = part_len // s
for i in range(num_regs):
addr = part_data[s*i]
enc_val = part_data[s*i+1:s*(i+1)]
rrv = UnpackedRegVal(addr, enc_val)
result_info.append(rrv)
elif part_type == STREAM_BUFFER:
buffer = part_data
else:
raise UnpackError("unknown stream part type")
return UnpackedStreamData(result_info, buffer)
def pack_reg_val(reg_val):
if len(reg_val.val) != REG_SIZE:
raise PackError("register value must be {} bytes".format(REG_SIZE))
packed = bytearray()
packed.extend(reg_val.addr.to_bytes(ADDR_SIZE, BO))
packed.extend(reg_val.val)
return packed
def pack_packet(packet):
if isinstance(packet, UnpackedRegReadRequest):
packet_type = REG_READ_REQUEST
packet_data = bytearray()
packet_data.extend(packet.addr.to_bytes(ADDR_SIZE, BO))
elif isinstance(packet, UnpackedRegWriteRequest):
packet_type = REG_WRITE_REQUEST
packet_data = pack_reg_val(packet.reg_val)
elif isinstance(packet, UnpackedRegReadResponse):
packet_type = REG_READ_RESPONSE
packet_data = pack_reg_val(packet.reg_val)
elif isinstance(packet, UnpackedRegWriteResponse):
packet_type = REG_WRITE_RESPONSE
packet_data = pack_reg_val(packet.reg_val)
else:
raise TypeError("unknown type of packet")
packet_bytes = bytearray()
packet_bytes.append(packet_type)
packet_bytes.extend(packet_data)
return packet_bytes
def extract_packet_from_frame(frame):
if len(frame) < MIN_FRAME_SIZE:
raise ProtocolError("invalid frame (frame too short)")
if frame[0] != START_MARKER:
raise ProtocolError("invalid frame (incorrect start marker)")
if frame[-1] != END_MARKER:
raise ProtocolError("invalid frame (incorrect start marker)")
packet_len = int.from_bytes(frame[1:1+LEN_FIELD_SIZE], BO)
packet = frame[1+LEN_FIELD_SIZE:-1]
if len(packet) - 1 != packet_len:
raise ProtocolError("invalid frame (packet length mismatch)")
return packet
def insert_packet_into_frame(packet):
if not isinstance(packet, bytearray):
packet = pack_packet(packet)
packet_len = len(packet) - 1
frame = bytearray()
frame.append(START_MARKER)
frame.extend(packet_len.to_bytes(LEN_FIELD_SIZE, BO))
frame.extend(packet)
frame.append(END_MARKER)
return frame
def decode_output_buffer(buffer, mode, number_of_subsweeps=None):
mode = get_mode(mode)
if mode == "power_bin":
return np.frombuffer(buffer, dtype="<f4").astype("float")
elif mode == "envelope":
return np.frombuffer(buffer, dtype="<u2").astype("float")
elif mode == "iq":
sweep = np.frombuffer(buffer, dtype="<i2").astype("float") * 2**(-12)
return sweep.reshape((2, -1), order="F").view(dtype="complex").reshape(-1)
elif mode == "sparse":
sweep = np.frombuffer(buffer, dtype="<u2").astype("float")
sweep -= 2**15
sweep = sweep.reshape((number_of_subsweeps, -1))
return sweep
elif mode == "distance_peak_fix_threshold":
sweep = np.frombuffer(buffer, dtype="<f4, <u2")
return sweep.astype("float, float").view("float").reshape((-1, 2))
else:
raise NotImplementedError
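# Hedged usage sketch (not part of the original module): a frame round-trip with an
# illustrative payload. It assumes the constants defined earlier in this module
# (START_MARKER, END_MARKER, LEN_FIELD_SIZE) and the two frame helpers above.
#
#   raw_packet = bytearray(b"\xf5\x02\x00\x00\x00\x01")   # made-up packet bytes
#   frame = insert_packet_into_frame(raw_packet)          # start marker + length field + packet + end marker
#   assert extract_packet_from_frame(frame) == raw_packet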
``` |
{
"source": "95rade/python-buildpack",
"score": 3
} |
#### File: fixtures/avisa/avisa-demo.py
```python
import argparse
import json
import pprint
import time
import uuid
import requests
AVISA = 'http://10.22.237.210:8080'
AVISA_STATUSES = {1: "New / Not Started",
2: "Started / In Progress",
3: "Completed",
-1: "Error",
0: "Stop test",
4: "No Tests"}
DEBUG = False
VERBOSE = False
QUIET = True
FAILSAFE_TIMEOUTS = {1: 15 * 60, # 15m - to start the test
2: 60 * 60} # 60m - to run the test
FAILURE_THRESHOLD = 0
PP = pprint.PrettyPrinter(indent=4, width=120)
#TODO: Need a logger. :)
class TestManager(object):
def __init__(self, playback_url, duration=120, device_type=None, device_id=None, deployment_id=None):
self.deployment_id = deployment_id
self.device_id = device_id
self.device_type = device_type
self.duration = duration
self.playback_url = playback_url
self.test_case_payload_file = 'android-demo.json'
self.test_id = None
self.test_results = None
self.test_status = None
if not device_type and not device_id:
raise Exception("TestManager requires either a device_type or device_id be specified.")
def __enter__(self):
self.reserve()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def _call_avisa(self, url, payload, method, debug=DEBUG):
r = None
status_code = None
response = None
if debug:
print("=== AVISA CALL START ===")
print("URL: {}".format(url))
print("PAYLOAD: \n{}".format(PP.pformat(payload)))
        if method == 'post':
            r = requests.post(url, json=payload)
        elif method == 'put':
            r = requests.put(url, json=payload)
        elif method == 'get':
            r = requests.get(url, json=payload)
        elif method == 'delete':
            r = requests.delete(url, json=payload)
status_code = r.status_code
if debug:
print("RESPONSE: {}".format(PP.pformat(r.content.decode())))
print("STATUS: {}".format(r.status_code))
if status_code != 200:
raise Exception("AVISA CALL FAILED!\nMESSAGE:{}\nSTATUS: {}".format(status_code, r.content.decode()))
else:
response = json.loads(r.content.decode())
if debug:
print("=== AVISA CALL END ===")
return status_code, response
def run(self):
with open(self.test_case_payload_file) as f:
test_payload = json.load(f)
test_payload["deployment_id"] = self.deployment_id
test_payload["tests"][0]["steps"][2]["duration"] = self.duration
test_payload["tests"][0]["steps"][2]["data"] = self.playback_url
test_url = '{}/api/tests/'.format(AVISA)
_, content = self._call_avisa(test_url, test_payload, 'post', debug=VERBOSE)
self.test_id = content['tests'][0]['test_id']
if not QUIET:
print("initiating test - test_id: {}".format(self.test_id))
def reserve(self):
if self.deployment_id is None:
self.deployment_id = str(uuid.uuid4())
if self.device_id:
reserve_url = "{}/api/reservations/device/".format(AVISA)
reserve_payload = {"deployment_id": self.deployment_id, "device_id": self.device_id}
if self.device_type:
raise Exception("Running tests by device type is not yet implemented.")
self._call_avisa(reserve_url, reserve_payload, 'post', debug=VERBOSE)
if not QUIET:
print("reservation - deployment_id: {} | device_id: {}".format(self.deployment_id, self.device_id))
def release(self):
release_url = "{}/api/reservations/{}".format(AVISA, self.deployment_id)
release_payload = {}
if not QUIET:
print("releasing device")
self._call_avisa(release_url, release_payload, 'delete', debug=DEBUG)
def status(self):
status_url = "{}/api/tests/status//{}".format(AVISA, self.test_id)
status_payload = {}
_, response = self._call_avisa(status_url, status_payload, 'get', debug=DEBUG)
self.test_status = response['status']
if not QUIET:
print("self.test_status: {} ({})".format(self.test_status, AVISA_STATUSES[self.test_status]))
return self.test_status
def summary_results(self):
results_url = "{}/api/results/{}".format(AVISA, self.test_id)
results_payload = {}
_, response = self._call_avisa(results_url, results_payload, 'get', debug=DEBUG)
self.test_results = response
if not QUIET:
print("self.test_results: {}".format(PP.pformat(self.test_results)))
return self.test_results
def detailed_results(self, rtype, count=None):
results_url = "{}/api/results/{}/{}".format(AVISA, rtype, self.test_id)
results_payload = {}
if count is not None:
results_url = "{}/api/results/{}/{}?count={}".format(AVISA, rtype, self.test_id, count)
results_payload = {'count': count}
_, response = self._call_avisa(results_url, results_payload, 'get', debug=DEBUG)
return response
def get_latest_results(self):
results = self.detailed_results('video', count=1)
results.update(self.detailed_results('audio', count=1))
return results
def set_log_level(debug=DEBUG, verbose=VERBOSE, quiet=QUIET):
global DEBUG
global VERBOSE
global QUIET
if debug:
verbose = True
quiet = False
elif verbose:
debug = False
quiet = False
elif quiet:
debug = False
verbose = False
DEBUG = debug
VERBOSE = verbose
QUIET = quiet
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run a basic playback test with AVISA.')
parser.add_argument('--url', type=str, default='http://10.22.244.94/BBC_WORLD_HD_TVE.m3u8', action='store',
help='The .m3u8 stream url from which to test playback.')
parser.add_argument('--device_id', type=int, default=None, action='store',
help='The specific device_id of the device with which you want to test playback.')
parser.add_argument('--duration', type=int, default=120, action='store',
help='The number of seconds to run the playback test.')
parser.add_argument('--failure_threshold', type=int, default=FAILURE_THRESHOLD, action='store',
help='The number of failures to tolerate before declaring failure and ending the playback.')
parser.add_argument('--device_type', type=str, default=None, action='store',
help='The type of device with which you want to test playback. (Not Implemented)')
parser.add_argument('--deployment_id', type=str, default=None, action='store',
help='The name of this test, as it will be registered in AVISA.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--quiet', action='store_true')
args = parser.parse_args()
set_log_level(args.debug, args.verbose, args.quiet)
tm = TestManager(playback_url=args.url,
deployment_id=args.deployment_id,
duration=args.duration,
device_type=args.device_type,
device_id=args.device_id)
failures = {'audio': {}, 'video': {}}
with tm:
tm.run()
total_failures = 0
while tm.status() in [1, 2]:
            if tm.test_status == 2:
latest_results = tm.get_latest_results()
print(PP.pformat(latest_results))
if 'audio' in latest_results.keys() and latest_results['audio']:
if latest_results['audio'][0]['audio_loudness'] == 0:
print("audio freeze failure detected")
failures['audio'].update({time.time(): "Audio Loudness: 0 - Frozen"})
if 'video' in latest_results.keys() and latest_results['video']:
if latest_results['video'][0]['video_motion'] == 0:
print("video freeze failure detected")
failures['video'].update({time.time(): "Video Motion: 0 - Frozen"})
total_failures = len(failures['audio'].keys()) + len(failures['video'].keys())
if total_failures > args.failure_threshold:
print('Exiting - Too many failures encountered.')
print("failures:\n{}".format(PP.pformat(failures)))
print("total failures: {}".format(total_failures))
exit(1)
time.sleep(5)
print("results: {}".format(PP.pformat(tm.summary_results())))
print("failures:\n{}".format(PP.pformat(failures)))
print("total failures: {}".format(total_failures))
exit(0)
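# Example invocation (hedged: the stream URL and device id below are placeholders,
# not values taken from the original repository):
#   python avisa-demo.py --url http://example.host/stream.m3u8 --device_id 7 --duration 300 --verbose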
``` |
{
"source": "95subodh/Leetcode",
"score": 3
} |
#### File: 95subodh/Leetcode/018. 4Sum.py
```python
import itertools
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums=sorted(nums)
ans=[]
for i in xrange(len(nums)):
for j in xrange(i+1,len(nums)):
k=j+1
l=len(nums)-1
while k<l:
t=nums[i]+nums[j]+nums[k]+nums[l]
if t<target:
k+=1
elif t>target:
l-=1
else:
ans.append([nums[i],nums[j],nums[k],nums[l]])
k+=1
ans.sort()
return list(ans for ans,_ in itertools.groupby(ans))
```
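A quick sanity check for the 4Sum solution above (a hedged sketch; it assumes a Python 2 interpreter, since the solution relies on `xrange`):
```python
# Expected output: [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
print(Solution().fourSum([1, 0, -1, 0, -2, 2], 0))
```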
#### File: 95subodh/Leetcode/058. Length of Last Word.py
```python
class Solution(object):
def lengthOfLastWord(self, s):
"""
:type s: str
:rtype: int
"""
s=map(str,s.split())
return len(s[-1]) if len(s) else 0
```
#### File: 95subodh/Leetcode/063. Unique Paths II.py
```python
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
m,n=len(obstacleGrid),len(obstacleGrid[0])
l=[[0 for i in xrange(n)] for i in xrange(m)]
for i in xrange(m):
for j in xrange(n):
if obstacleGrid[i][j]==0:
if i==0 and j==0:
l[i][j]=1
elif i==0:
l[i][j]=l[i][j-1]
elif j==0:
l[i][j]=l[i-1][j]
else:
l[i][j]+=l[i-1][j]+l[i][j-1]
return l[m-1][n-1]
```
#### File: 95subodh/Leetcode/069. Sqrt(x).py
```python
class Solution(object):
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
return int(x**0.5)
```
#### File: 95subodh/Leetcode/082. Remove Duplicates from Sorted List II.py
```python
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
temp=head
while temp:
z=temp.val
if temp==head and temp.next and temp.next.val==z:
temp2=temp.next
while temp2 and temp2.val==z:
temp2=temp2.next
head=temp=temp2
elif temp.next and temp.next.next and temp.next.val==temp.next.next.val:
temp2=temp.next
z2=temp2.val
while temp2 and temp2.val==z2:
temp2=temp2.next
temp.next=temp2
else:
temp=temp.next
return head
```
#### File: 95subodh/Leetcode/088. Merge Sorted Array.py
```python
class Solution(object):
def merge(self, A, m, B, n):
"""
:type A: List[int]
:type m: int
:type B: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
        indA = m - 1
        indB = n - 1
while indA >=0 and indB>=0:
if A[indA] > B[indB]:
A[indA+indB+1] = A[indA]
indA -= 1
else:
A[indA+indB+1] = B[indB]
indB -= 1
while indB >= 0:
A[indB] = B[indB]
indB -= 1
```
#### File: 95subodh/Leetcode/103. Binary Tree Zigzag Level Order Traversal.py
```python
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
ans=[]
child=[root]
c=0
while child:
temp,temp2=[],[]
for i in child:
if i:
temp.append(i.left)
temp.append(i.right)
temp2.append(i.val)
if temp2:
if c&1:
ans.append(temp2[::-1])
else:
ans.append(temp2)
c+=1
child=temp
return ans
```
#### File: 95subodh/Leetcode/107. Binary Tree Level Order Traversal II.py
```python
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
visited,queue=[],[root]
while queue:
temp=[]
child=[]
for i in queue:
temp.append(i.val)
if i.left:
child.append(i.left)
if i.right:
child.append(i.right)
queue=child
visited.append(temp)
return visited[::-1]
```
#### File: 95subodh/Leetcode/108. Convert Sorted Array to Binary Search Tree.py
```python
class Solution(object):
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
def sol(srt,end):
if srt==end:
return None
mid=srt+(end-srt)/2
root=TreeNode(nums[mid])
root.left = sol(srt, mid)
root.right = sol(mid+1, end)
return root
return sol(0, len(nums))
```
#### File: 95subodh/Leetcode/110. Balanced Binary Tree.py
```python
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None:
return True
def check(l):
if l[0] is None and l[1] is None:
return [True, l[2], l[2]]
elif l[0] is None:
x=check([l[1].left, l[1].right, l[2]+1])
return [max(x[1],x[2])<=l[2]+1 and x[0], l[2], max(x[1],x[2])]
elif l[1] is None:
x=check([l[0].left, l[0].right, l[2]+1])
return [max(x[1],x[2])<=l[2]+1 and x[0], max(x[1],x[2]), l[2]]
x=check([l[0].left, l[0].right, l[2]+1])
y=check([l[1].left, l[1].right, l[2]+1])
return [x[0] and y[0] and abs( max(x[1],x[2]) - max(y[1],y[2]) ) < 2, max(x[1],x[2]), max(y[1],y[2])]
return check([root.left, root.right, 1])[0]
```
#### File: 95subodh/Leetcode/116. Populating Next Right Pointers in Each Node.py
```python
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
stk=[]
if root:
stk=[root]
root=next = None
while stk:
z=stk.pop()
if z.left:
z.left.next=z.right
stk.append(z.left)
if z.right:
if z.next:
z.right.next=z.next.left
else:
z.right.next=None
stk.append(z.right)
```
#### File: 95subodh/Leetcode/122. Best Time to Buy and Sell Stock II.py
```python
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
profit=0
if len(prices):
buy=prices[0]
for i in xrange(1,len(prices)):
if prices[i]<prices[i-1]:
profit+=prices[i-1]-buy
buy=prices[i]
if len(prices):
profit+=prices[-1]-buy
return profit
```
#### File: 95subodh/Leetcode/141. Linked List Cycle.py
```python
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
turt=head
hare=head
while hare and hare.next:
hare=hare.next.next
turt=turt.next
if hare==turt:
return True
return False
```
#### File: 95subodh/Leetcode/151. Reverse Words in a String.py
```python
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
l=list(s.split(' '))
l=filter(None, l)
l= l[::-1]
z=""
for i in xrange(len(l)):
if i>0:
z+=" "
z+=l[i]
return z
```
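A minimal usage sketch for the solution above (assumes Python 2, since the loop uses `xrange`):
```python
# Prints: blue is sky the
print(Solution().reverseWords("the sky is blue"))
```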
#### File: 95subodh/Leetcode/152. Maximum Product Subarray.py
```python
class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def sol(num):
if len(num)==0:
return -pow(10, 18)
s=1
for i in num:
s*=i
if s>0 or len(num)==1:
return s
else:
s1=s2=s
for i in num:
s1/=i
if i<0:
break
for i in num[::-1]:
s2/=i
if i<0:
break
return max(s1,s2)
k=-1
mx=-pow(10, 18)
for i in xrange(len(nums)):
z=mx
if nums[i]==0:
z=max(sol(nums[k+1:i]),0)
k=i
elif i==len(nums)-1:
z=sol(nums[k+1:i+1])
mx=max(mx,z)
return mx
```
#### File: 95subodh/Leetcode/199. Binary Tree Right Side View.py
```python
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
q=[]
visited=[]
if root:
q.append(root)
while q:
qnew=[]
visited.append(q[-1].val)
for i in q:
if i.left:
qnew.append(i.left)
if i.right:
qnew.append(i.right)
q=qnew
return visited
```
#### File: 95subodh/Leetcode/201. Bitwise AND of Numbers Range.py
```python
class Solution(object):
def rangeBitwiseAnd(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
x=pow(2, 31)
y=pow(2, 32)
c=0
while x:
if x<=m and n<y:
m-=x
n-=x
c+=x
x/=2
y/=2
return c
```
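A small check of the range-AND solution above, using the classic example (hedged: it depends on Python 2 integer division for `x /= 2`):
```python
# 5 & 6 & 7 == 4
print(Solution().rangeBitwiseAnd(5, 7))
```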
#### File: 95subodh/Leetcode/205. Isomorphic Strings.py
```python
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
dict1={}
dict2={}
if len(s)!=len(t):
return False
for i in xrange(len(s)):
if s[i] in dict1 and dict1[s[i]]!=t[i]:
return False
if t[i] in dict2 and dict2[t[i]]!=s[i]:
return False
else:
dict1[s[i]]=t[i]
dict2[t[i]]=s[i]
return True
```
#### File: 95subodh/Leetcode/206. Reverse Linked List.py
```python
#Reverse a singly linked list.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
back=head
curr=None
if back:
curr=head.next
back.next=None
while curr:
forw=curr.next
curr.next=back
back=curr
curr=forw
return back
```
#### File: 95subodh/Leetcode/228. Summary Ranges.py
```python
class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
l=[]
s=""
if nums:
s=str(nums[0])
i=0
while i<len(nums):
j=i+1
while j<len(nums) and nums[j]==nums[j-1]+1:
j+=1
if i!=j-1:
s+='->'+str(nums[j-1])
i=j
l.append(s)
s=""
if i<len(nums):
s=str(nums[i])
if s:
l.append(s)
return l
```
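A short usage sketch for the summary-ranges solution above (version-agnostic; it uses no Python 2 specific builtins):
```python
# Prints: ['0->2', '4->5', '7']
print(Solution().summaryRanges([0, 1, 2, 4, 5, 7]))
```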
#### File: 95subodh/Leetcode/232. Implement Queue using Stacks.py
```python
class Queue(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.x,self.y=[],[]
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.y.append(x)
def pop(self):
"""
:rtype: nothing
"""
self.peek()
self.x.pop()
def peek(self):
"""
:rtype: int
"""
if not len(self.x):
while len(self.y):
self.x.append(self.y.pop())
return self.x[-1]
def empty(self):
"""
:rtype: bool
"""
return False if len(self.x)+len(self.y) else True
```
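A brief usage sketch for the two-stack queue above, showing FIFO order:
```python
q = Queue()
q.push(1)
q.push(2)
print(q.peek())   # 1 (front of the queue)
q.pop()           # removes 1
print(q.empty())  # False, 2 is still queued
```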
#### File: 95subodh/Leetcode/349. Intersection of Two Arrays.py
```python
class Solution(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
l=[]
for i in nums1:
if i in nums2:
l.append(i)
return list(set(l))
```
#### File: 95subodh/Leetcode/365. Water and Jug Problem.py
```python
import fractions
class Solution(object):
def canMeasureWater(self, x, y, z):
"""
:type x: int
:type y: int
:type z: int
:rtype: bool
"""
gcd=fractions.gcd(x,y)
return True if x+y>=z and (z==0 or (gcd and z%gcd==0)) else False
```
#### File: 95subodh/Leetcode/367. Valid Perfect Square.py
```python
class Solution(object):
def isPerfectSquare(self, num):
"""
:type num: int
:rtype: bool
"""
return num==int(num**0.5)**2
```
#### File: 95subodh/Leetcode/375. Guess Number Higher or Lower II.py
```python
class Solution(object):
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
dp = [[0] * (n + 1) for _ in range(n + 1)]
return self.solve(dp, 1, n)
def solve(self, dp, L, R):
if L >= R: return 0
if dp[L][R]: return dp[L][R]
dp[L][R] = min(i + max(self.solve(dp, L, i - 1), self.solve(dp, i + 1, R)) for i in range(L, R + 1))
return dp[L][R]
```
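A quick check of the DP solution above against the well-known n = 10 case:
```python
# Guaranteed-win cost for n = 10 is 16
print(Solution().getMoneyAmount(10))
```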
#### File: 95subodh/Leetcode/389. Find the Difference.py
```python
class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
s=sorted(s)
t=sorted(t)
for i in xrange(len(s)):
if s[i]!=t[i]:
return t[i]
return t[-1]
```
#### File: 95subodh/Leetcode/396. Rotate Function.py
```python
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
s=sum(A)
ans=0
for i in xrange(len(A)):
ans+=i*A[i]
prev=ans
for i in xrange(len(A)):
prev=prev-s+A[i]*len(A)
ans=max(ans,prev)
return ans
```
#### File: 95subodh/Leetcode/423. Reconstruct Original Digits from English.py
```python
class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
l=[0 for i in xrange(26)]
for i in s:
l[ord(i)-ord('a')]+=1
z=['zero','one','two','three','four','five','six','seven','eight','nine']
ans=""
xx=[0,8,6,2,4,7,5,1,9,3]
k=0
while k<10:
i=xx[k]
x=len(s)
for j in z[i]:
x=min(x,l[ord(j)-ord('a')])
for j in xrange(x):
ans+=str(i)
for j in z[i]:
l[ord(j)-ord('a')]-=x
k+=1
return "".join(sorted(ans))
``` |
{
"source": "9600dev/mmr",
"score": 2
} |
#### File: mmr/scripts/arctic_rename_symbols.py
```python
import sys
import os
# in order to get __main__ to work, we follow: https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
PACKAGE_PARENT = '../'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import datetime as dt
import click
import random
import os
import logging
from arctic import Arctic, TICK_STORE
from typing import List, Dict, Tuple, Callable, Optional, Set, Generic, TypeVar, cast, Union
from ib_insync import Stock, IB, Contract, Forex, BarData, Future
from trader.listeners.ibrx import IBRx
from trader.data.data_access import Data, TickData
def rename_symbols(csv_file: str,
arctic_server_address: str,
arctic_library: str):
contracts = pd.read_csv(csv_file)
data = TickData(arctic_server_address, arctic_library)
symbol_list: List[str] = data.list_symbols(data.historical) # type: ignore
for index, row in contracts.iterrows():
symbol = row['symbol']
conid = row['conId']
if symbol in symbol_list:
print('getting {} {}'.format(symbol, conid))
result = data.get_data(symbol)
contract = Contract(conId=conid)
data.write(contract, result)
print('deleting {}'.format(symbol))
data.delete(contract)
@click.command()
@click.option('--contract_csv_file', required=False, help='conid csv file')
@click.option('--arctic_server_address', required=False, default='127.0.0.1', help='arctic server address 127.0.0.1')
@click.option('--arctic_library', required=False, default='Historical', help='arctic library to rename')
def main(contract_csv_file: str,
arctic_server_address: str,
arctic_library: str):
rename_symbols(contract_csv_file, arctic_server_address, arctic_library)
if __name__ == '__main__':
logging.disable(logging.CRITICAL)
logger = logging.getLogger('__main__')
logger.propagate = False
main()
```
#### File: mmr/scripts/atws_app_check.py
```python
import sys
import subprocess
from subprocess import Popen, PIPE
def atws_app_check():
process = Popen(['adb', 'shell', 'ps', '|', 'grep', 'atws.app'], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
result = stdout.decode().strip() + stderr.decode().strip()
process.wait()
errcode = process.returncode
return not ('error' in result or len(result) == 0 or errcode > 0)
if __name__ == '__main__':
print(atws_app_check())
```
#### File: mmr/scripts/test_pycron_sync.py
```python
import os
import sys
import time
import random
def main(arg):
rand_int = random.randint(61, 120)
print('test_pycron_sync {} waiting {} secs'.format(arg, str(rand_int)))
time.sleep(rand_int)
if __name__ == '__main__':
main(sys.argv[1])
```
#### File: trader/batch/ib_history_batch.py
```python
import sys
import os
# in order to get __main__ to work, we follow: https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import datetime as dt
import ib_insync as ibapi
import pandas as pd
import random
import warnings
import asyncio
from redis import Redis
from rq import Queue
from rq.job import Job
from ib_insync.ib import IB
from dateutil.tz import tzlocal
from typing import Tuple, List, Optional, cast
import exchange_calendars
from exchange_calendars import ExchangeCalendar
warnings.simplefilter(action='ignore', category=FutureWarning)
from arctic.exceptions import OverlappingDataException
from arctic.date import DateRange
from trader.data.data_access import TickData, DictData, SecurityDefinition
from trader.data.contract_metadata import ContractMetadata
from trader.data.universe import Universe
from trader.common.logging_helper import setup_logging
from trader.common.helpers import date_range, dateify, day_iter, get_exchange_calendar, pdt
from trader.common.listener_helpers import Helpers
from trader.listeners.ib_history_worker import IBHistoryWorker
from trader.batch.queuer import Queuer
from trader.container import Container
from trader.objects import WhatToShow
logging = setup_logging(module_name='ib_history_batch')
class IBHistoryQueuer(Queuer):
def __init__(
self,
ib_server_address: str,
ib_server_port: int,
arctic_server_address: str,
arctic_library: str,
redis_server_address: str,
redis_server_port: int
):
super().__init__(redis_queue='history',
redis_server_address=redis_server_address,
redis_server_port=redis_server_port)
self.data = TickData(arctic_server_address, arctic_library)
self.ib_server_address = ib_server_address
self.ib_server_port = ib_server_port
self.arctic_server_address = arctic_server_address
self.arctic_library = arctic_library
def queue_history(self,
security_definitions: List[SecurityDefinition],
bar_size: str = '1 min',
start_date: dt.datetime = dateify(dt.datetime.now() - dt.timedelta(days=5), timezone='America/New_York'),
end_date: dt.datetime = dateify(
dt.datetime.now() - dt.timedelta(days=1),
timezone='America/New_York',
make_eod=True
)):
for security in security_definitions:
# find the missing dates between start_date and end_date, and queue them up
exchange_calendar = exchange_calendars.get_calendar(security.primaryExchange)
date_ranges = self.data.missing(security,
exchange_calendar,
date_range=DateRange(start=start_date, end=end_date))
logging.debug('missing dates for {}: {}'.format(security.symbol, date_ranges))
for date_dr in date_ranges:
if (
not self.is_job_queued(self.args_id([security, date_dr.start, date_dr.end, bar_size]))
):
logging.info('enqueing {} from {} to {}'.format(Universe.to_contract(security),
pdt(date_dr.start), pdt(date_dr.end)))
history_worker = BatchIBHistoryWorker(
ib_server_address=self.ib_server_address,
ib_server_port=self.ib_server_port,
ib_client_id=random.randint(10, 100),
arctic_server_address=self.arctic_server_address,
arctic_library=self.arctic_library,
redis_server_address=self.redis_server_address,
redis_server_port=self.redis_server_port)
self.enqueue(history_worker.do_work, [security, date_dr.start, date_dr.end, bar_size])
class BatchIBHistoryWorker():
def __init__(self,
ib_server_address: str,
ib_server_port: int,
ib_client_id: int,
arctic_server_address: str,
arctic_library: str,
redis_server_address: str,
redis_server_port: int):
self.ib_server_address = ib_server_address
self.ib_server_port = ib_server_port
self.ib_client_id = ib_client_id
self.arctic_server_address = arctic_server_address
self.arctic_library = arctic_library
self.redis_server_address = redis_server_address
self.redis_server_port = redis_server_port
self.data: TickData
def do_work(self, security: SecurityDefinition, start_date: dt.datetime, end_date: dt.datetime, bar_size: str) -> bool:
setup_logging(module_name='batch_ib_history_worker', suppress_external_info=True)
# hacky way of reusing ib connections
ib = cast(IB, Container().resolve_cache(IB))
if not ib.isConnected():
ib.connect(host=self.ib_server_address, port=self.ib_server_port, clientId=self.ib_client_id)
self.ib_history = IBHistoryWorker(ib)
self.data = TickData(self.arctic_server_address, self.arctic_library)
logging.info('do_work: {} {} {} {}'.format(security.symbol, pdt(start_date), pdt(end_date), bar_size))
# result = self.ib_history.get_and_populate_stock_history(cast(Stock, contract), bar_size, start_date, end_date)
result = asyncio.run(self.ib_history.get_contract_history(
security=Universe.to_contract(security),
what_to_show=WhatToShow.TRADES,
start_date=start_date,
end_date=end_date,
bar_size=bar_size,
filter_between_dates=True
))
self.data.write(security, result)
return True
```
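A hedged sketch of driving the queuer above. The addresses, library names and the 'portfolio' universe name are placeholders, and it assumes the Arctic, Redis and IB gateway services used elsewhere in this repository are reachable:
```python
from trader.data.universe import UniverseAccessor

accessor = UniverseAccessor('127.0.0.1', 'universes')            # placeholder universe library
definitions = accessor.get('portfolio').security_definitions     # List[SecurityDefinition]
queuer = IBHistoryQueuer(
    ib_server_address='127.0.0.1', ib_server_port=7496,
    arctic_server_address='127.0.0.1', arctic_library='Historical',
    redis_server_address='127.0.0.1', redis_server_port=6379)
queuer.queue_history(definitions, bar_size='1 min')
```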
#### File: trader/batch/non_fork_worker.py
```python
import time
import sys
import random
import datetime
import rq
import rq.job
import rq.compat
import rq.worker
from rq.defaults import (DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT)
class NonForkWorker(rq.Worker):
def __init__(self, *args, **kwargs):
if kwargs.get('default_worker_ttl', None) is None:
kwargs['default_worker_ttl'] = 2
super(NonForkWorker, self).__init__(*args, **kwargs)
def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False):
self.default_worker_ttl = 2
return super(NonForkWorker, self).work(
burst=burst,
logging_level=logging_level,
date_format=date_format,
log_format=log_format,
max_jobs=max_jobs,
with_scheduler=with_scheduler
)
def execute_job(self, job, queue):
self.main_work_horse(job, queue)
def main_work_horse(self, job, queue):
random.seed()
self._is_horse = True
success = self.perform_job(job, queue)
self._is_horse = False
def perform_job(self, job, queue, heartbeat_ttl=None):
self.prepare_job_execution(job)
self.procline('Processing %s from %s since %s' % (
job.func_name,
job.origin, time.time()))
try:
job.started_at = datetime.datetime.now()
# I have DISABLED the time limit!
rv = job.perform()
# Pickle the result in the same try-except block since we need to
# use the same exc handling when pickling fails
job._result = rv
job._status = rq.job.JobStatus.FINISHED
job.ended_at = datetime.datetime.now()
with self.connection.pipeline() as pipeline:
pipeline.watch(job.dependents_key)
queue.enqueue_dependents(job, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_successful_job_count(pipeline=pipeline)
result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl != 0:
job.save(pipeline=pipeline, include_meta=False)
job.cleanup(result_ttl, pipeline=pipeline,
remove_from_queue=False)
pipeline.execute()
except:
# Use the public setter here, to immediately update Redis
job.status = rq.job.JobStatus.FAILED
self.handle_exception(job, *sys.exc_info())
return False
if rv is None:
self.log.info('Job OK')
else:
self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))
if result_ttl == 0:
self.log.info('Result discarded immediately.')
elif result_ttl > 0:
self.log.info('Result is kept for %d seconds.' % result_ttl)
else:
self.log.warning('Result will never expire, clean up result key manually.')
return True
```
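A hedged sketch of starting this worker; it assumes a local Redis instance and a queue named 'history' (both placeholders), and otherwise follows the standard rq worker pattern:
```python
from redis import Redis

worker = NonForkWorker(['history'], connection=Redis('127.0.0.1', 6379))
worker.work(with_scheduler=False)   # jobs run in-process, no fork (see execute_job above)
```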
#### File: trader/common/reactive.py
```python
from abc import abstractmethod
import aioreactive as rx
import datetime as dt
import asyncio
import pandas as pd
from asyncio import iscoroutinefunction
from aioreactive.types import AsyncObserver, AsyncObservable
from aioreactive.subject import AsyncMultiSubject
from typing import TypeVar, Optional, Callable, Awaitable, Tuple, Generic, Dict, cast, List, Union
from functools import wraps
from eventkit import Event, event
from expression.system.disposable import AsyncDisposable
# With aioreactive you subscribe observers to observables
TSource = TypeVar('TSource')
TResult = TypeVar('TResult')
TKey = TypeVar('TKey')
Any = TypeVar('Any')
async def anoop(value: Optional[Any] = None):
pass
# With aioreactive you subscribe observers to observables
class AsyncCachedObserver(AsyncObserver[TSource]):
def __init__(self,
asend: Callable[[TSource], Awaitable[None]] = anoop,
athrow: Callable[[Exception], Awaitable[None]] = anoop,
aclose: Callable[[], Awaitable[None]] = anoop,
capture_asend_exception: bool = False):
super().__init__()
assert iscoroutinefunction(asend)
self._asend = asend
assert iscoroutinefunction(athrow)
self._athrow = athrow
assert iscoroutinefunction(aclose)
self._aclose = aclose
self._value: Optional[TSource] = None
self._dt: Optional[dt.datetime] = None
self._capture_ex = capture_asend_exception
self._task: asyncio.Event = asyncio.Event()
async def asend(self, value: TSource) -> None:
self._value = value
self._dt = dt.datetime.now()
if self._capture_ex:
try:
self._task.set()
await self._asend(value)
except Exception as ex:
self._task.clear()
await self._athrow(ex)
else:
self._task.set()
await self._asend(value)
async def athrow(self, error: Exception) -> None:
self._task.clear()
await self._athrow(error)
async def aclose(self) -> None:
self._task.clear()
await self._aclose()
def value(self) -> Optional[TSource]:
return self._value
async def wait_value(self) -> TSource:
if self._value:
return self._value
else:
await self._task.wait()
self._task.clear()
return cast(TSource, self._value)
def dt(self) -> Optional[dt.datetime]:
return self._dt
class AsyncCachedObservable(AsyncObservable[TSource]):
@abstractmethod
async def subscribe_async(self, observer: AsyncObserver[TSource]) -> AsyncDisposable:
raise NotImplementedError
@abstractmethod
def value(self) -> Optional[TSource]:
raise NotImplementedError
@abstractmethod
def value_dt(self) -> Optional[Tuple[TSource, dt.datetime]]:
raise NotImplementedError
class AsyncCachedSubject(AsyncMultiSubject[TSource], AsyncCachedObservable[TSource]):
def __init__(self):
super().__init__()
self._value: Optional[TSource] = None
self._datetime: Optional[dt.datetime] = None
self._task: asyncio.Event = asyncio.Event()
async def asend(self, value: TSource) -> None:
self.check_disposed()
if self._is_stopped:
return
self._task.set()
self._value = value
        self._datetime = dt.datetime.now()
for obv in list(self._observers):
await obv.asend(self._value)
async def subscribe_async(self, observer: AsyncObserver[TSource]) -> AsyncDisposable:
self.check_disposed()
self._observers.append(observer)
async def dispose() -> None:
if observer in self._observers:
self._observers.remove(observer)
result = AsyncDisposable.create(dispose)
# send the last cached result
if self._value:
await observer.asend(self._value)
return result
def value(self) -> Optional[TSource]:
return self._value
async def wait_value(self) -> TSource:
if self._value:
return self._value
else:
await self._task.wait()
            return cast(TSource, self._value)
def value_dt(self) -> Optional[Tuple[TSource, dt.datetime]]:
if self._value and self._datetime:
return (self._value, self._datetime)
else:
return None
class AsyncCachedPandasSubject(AsyncCachedSubject[pd.DataFrame]):
def __init__(self):
super().__init__()
async def asend(self, value: pd.DataFrame) -> None:
self.check_disposed()
if self._is_stopped:
return
self._task.set()
# if self._value is not None:
# self._value = self._value.append(value)
# else:
self._value = value
        self._datetime = dt.datetime.now()
for obv in list(self._observers):
await obv.asend(self._value)
class AsyncEventSubject(AsyncCachedSubject[TSource]):
def __init__(self, eventkit_event: Optional[Union[Event, List[Event]]] = None):
super().__init__()
self.eventkit_event: List[Event] = []
if eventkit_event and type(eventkit_event) is list:
self.eventkit_event = eventkit_event
for e in eventkit_event:
e += self.on_eventkit_update
elif eventkit_event and type(eventkit_event) is Event:
self.eventkit_event += [eventkit_event]
e = cast(Event, eventkit_event)
e += self.on_eventkit_update
async def subscribe_to_eventkit_event(self, eventkit: Union[List[Event], Event]) -> None:
if type(eventkit) is Event:
self.eventkit_event += [eventkit]
eventkit = cast(Event, eventkit)
eventkit += self.on_eventkit_update
elif type(eventkit) is list:
for e in eventkit:
e += self.on_eventkit_update
async def call_event_subscriber(self, awaitable_event_subscriber: Awaitable[TSource]) -> None:
result = await awaitable_event_subscriber
# todo this doesn't feel right. I want isinstance(result, TSource) but that doesn't work
if result:
await self.asend(result)
def call_event_subscriber_sync(self, callable_lambda: Callable):
result = callable_lambda()
if result:
test = asyncio.run(self.asend(result))
async def call_cancel_subscription(self, awaitable_canceller: Awaitable):
await awaitable_canceller
await self.aclose()
def call_cancel_subscription_sync(self, callable_lambda: Callable):
callable_lambda()
asyncio.run(self.aclose())
async def on_eventkit_update(self, e: TSource, *args):
await self.asend(e)
def awaitify(sync_func):
@wraps(sync_func)
async def async_func(*args, **kwargs):
return sync_func(*args, **kwargs)
return async_func
# class SubscribeEventHelper(Generic[TKey, TValue]):
# def __init__(self, source: AsyncEventSubject[TValue]):
# self.cache: Dict[TKey, rx.AsyncObservable[TValue]] = {}
# self.source: AsyncEventSubject[TValue] = source
# self.subject = AsyncCachedSubject[TValue]()
# async def subscribe(self, key: TKey,
# source: AsyncEventSubject[TValue],
# filter_function: Callable[[AsyncObservable[TValue]], AsyncObservable[TValue]]
# ) -> rx.AsyncObservable[TValue]:
# if key in self.cache:
# return self.cache[key]
# else:
# # call, then attach a filter to the result
# await source.subscribe_async(self.subject)
# xs = pipe(
# self.subject,
# filter_function
# )
# self.cache[key] = xs
# return self.cache[key]
# async def on_event_update(self, event_update: TValue):
# await self.subject.asend(event_update)
```
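A self-contained, hedged sketch of the cached subject/observer pair defined above; the 'tick' payload is illustrative only:
```python
import asyncio

async def demo():
    async def on_value(x):
        print('received', x)
    subject = AsyncCachedSubject()
    observer = AsyncCachedObserver(asend=on_value)
    await subject.subscribe_async(observer)
    await subject.asend('tick')    # delivered to the observer and cached
    print(subject.value())         # 'tick'; late subscribers are replayed this value

asyncio.run(demo())
```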
#### File: trader/data/contract_metadata.py
```python
from ib_insync.contract import Contract
from typing import Tuple, List, Optional, Dict, TypeVar, Generic, Type, Union, cast
import datetime as dt
class ContractMetadata():
def __init__(self,
contract: Contract,
history_no_data_dates: List[dt.datetime],
history_overlapping_data_dates: List[dt.datetime]):
self.contract = contract
self.history_no_data_dates = history_no_data_dates
self.history_overlapping_data = history_overlapping_data_dates
def to_dict(self):
return vars(self)
def add_no_data(self, date_time: dt.datetime):
self.history_no_data_dates.append(date_time)
def add_overlapping_data(self, date_time: dt.datetime):
self.history_overlapping_data.append(date_time)
def has_been_crawled(self, date_time: dt.datetime):
return date_time in self.history_no_data_dates or date_time in self.history_overlapping_data
def __str__(self):
return '{} {} {}'.format(self.contract, self.history_no_data_dates, self.history_overlapping_data)
```
#### File: trader/data/universe.py
```python
from dataclasses import dataclass, fields
import os
import csv
from types import resolve_bases
import numpy as np
import pandas as pd
import vectorbt as vbt
import datetime as dt
from dateutil.tz import tzlocal, gettz
from dateutil.tz.tz import tzfile
from pandas.core.base import PandasObject
from arctic import Arctic, TICK_STORE, VERSION_STORE
from arctic.tickstore.tickstore import VERSION, TickStore
from arctic.store.version_store import VersionStore
from arctic.exceptions import NoDataFoundException
from typing import Tuple, List, Optional, Dict, TypeVar, Generic, Type, Union, cast, Set
from ib_insync.contract import Contract
from trader.data.data_access import SecurityDefinition
class Universe():
def __init__(self, name: str, security_definitions: List[SecurityDefinition] = []):
self.name: str = name
self.historical_tick_store: str = name
self.security_definitions: List[SecurityDefinition] = security_definitions
@staticmethod
def to_contract(definition: Union[SecurityDefinition, Contract]) -> Contract:
if isinstance(definition, SecurityDefinition):
contract = Contract(secType=definition.secType, conId=definition.conId, symbol=definition.symbol,
currency=definition.currency, exchange=definition.exchange,
primaryExchange=definition.primaryExchange)
return contract
elif isinstance(definition, Contract):
return definition
else:
raise ValueError('unable to cast type to Contract')
def find_contract(self, contract: Contract) -> Optional[SecurityDefinition]:
for definition in self.security_definitions:
if definition.conId == contract.conId:
return definition
return None
class UniverseAccessor():
def __init__(self, arctic_server_address: str, arctic_universe_library: str):
self.arctic_server_address = arctic_server_address
self.arctic_library = arctic_universe_library
self.store = Arctic(self.arctic_server_address)
self.store.initialize_library(self.arctic_library, lib_type=VERSION_STORE)
self.library: VersionStore = self.store[self.arctic_library]
def list_universes(self) -> List[str]:
result = self.library.list_symbols()
# move the portfolio universe to the front
if 'portfolio' in result:
result.remove('portfolio')
result.insert(0, 'portfolio')
return result
def list_universes_count(self) -> Dict[str, int]:
universes = self.get_all()
result = {}
for u in universes:
result[u.name] = len(u.security_definitions)
return result
def get_all(self) -> List[Universe]:
result: List[Universe] = []
for name in self.list_universes():
u = self.get(name)
if u:
result.append(u)
return result
def get(self, name: str) -> Universe:
try:
return self.library.read(name).data
except NoDataFoundException:
return Universe(name)
def find_contract(self, contract: Contract) -> Optional[Universe]:
for universe in self.get_all():
for definition in universe.security_definitions:
if contract.conId == definition.conId:
return universe
return None
def update(self, universe: Universe) -> None:
self.library.write(universe.name, universe)
def delete(self, name: str) -> None:
self.library.delete(name)
def update_from_csv_str(self, name: str, csv_str: str) -> int:
reader = csv.DictReader(csv_str.splitlines())
defs: List[SecurityDefinition] = []
counter = 0
for row in reader:
args = {}
for n in [field.name for field in fields(SecurityDefinition)]:
try:
args[n] = row[n]
except KeyError:
args[n] = ''
security_definition = SecurityDefinition(**args)
defs.append(security_definition)
counter += 1
self.update(Universe(name, defs))
return counter
```
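A hedged sketch of the accessor above. The server address, library name, universe name and CSV columns are illustrative, and a running Arctic/MongoDB instance is assumed:
```python
csv_text = "conId,symbol,secType,currency,exchange,primaryExchange\n756733,SPY,STK,USD,SMART,ARCA\n"
accessor = UniverseAccessor('127.0.0.1', 'universes')
count = accessor.update_from_csv_str('demo', csv_text)   # writes/overwrites the 'demo' universe
print(count, accessor.list_universes_count())
```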
#### File: trader/messaging/messaging_client.py
```python
from re import I, sub
import zmq
import rx
import click
import logging
import coloredlogs
import asyncio
import random
import functools
import json
from asyncio import AbstractEventLoop
from bson import json_util
from rx import operators as ops
from rx import Observable
from rx.subject import Subject
from rx.scheduler import ThreadPoolScheduler, CatchScheduler, CurrentThreadScheduler
from rx.scheduler.periodicscheduler import PeriodicScheduler
from rx.scheduler.eventloop import AsyncIOThreadSafeScheduler
from rx.core.typing import Observer, Scheduler, OnNext, OnError, OnCompleted
from rx.disposable import Disposable
# from zmq.sugar.context import Context
from zmq.sugar.socket import Socket
from zmq.asyncio import Context, Poller
from typing import Optional, Union, Dict
from trader.common.helpers import from_aiter
class MessagingPublisher(Observer):
def __init__(self,
publish_ip_address: str = '127.0.0.1',
publish_port: int = 5001,
loop: AbstractEventLoop = None):
super(Observer, self).__init__()
self.publish_ip_address = publish_ip_address
self.publish_port = publish_port
self.loop = loop
self.publish_context = Context()
self.publish_socket = self.publish_context.socket(zmq.PUB) # type: ignore
self.publish_socket.bind('tcp://{}:{}'.format(self.publish_ip_address, self.publish_port))
def on_next(self, message: Dict):
if not type(message) == dict:
raise ValueError('message should be of type dict')
json_message = json.dumps(message, default=json_util.default)
self.publish_socket.send_string(json_message)
def on_completed(self):
logging.info('MessagingPublisher completed')
def on_error(self, error):
logging.error(error)
class MessagingSubscriber(Observable):
def __init__(self,
subscribe_ip_address: str = '127.0.0.1',
subscribe_port: int = 5002,
loop: AbstractEventLoop = None):
super(Observable, self).__init__()
self.subscribe_ip_address = subscribe_ip_address
self.subscribe_port = subscribe_port
self.loop = loop
self.subscribe_context = Context()
self.subscribe_socket = self.subscribe_context.socket(zmq.SUB) # type: ignore
self.subscribe_socket.connect('tcp://{}:{}'.format(self.subscribe_ip_address, self.subscribe_port))
self.subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, '') # type: ignore
self.finished = False
self.disposable: rx.core.typing.Disposable
async def listen_publisher(self):
while not self.finished:
json_message = await self.subscribe_socket.recv_string()
message = json.loads(json_message, object_hook=json_util.object_hook)
yield message
def subscribe(self,
observer: Optional[Union[Observer, OnNext]] = None,
on_error: Optional[OnError] = None,
on_completed: Optional[OnCompleted] = None,
on_next: Optional[OnNext] = None,
*,
scheduler: Optional[Scheduler] = None) -> rx.core.typing.Disposable:
disposable = from_aiter(self.listen_publisher(), self.loop)
if observer:
self.disposable = disposable.subscribe(observer=observer, scheduler=scheduler)
else:
self.disposable = disposable.subscribe(on_next=on_next,
on_error=on_error,
on_completed=on_completed,
scheduler=scheduler)
return self.disposable
def dispose(self):
self.disposable.dispose()
def test(loop):
async def main(loop):
done = asyncio.Future()
subscriber = MessagingSubscriber(loop=loop)
subscriber.subscribe(on_next=lambda message: print(message))
await done
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
```
#### File: trader/portfolio/test_simulator.py
```python
import datetime as dt
import numpy as np
from life_simulator import Book, LifeSimulator, AssetCash, AssetStock
def test_cash():
simulator = LifeSimulator(dt.datetime(1980, 1, 1), 1000000.0)
book = Book(dt.datetime.now())
book.assets.append(
AssetCash(name='term deposit',
initial_value=1000000.0,
asset_init_date=dt.datetime(2020, 1, 1)))
book = simulator.run_simulation(book,
simulation_start_date=dt.datetime(2020, 1, 1),
simulation_end_date=dt.datetime(2040, 1, 1))
cash = book.assets[0]
assert round(cash.ticks[365].value) == 1027923
assert round(cash.ticks[-1].value) == 1651618
def test_stock():
simulator = LifeSimulator(dt.datetime(1980, 1, 1), 1000000.0)
nw = []
for i in range(0, 50):
book = Book(dt.datetime.now())
book.assets.append(
AssetCash(name='cash',
initial_value=100000.0,
asset_init_date=dt.datetime(2020, 1, 1)))
book.assets.append(
AssetStock(name='stocks',
initial_value=1000000.0,
initial_price=1.0,
asset_init_date=dt.datetime(2020, 1, 1)))
book = simulator.run_simulation(book,
simulation_start_date=dt.datetime(2020, 1, 1),
simulation_end_date=dt.datetime(2040, 1, 1))
nw.append(book.calculate_net_worth())
assert np.average(nw) > 1000000.0
assert np.average(nw) < 100000000.0
```
#### File: trader/scratch/ibrx.py
```python
import datetime
import ib_insync as ibapi
import asyncio
from ib_insync.contract import ContractDescription, ContractDetails
import pandas as pd
import rx
import rx.disposable as disposable
import datetime as dt
import time
import random
import threading
import itertools
import functools
import backoff
from enum import Enum
from typing import List, Dict, Tuple, Callable, Optional, Set, Generic, TypeVar, Union, cast
from asyncio import BaseEventLoop
from rx import operators as ops
from rx.subject import Subject
from rx.scheduler import ThreadPoolScheduler, CatchScheduler, CurrentThreadScheduler
from rx.scheduler.periodicscheduler import PeriodicScheduler
from rx.scheduler.eventloop import AsyncIOThreadSafeScheduler
from rx.core.typing import Observable, Observer, Disposable, Scheduler
from ib_insync import Stock, IB, Contract, Forex, BarData, Future, Position
from ib_insync.util import df
from ib_insync.ticker import Ticker
from trader.common.listener_helpers import Helpers
from trader.common.logging_helper import setup_logging
logging = setup_logging(module_name='ibrx')
class IBRx():
client_id_counter = 2
error_code = 0
def __init__(
self,
ib: Optional[IB] = None,
scheduler: Optional[PeriodicScheduler] = None,
):
# self.event_loop = event_loop
self.scheduler: Optional[PeriodicScheduler] = scheduler
self.connected: bool = False
self.ib: IB
if ib:
self.ib = ib
else:
self.ib = IB()
self.market_data_subject = Subject()
self.polling_loop_subject = Subject()
self.contracts: Dict[int, Observable] = {}
self.ticker_cache: Dict[int, Tuple[dt.datetime, Ticker]] = {}
# try binding helper methods to things we care about
Contract.to_df = Helpers.to_df # type: ignore
def __handle_error(self, reqId, errorCode, errorString, contract):
global error_code
if errorCode == 2104 or errorCode == 2158 or errorCode == 2106:
return
logging.warning('ibrx reqId: {} errorCode {} errorString {} contract {}'.format(reqId,
errorCode,
errorString,
contract))
@backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=30)
def connect(self,
ib_server_address: str = '127.0.0.1',
ib_server_port: int = 7496,
client_id: Optional[int] = None):
# we often have client_id clashes, so try incrementally updating a static counter
IBRx.client_id_counter += 1
if not client_id:
client_id = IBRx.client_id_counter
if self.__handle_error not in self.ib.errorEvent:
self.ib.errorEvent += self.__handle_error
self.ib.connect(ib_server_address, ib_server_port, clientId=client_id, timeout=9)
self.ib.pendingTickersEvent += self.on_pending_tickers
return self
def on_pending_tickers(self, tickers: Set[Ticker]):
for ticker in tickers:
date_time = dt.datetime.now()
if ticker.contract and Helpers.symbol(ticker.contract) in self.ticker_cache:
self.ticker_cache[Helpers.symbol(ticker.contract)] = (date_time, ticker) # type: ignore
self.market_data_subject.on_next(ticker)
def _filter_ticker(self, contract: Contract, ticker: Ticker) -> bool:
if not ticker.contract:
return False
else:
return ticker.contract.conId == contract.conId
def subscribe_contract(self,
contract: Contract,
transformer: Callable[[Observable], Observable] = Helpers.noop_transformer) -> Observable:
if Helpers.symbol(contract) in self.contracts:
return self.contracts[Helpers.symbol(contract)]
ticker: Ticker = self.ib.reqMktData(contract, '', False, False, None)
obs = self.market_data_subject.pipe(
ops.filter(lambda ticker: self._filter_ticker(contract, ticker)), # type: ignore
transformer
)
self.contracts[Helpers.symbol(contract)] = obs
return obs
def unsubscribe_contract(self, contract: Contract):
        self.ib.cancelMktData(contract)  # cancel the market data subscription for this contract
del self.contracts[Helpers.symbol(contract)]
def subscribe_contract_timedelta(self,
contract: Contract,
refresh_period: dt.timedelta,
transformer: Callable[[Observable], Observable] = Helpers.noop_transformer) -> Observable:
def poll_cache(symbol: int) -> None:
(date_time, ticker) = self.ticker_cache[symbol]
self.polling_loop_subject.on_next(ticker)
if self.scheduler:
if Helpers.symbol(contract) not in self.contracts:
self.subscribe_contract(contract)
if Helpers.symbol(contract) not in self.ticker_cache:
self.ticker_cache[Helpers.symbol(contract)] = (dt.datetime.now(), Ticker())
self.scheduler.schedule_periodic(refresh_period, lambda x: poll_cache(Helpers.symbol(contract)))
return self.polling_loop_subject.pipe(
ops.filter(lambda ticker: self._filter_ticker(contract, ticker)) # type: ignore
)
else:
raise ValueError('self.scheduler not set. Set a scheduler to poll periodically')
def get_contract_details(self,
contract: Contract) -> List[ContractDetails]:
result = self.ib.reqContractDetails(contract)
if not result:
return []
else:
return cast(List[ContractDetails], result)
def get_matching_symbols(self, symbol: str) -> List[ContractDescription]:
return self.ib.reqMatchingSymbols(symbol)
def get_fundamental_data_sync(self, contract: Contract, report_type: str):
return self.ib.reqFundamentalData(contract, reportType=report_type)
def get_positions(self) -> List[Position]:
return self.ib.positions()
def get_conid_sync(self,
symbols: Union[str, List[str]],
secType: str = 'STK',
primaryExchange: str = 'SMART',
currency: str = 'USD') -> Union[Optional[Contract], List[Contract]]:
"""
Args:
secType (str): the security type
* 'STK' = Stock (or ETF)
* 'OPT' = Option
* 'FUT' = Future
* 'IND' = Index
* 'FOP' = Futures option
* 'CASH' = Forex pair
* 'CFD' = CFD
* 'BAG' = Combo
* 'WAR' = Warrant
* 'BOND'= Bond
* 'CMDTY'= Commodity
* 'NEWS' = News
* 'FUND'= Mutual fund
"""
def get_conid_helper(symbol: str, secType: str, primaryExchange: str, currency: str) -> Optional[Contract]:
contract_desc: List[ContractDescription] = self.get_matching_symbols(symbol)
f: List[ContractDescription] = []
if len(contract_desc) == 1 and contract_desc[0].contract:
return contract_desc[0].contract
elif len(contract_desc) > 0:
if secType:
f = f + [desc for desc in contract_desc if desc.contract and desc.contract.secType == secType]
if currency:
f = f + [desc for desc in contract_desc if desc.contract and desc.contract.currency == currency]
if len(f) > 0:
return f[0].contract
else:
return None
else:
logging.info('get_conid_helper for {} returned nothing'.format(symbol))
return None
if type(symbols) is list:
result = [get_conid_helper(symbol, secType, primaryExchange, currency) for symbol in symbols]
return [r for r in result if r]
else:
return get_conid_helper(str(symbols), secType, primaryExchange, currency)
def get_contract_history(self,
contract: Contract,
start_date: dt.datetime,
end_date: dt.datetime = dt.datetime.now(),
bar_size_setting: str = '5 secs',
to_pandas: bool = False) -> List[BarData]:
        # use a distinct cursor name so the 'dt' datetime module alias is not shadowed
        current_dt = end_date
        bars_list = []
        while current_dt >= start_date:
            bars = self.ib.reqHistoricalData(
                contract,
                endDateTime=current_dt,
                durationStr='1 D',
                barSizeSetting=bar_size_setting,
                whatToShow='MIDPOINT',
                useRTH=True,
                formatDate=1)
            if not bars:
                break
            for bar in bars:
                bars_list.append(bar)
            current_dt = bars[0].date
if to_pandas:
return df(bars_list)
else:
return bars_list
def sleep(self, seconds: float):
self.ib.sleep(seconds)
def run(self):
self.ib.run()
def client(self):
return self.ib
```
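A hedged sketch of subscribing to one contract through the wrapper above; the gateway endpoint is a placeholder and a running TWS/IB gateway session is assumed:
```python
from ib_insync import Stock

ib_rx = IBRx().connect('127.0.0.1', 7496)       # placeholder TWS/gateway endpoint
ticks = ib_rx.subscribe_contract(Stock('SPY', 'SMART', 'USD'))
ticks.subscribe(on_next=lambda t: print(t.last, t.bid, t.ask))
ib_rx.run()                                     # hands control to the ib_insync event loop
```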
#### File: 9600dev/mmr/trader_service.py
```python
import click
import nest_asyncio
nest_asyncio.apply()
import asyncio
from trader.common.logging_helper import setup_logging
logging = setup_logging(module_name='trading_runtime')
from trader.container import Container
from trader.trading.trading_runtime import Trader
from trader.common.helpers import get_network_ip
from trader.messaging.bus_server import start_lightbus
@click.command()
@click.option('--simulation', required=False, default=False, help='load with historical data')
@click.option('--config', required=False, default='/home/trader/mmr/configs/trader.yaml',
help='trader.yaml config file location')
def main(simulation: bool,
config: str):
# required for nested asyncio calls and avoids RuntimeError: This event loop is already running
loop = asyncio.get_event_loop()
if simulation:
raise ValueError('not implemented yet')
# ib_client = HistoricalIB(logger=logging)
container = Container(config)
trader = container.resolve(Trader, simulation=simulation)
trader.connect()
ip_address = get_network_ip()
logging.debug('starting trading_runtime at network address: {}'.format(ip_address))
# setup the lightbus
start_lightbus(container.config()['lightbus_config_file'],
container.config()['lightbus_module'],
loop)
logging.debug('started lightbus successfully')
# start the trader
logging.debug('starting trader run() loop')
trader.run()
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main()
```
#### File: trader/trading/book.py
```python
import expression
import pandas as pd
from expression import pipe
from expression.collections import seq, Seq
from ib_insync.objects import Position, PortfolioItem
from ib_insync.contract import Contract
from ib_insync.order import Trade, Order, OrderStatus
from trader.common.logging_helper import setup_logging
from trader.common.helpers import ListHelper
from trader.common.reactive import AsyncCachedSubject, AsyncEventSubject
from eventkit import Event
logging = setup_logging(module_name='book')
from typing import List, Dict, Tuple, Union, cast, Optional
class Book(AsyncEventSubject[Union[Trade, Order]]):
def __init__(self):
self.orders: Dict[int, List[Order]] = {}
self.trades: Dict[int, List[Trade]] = {}
super().__init__()
# we go with order or trade here, because reconnecting with
# the server means we're doing a reqAllOrder call, which returns
# orders only
async def add_update_trade(self, order: Union[Trade, Order]):
logging.debug('updating trade book with {}'.format(order))
if type(order) is Trade:
order = cast(Trade, order)
if order.order.orderId not in self.trades: self.trades[order.order.orderId] = []
if order.order.orderId not in self.orders: self.orders[order.order.orderId] = []
self.trades[order.order.orderId] = [order] + self.trades[order.order.orderId]
self.orders[order.order.orderId] = [order.order] + self.orders[order.order.orderId]
if type(order) is Order:
order = cast(Order, order)
if order.orderId not in self.orders: self.orders[order.orderId] = []
self.orders[order.orderId] = [order] + self.orders[order.orderId]
def get_orders(self) -> Dict[int, List[Order]]:
return self.orders
def get_trades(self) -> Dict[int, List[Trade]]:
return self.trades
def get_trade(self, order_id: int) -> Optional[Trade]:
if order_id in self.trades:
return self.trades[order_id][0]
return None
def get_order(self, order_id: int) -> Optional[Order]:
if order_id in self.orders:
return self.orders[order_id][0]
return None
def get_book(self) -> Tuple[Dict[int, List[Trade]], Dict[int, List[Order]]]:
return (self.trades, self.orders)
async def asend(self, value: Union[Trade, Order]) -> None:
await self.add_update_trade(value)
await super().asend(value)
async def filter_book_by_contract(self, contract: Contract, value: Trade):
return contract.conId == value.contract.conId
```
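A short usage sketch for the `Book` above, assuming the module is importable from the repository root; the limit order is a made-up example and only methods defined in this file are exercised:
```python
# Usage sketch for Book; the order below is illustrative only.
import asyncio
from ib_insync.order import LimitOrder
from trader.trading.book import Book
async def demo():
    book = Book()
    order = LimitOrder('BUY', 100, 13.37)
    order.orderId = 1
    await book.add_update_trade(order)   # Order branch: only the orders dict is updated
    print(book.get_order(1))             # latest Order snapshot for orderId 1
    print(book.get_book())               # (trades, orders) dictionaries
asyncio.run(demo())
```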
#### File: trader/trading/portfolio.py
```python
import expression
import itertools
import pandas as pd
from expression import pipe
from expression.collections import seq, Seq
from ib_insync.objects import Position, PortfolioItem
from ib_insync.contract import Contract
from trader.common.logging_helper import setup_logging
from trader.common.helpers import ListHelper
logging = setup_logging(module_name='portfolio')
from typing import List, Dict, Tuple
class Portfolio():
def __init__(self):
self.positions: Dict[Tuple[str, Contract], Position] = {}
self.portfolio_items: Dict[Tuple[str, Contract], PortfolioItem] = {}
def add_position(self, position: Position) -> None:
key = (position.account, position.contract)
if key in self.positions:
logging.debug('updating position {}'.format(position))
self.positions[key] = position
def get_positions(self) -> List[Position]:
return list(self.positions.values())
def add_portfolio_item(self, portfolio_item: PortfolioItem):
key = (portfolio_item.account, portfolio_item.contract)
if key in self.portfolio_items:
logging.debug('updating portfolio item {}'.format(portfolio_item))
self.portfolio_items[key] = portfolio_item
def get_portfolio_items(self) -> List[PortfolioItem]:
return list(self.portfolio_items.values())
``` |
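A matching sketch for `Portfolio`; the account name and the AAPL contract are placeholders:
```python
# Usage sketch for Portfolio; account and contract values are placeholders.
from ib_insync.contract import Stock
from ib_insync.objects import Position
from trader.trading.portfolio import Portfolio
portfolio = Portfolio()
position = Position(account='DU000000', contract=Stock('AAPL', 'SMART', 'USD'),
                    position=100, avgCost=150.0)
portfolio.add_position(position)       # logs only when an existing key is updated
print(portfolio.get_positions())
```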
{
"source": "96368a/ityxb_scripts",
"score": 2
} |
#### File: 96368a/ityxb_scripts/API.py
```python
import requests
LoginUrl=r'http://stu.ityxb.com/back/bxg_anon/login'
InfoUrl=r'http://stu.ityxb.com/back/bxg_anon/user/loginInfo'
PointsUrl=r'http://stu.ityxb.com/back/bxg/user/getThreeRedPoints'
UnfinshedUrl=r'http://stu.ityxb.com/back/bxg/user/unfinished'
PreViewUrl=r'http://stu.ityxb.com/back/bxg/preview/info'
PreViewUpdateUrl=r'http://stu.ityxb.com/back/bxg/preview/updateProgress'
QuestionRul=r'http://stu.ityxb.com/back/bxg/preview/questions'
QuestionsUpdateUrl=r'http://stu.ityxb.com/back/bxg/preview/ansQuestions'
QuestionsView=r'http://stu.ityxb.com/back/bxg/preview/viewQuesAnsResult'
cookies = ""
def _Post(url,data={}):
header={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
'cookie' : "JSESSIONID=%s;"%cookies
}
return requests.post(url,headers=header,data=data).json()
def GetInfo():
return _Post(InfoUrl)
def GetPoint():
return _Post(PointsUrl)['resultObject']
def GetUnished():
data={
'pageNumber':'1',
'pageSize':'10',
'type':'1'
}
return _Post(UnfinshedUrl,data)['resultObject']
def GetPreviemInfo(id):
data={
'previewId':id
}
return _Post(PreViewUrl,data)['resultObject']
def UpdatePoint(previewId,pointId,s):
data={
'previewId':previewId,
'pointId':pointId,
'watchedDuration':s
}
return _Post(PreViewUpdateUrl,data)
def GetQuestions(previewId,pointId):
data={
'previewId':previewId,
'pointId':pointId,
}
    try:
        return _Post(QuestionRul,data)['resultObject'][0]['id']
    except Exception:
        # no quiz attached to this point, or the request failed
        return 0
def UpdateQuestions(previewId,pointId,QuestionId,answer):
data={
'previewId':previewId,
'pointId':pointId,
'preivewQuestionId':QuestionId,
'stuAnswer':answer,
}
a = _Post(QuestionsUpdateUrl,data)
    return a
def GetAnswer(previewId,pointId):
data={
'previewId':previewId,
'pointId':pointId,
}
a = _Post(QuestionsView,data)
return a['resultObject'][0]['answerOriginal']
def LoginCookies(username,password):
data={
'automaticLogon':'false',
'username':username,
'password':password
}
info=requests.post(LoginUrl,data)
return info.cookies['JSESSIONID']
``` |
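A minimal login-and-query sketch for the module above; the credentials are placeholders and no response fields beyond what the functions already unpack are assumed:
```python
# Usage sketch for the API module above; username/password are placeholders.
import API
API.cookies = API.LoginCookies('your_username', 'your_password')  # JSESSIONID used by _Post
print(API.GetInfo())          # raw login info JSON
print(API.GetPoint())         # red-point counters
print(API.GetUnished())       # unfinished previews, page 1
```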
{
"source": "96bearli/bili-auto-note",
"score": 3
} |
#### File: bili-auto-note/bilibili/bilibili_note_helper.py
```python
import os
import time
import json
import csv
import asyncio
from urllib.parse import urlencode
from typing import Tuple, List
from .timeline import Timeline, TimelineItem
from .video import VideoInfo, VideoPartInfo
from .agent import BilibiliAgent
from .timeline_converter import TimelineConverter
class BilibiliNoteHelper:
@staticmethod
def getVideoPartInfo(cidCount: int, payload: dict) -> VideoPartInfo:
"""从返回的json生成视频分P信息
Args:
cidCount (int): 总分P数量
payload (dict): 分P的json
Returns:
VideoPartInfo: 生成的视频分P信息
"""
cid = payload['cid']
index = payload['page']
title = payload['part']
duration = payload['duration']
return VideoPartInfo(cid, index, cidCount, title, duration)
@staticmethod
def getVideoInfo(payload: dict) -> VideoInfo:
"""生成视频信息
Args:
payload (dict): 视频的json
Returns:
VideoInfo: 生成的视频信息
"""
aid = payload['aid']
pic = payload['pic']
title = payload['title']
cnt = len(payload['pages'])
parts = [BilibiliNoteHelper.getVideoPartInfo(cnt, part_payload) for part_payload in payload['pages']]
return VideoInfo(aid, pic, title, parts)
@staticmethod
async def sendNote(
timeline: Timeline, agent: BilibiliAgent,
bvid: str, offsets: List[int],
cover: str, publish: bool,
confirmed: bool = False,
previousPartCollection: List[str] = [],
ignoreThreshold: int = 600,
danmakuOffsets: List[int] = [],
autoComment: bool = True,
output: str = ''
) -> List[str]:
"""发送笔记
Args:
timeline (Timeline): 参考时间轴
agent (BilibiliAgent): 用于发送的账号
bvid (str): 目标视频BV号
offsets (list[int]): 每个分P的开场偏移
cover (str): 发送到评论区的字符串
publish (bool): 是否直接发布
confirmed (bool): 发布前是否不用二次确认, 默认为False
previousPartCollection (list[int]): 前一次发布的视频分P信息, 默认为空
ignoreThreshold (int): 时间短于此值的分P将被忽略(秒), 默认为10分钟
danmakuOffsets(list[int]): 弹幕版每个分P的开场偏移
output(str): 输出文本轴路径
Returns:
List[str]: 如果发布成功,返回新的视频分P信息
"""
# 获取视频信息
video_info_res = await agent.get(
"https://api.bilibili.com/x/web-interface/view",
params={
"bvid": bvid
})
video_info = BilibiliNoteHelper.getVideoInfo(video_info_res)
part_collection = [part.cid for part in video_info.parts]
if previousPartCollection == part_collection:
# 分P数量没有发生变化
return part_collection
await asyncio.sleep(1)
# 获取笔记状态
note_res = await agent.get(
"https://api.bilibili.com/x/note/list/archive",
params={
"oid": video_info.aid
})
note_id = ''
if not note_res['noteIds']:
await asyncio.sleep(1)
# 没有笔记,插入一个新的空笔记以获取ID
note_add_res = await agent.post(
"https://api.bilibili.com/x/note/add",
data={
"oid": video_info.aid,
"csrf": agent.csrf,
"title": video_info.title,
"summary": " "
})
note_id = note_add_res['note_id']
else:
note_id = note_res['noteIds'][0]
# 发布笔记
# 检查偏移量和分P数是否一致
if not confirmed:
print('请确认以下信息是否准确(自动监控模式下本提示只会出现一次)')
print(f' 视频名: {video_info.title}')
print(' 配置: '+('笔记会自动发布' if publish else '笔记不会自动发布, 请在脚本执行完毕后进入视频笔记区手动发布'))
if publish:
print(f' 自动发布的评论内容: \n{cover}')
if output:
print(f' 更新笔记时文本轴将同步保存于 {output}')
if len(offsets) + len(danmakuOffsets) != len(video_info.parts):
print(f' 注意: 偏移量{offsets}, {danmakuOffsets} 总数量和视频分段数量({len(video_info.parts)})不一致!')
if not confirmed:
command = input('请确认以上信息准确。是否执行?[y/n]')
if command != 'Y' and command != 'y':
return []
current_timestamp = 0
current_danmaku_timestamp = 0
video_part_index = 0
video_part_danmaku_index = 0
submit_obj = []
submit_len = 0
txt_timeline = ''
# 插入每个分P的轴
for video_part in video_info.parts:
# 自动忽略过短的视频(一般是用来垫的视频,不会对应到offsets序列)
if video_part.duration < ignoreThreshold:
continue
offset = 0
if not danmakuOffsets:
# 所有分P统一由offsets管理
if len(offsets) == 0 or video_part_index >= len(offsets):
raw_offset = 'auto'
else:
raw_offset = offsets[video_part_index]
video_part_index += 1
if isinstance(raw_offset, int):
offset = raw_offset
current_timestamp = offset + video_part.duration
elif raw_offset == 'auto':
offset = current_timestamp
current_timestamp = offset + video_part.duration
else:
continue
else:
# 分P分别由offsets和danmakuOffsets决定
if '弹幕' in video_part.title and '无弹幕' not in video_part.title:
# 这是一个弹幕视频
if len(danmakuOffsets) == 0 or video_part_danmaku_index >= len(danmakuOffsets):
raw_offset = 'auto'
else:
raw_offset = danmakuOffsets[video_part_danmaku_index]
video_part_danmaku_index += 1
if isinstance(raw_offset, int):
offset = raw_offset
current_danmaku_timestamp = offset + video_part.duration
elif raw_offset == 'auto':
offset = current_danmaku_timestamp
current_danmaku_timestamp = offset + video_part.duration
else:
continue
else:
# 这是一个无弹幕视频
if len(offsets) == 0 or video_part_index >= len(offsets):
raw_offset = 'auto'
else:
raw_offset = offsets[video_part_index]
video_part_index += 1
if isinstance(raw_offset, int):
offset = raw_offset
current_timestamp = offset + video_part.duration
elif raw_offset == 'auto':
offset = current_timestamp
current_timestamp = offset + video_part.duration
else:
continue
# 从原始时间轴中切出分p时间轴
part_timeline = timeline.clip(offset, video_part.duration)
if len(part_timeline.items) == 0:
continue
txt_timeline += (video_part.title)
txt_timeline += '\n'
txt_timeline += str(part_timeline)
txt_timeline += '\n\n'
(timeline_obj, timeline_len) = TimelineConverter.getTimelineJson(part_timeline, video_part)
submit_obj.extend(timeline_obj)
submit_len += timeline_len
if not submit_obj:
print('没有可用的笔记内容')
return part_collection
if output:
# 将文本轴存储在文件中
try:
with open(output, "w", encoding="utf-8") as f:
f.write(txt_timeline)
except Exception as e:
print('文本轴写入失败,错误原因:')
print(e)
submit_obj_str = json.dumps(submit_obj, indent=None, ensure_ascii=False, separators=(',', ':'))
data = {
"oid": video_info.aid,
"note_id": note_id,
"title": video_info.title,
"summary": cover,
"content": submit_obj_str,
"csrf": agent.csrf,
"cont_len": submit_len,
"hash": str(round(time.time()*1000)),
"publish": 1 if publish else 0,
"auto_comment": 1 if (publish and autoComment) else 0
}
await asyncio.sleep(1)
submit_res = await agent.post("https://api.bilibili.com/x/note/add", data=data)
if submit_res['note_id']:
print(f'执行成功,笔记ID为:{submit_res["note_id"]}')
return part_collection
else:
print(f'执行失败,返回值为{submit_res}')
return []
```
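A hypothetical driver for `BilibiliNoteHelper.sendNote`; the `BilibiliAgent` constructor argument (a login cookie) and the BV id are assumptions for illustration, and the import paths assume the repository root is on `sys.path`:
```python
# Hypothetical driver for sendNote; cookie string and BV id are placeholders.
import asyncio
from bilibili.agent import BilibiliAgent
from bilibili.bilibili_note_helper import BilibiliNoteHelper
from bilibili.timeline import Timeline, TimelineItem
async def demo():
    agent = BilibiliAgent('SESSDATA=...')      # assumed to take the login cookie
    timeline = Timeline([TimelineItem(30, 'intro'), TimelineItem(600, 'song')])
    await BilibiliNoteHelper.sendNote(
        timeline, agent,
        bvid='BV1xx411c7mD',
        offsets=[0],
        cover='笔记已更新',
        publish=False,
        confirmed=True)                        # skip the interactive confirmation
asyncio.run(demo())
```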
#### File: bili-auto-note/bilibili/timeline.py
```python
from enum import Enum, unique
from typing import Iterator, List
@unique
class TimelineType(Enum):
NORMAL = ''
SONG = 'song'
DANCE = 'dance'
@unique
class Member(Enum):
AVA = 'a'
BELLA = 'b'
CAROL = 'c'
DIANA = 'd'
EILEEN = 'e'
class TimelineItem:
def __init__(self,
sec: int, tag: str, highlight: bool = False,
type: TimelineType = TimelineType.NORMAL, members: List[Member] = []) -> None:
"""时间轴条目
Args:
sec (int): 条目秒数
tag (str): 条目内容
highlight (bool, optional): 是否高亮显示 Defaults to False.
type (TimelineType, optional): 条目特殊类型,如歌舞等 Defaults to TimelineType.NORMAL.
members (list[Member], optional): 特殊条目参与的成员 Defaults to [].
"""
self.sec = sec
self.tag = tag.strip()
self.highlight = highlight
self.type = type
self.members = members
def shift(self, delta: int) -> 'TimelineItem':
"""生成调整后的时间轴条目
Args:
delta (int): 调整的时间偏移量
Returns:
TimelineItem: 新生成的时间轴条目
"""
return TimelineItem(self.sec + delta, self.tag, self.highlight, self.type, self.members)
def __str__(self) -> str:
m, s = divmod(self.sec, 60)
h, m = divmod(m, 60)
if h == 0:
time = "%02d:%02d" % (m, s)
else:
time = "%d:%02d:%02d" % (h, m, s)
return time + ' ' + self.tag
class Timeline:
def __init__(self, items: List[TimelineItem]) -> None:
"""生成时间轴
Args:
items (list[TimelineItem]): 时间轴条目
"""
self.items = items
self.items.sort(key=lambda item: item.sec)
def __iter__(self) -> Iterator[TimelineItem]:
return iter(self.items)
def __add__(self, other: 'Timeline') -> 'Timeline':
return Timeline(self.items + other.items)
def shift(self, delta: int) -> 'Timeline':
"""生成调整后的时间轴
Args:
delta (int): 调整的时间偏移量
Returns:
Timeline: 新生成的时间轴
"""
return Timeline([item.shift(delta) for item in self.items])
def clip(self, start: int, length: int) -> 'Timeline':
"""生成时间轴切片
Args:
start (int): 切片开始对应的时刻(秒)
length (int): 切片总长度
Returns:
Timeline: 适配于切片的时间轴(0表示切片开始时刻)
"""
return Timeline([item.shift(-start) for item in self.items if item.sec >= start and item.sec <= start + length])
def __str__(self) -> str:
return '\n'.join(map(str, self.items))
```
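A small sketch of how `Timeline` and `TimelineItem` compose; the times and tags are made up:
```python
# Sketch of the Timeline API defined above.
from bilibili.timeline import Timeline, TimelineItem, TimelineType
timeline = Timeline([
    TimelineItem(75, 'opening'),
    TimelineItem(620, 'song A', highlight=True, type=TimelineType.SONG),
    TimelineItem(3690, 'ending'),
])
print(timeline)                   # each item rendered as "mm:ss tag" or "h:mm:ss tag"
part = timeline.clip(600, 1800)   # slice for a part starting at 10:00, 30 minutes long
print(part)                       # 'song A' now shows up at 00:20
```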
#### File: bili-auto-note/bilibili/video.py
```python
from typing import List
class VideoPartInfo:
def __init__(self, cid: int, index: int, cidCount: int, title: str, duration: int) -> None:
"""视频分P信息
Args:
cid (int): 唯一标识符
index (int): 分P序号(从1开始)
cidCount (int): 总共的分P数量
title (str): 分P标题
"""
self.cid = cid
self.index = index
self.cidCount = cidCount
self.title = title
self.duration = duration
class VideoInfo:
def __init__(self, aid: int, pic: str, title: str, parts: List[VideoPartInfo]) -> None:
"""视频信息
Args:
aid (int): 视频AV号
pic (str): 视频封面图
title (str): 视频标题
parts (list[VideoPartInfo]): 每个分P的信息
"""
self.aid = aid
self.pic = pic
self.title = title
self.parts = parts
``` |
{
"source": "96bearli/biliup_record",
"score": 3
} |
#### File: 96bearli/biliup_record/main_get.py
```python
import asyncio
import json
import os
import sys
import time
import httpx
from loguru import logger
headers = {"accept-encoding": "gzip", # gzip压缩编码 能提高传输文件速率
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.46', }
cookies = {}
# 创建文件夹
def path_creat(_path):
if not os.path.exists(_path):
os.mkdir(_path)
return _path
def save_data(path, data, encoding="utf-8"):
with open(path, "a+", encoding=encoding) as f:
f.write(f"{data}\n")
def save_img(path, name, data):
with open(path + name, "wb") as f:
f.write(data)
async def download(url: str):
name = url.split("/")[-1]
if name == '':
logger.error(f"* 问题url:{url}")
return 0
logger.info(f"downloading {name}")
if "?" in name:
name = name.split("?")[0]
try:
async with httpx.AsyncClient(headers=headers, cookies=cookies, timeout=10) as client:
req = await client.get(url)
except Exception as e:
# print(e)
logger.error(f"* Error:Download_failed:{url},check file'{paths[1] + 'Img_failed.txt'}")
with open(paths[1] + "Img_failed.txt", "a+", encoding="utf-8") as f:
f.write(f"{url}\n")
return 0
save_img(paths[1], name, req.content)
return True
def get_content(s, url: str):
data = s.get(url).json()
offset = data['data']['next_offset']
more = data['data']['has_more']
return data, offset, more
async def main(uid, d_img: bool):
more = 1
count = 1
offset = 0
# offset = 634424114892767289
s = httpx.Client(headers=headers, cookies=cookies)
while more == 1:
url = f'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?visitor_uid=283764718&host_uid={uid}&offset_dynamic_id={offset}&need_top=1&platform=web'
logger.info(f"第{count}页动态爬取中:{url}")
count += 1
tasks = []
try:
data_json, offset, more = get_content(s, url=url)
except Exception as e:
# logger.error(f"{e}")
logger.info("* 网络请求错误,5秒后重试一次")
time.sleep(5)
data_json, offset, more = get_content(s, url=url)
# print(data_json)
# exit()
if more == 0:
break
for dict_da in data_json['data']['cards']:
key = dict_da['card'].split('"')[1]
dict_data = json.loads(dict_da['card'])
# print(dict_data)
save_data(paths[0] + "data.txt", dict_data)
num = data_json['data']['cards'].index(dict_da)
if key == "item": # 图片动态
upload_time = dict_data['item']['upload_time'] # int
content = dict_data['item']['description'].replace("\n", "\\n").replace("\r", "").replace(",",
",").replace(
"'", "‘").replace('"', '”') # str
pic_urls = [d["img_src"] for d in dict_data['item']['pictures']] # list
# id_ = dict_data['item']['id'] # int
id_ = data_json['data']['cards'][num]['desc']["dynamic_id"] # int
other = "img"
elif key == "user": # 文字动态 or 转发 /data/cards/0/desc/dynamic_id
try:
origin = json.loads(dict_data["origin"])
try: # 转发动态
upload_time = data_json['data']['cards']['item']['timestamp']
except Exception: # 转发av
# logger.warning(f"{Exception}")
upload_time = data_json['data']['cards'][num]['desc'][
"timestamp"] # int /data/cards/0/desc/timestamp
# if upload_time == 0:
# upload_time = data_json['data']['cards']['item']['timestamp']
try:
content = f"评论:{dict_data['item']['content']}\n\r转发视频[{origin['title']}](https://b23.tv/av{origin['aid']})".replace(
"\r", "").replace("\n", "\\n").replace(",", ",").replace("'", "‘").replace('"', '”') # str
pic_urls = [] # list
except KeyError: # /item/orig_dy_id /item/description
# print(KeyError)
try:
content = f"评论:{dict_data['item']['content']}\n\r转发动态[{dict_data['origin_user']['info']['uname']} UID:{dict_data['origin_user']['info']['uid']}动态](https://t.bilibili.com/{dict_data['item']['orig_dy_id']})\n\r内容:{origin['item']['content']}".replace(
"\r", "").replace("\n", "\\n").replace(",", ",").replace("'", "‘").replace('"', '”') # str
pic_urls = [] # list
except:
content = f"评论:{dict_data['item']['content']}\n\r转发带图动态[{dict_data['origin_user']['info']['uname']} UID:{dict_data['origin_user']['info']['uid']}动态](https://t.bilibili.com/{dict_data['item']['orig_dy_id']})\n\r内容:{origin['item']['description']}".replace(
"\r", "").replace("\n", "\\n").replace(",", ",").replace("'", "‘").replace('"', '”') # str
pic_urls = [] # 转发带图动态的图就不下了,太多了
# pic_urls = [imd["img_src"] for imd in origin['item']['pictures']] # list /item/pictures/0/img_src
id_ = data_json['data']['cards'][num]['desc']["dynamic_id"] # int
other = "reprint"
except Exception as e:
# logger.warning(f"{e}")
upload_time = dict_data['item']['timestamp'] # int /item/upload_time
content = dict_data['item']['content'].replace("\n", "\\n").replace("\r", "").replace(",",
",").replace(
"'", "‘").replace('"', '”') # str
pic_urls = [] # list
id_ = data_json['data']['cards'][num]['desc']["dynamic_id"] # int
other = "text"
elif key == "aid": # 视频投稿
upload_time = dict_data['pubdate'] # int
content = dict_data['dynamic'].replace("\n", "\\n").replace("\r", "").replace(",", ",").replace("'",
"‘").replace(
'"', '”') # str
pic_urls = [dict_data["pic"]] # list
id_ = dict_data['aid'] # int
other = "av"
elif key == "id": # 专栏投稿
try:
upload_time = dict_data['publish_time'] # int
content = dict_data['title'].replace("\n", "\\n").replace("\r", "").replace(",", ",").replace("'",
"‘").replace(
'"', '”') # str
pic_urls = [dict_data["banner_url"]] # list
id_ = dict_data['id'] # int
other = "cv"
except:
upload_time = dict_data['ctime'] # int
content = (dict_data['title'] + "\n" + dict_data['intro']).replace("\r", "").replace("\n",
"\\n").replace(
",", ",").replace("'", "‘").replace('"', '”') # str
pic_urls = [dict_data["cover"]] # list
id_ = dict_data['id'] # int
other = "au"
elif key == "rid":
upload_time = 0 # int
content = dict_data['vest']['content'].replace("\n", "\\n").replace("\r", "").replace(",", ",").replace(
"'", "‘").replace('"', '”') # str
pic_urls = [] # list
id_ = dict_data['rid'] # int
other = "decorate"
else:
logger.debug(" * 注意本条数据 *")
logger.debug(dict_data)
logger.debug(url)
time.sleep(5)
continue
# print(f"{upload_time},{content},{pic_urls},{id_},{other}") # exit()
for p_u in pic_urls:
if p_u == "":
pic_urls.remove(p_u)
save_data(paths[0] + "data.csv", f"{upload_time},{content},{'&'.join(pic_urls)},{id_},{other}",
encoding="utf-8-sig")
for p_u in pic_urls:
tasks.append(download(p_u))
if d_img:
result = await asyncio.gather(*tasks)
if __name__ == '__main__':
# 目标uids
uids = [351609538]
# 是否下载图片
img = False
path_creat("./data")
logger.add(sink=f"./data/log_get_{int(time.time())}.log", format="{level} - {time} - {message}", level="DEBUG",encoding="utf-8")
logger.info("日志记录开始")
if len(sys.argv) != 1:
uids = [uid for uid in sys.argv[1].split(",")]
if len(sys.argv) == 3:
img = bool(int(sys.argv[-1]))
else:
img = False
logger.success(f"* 已获取参数参数 <目标UID:{uids},下载图片:{img}>")
else:
logger.success(f"* 使用内置参数 <目标UID:{uids},下载图片:{img}>")
print('''* Usage: python main_get.py <",".join(uids)> <download_img?>
example1: python main_get.py 1111,22222,333333 0 #uids:[1111,22222,333333] download_img?:False
example2: python main_get.py 1111 #uids:[1111] download_img?:False''')
path_creat("./data")
time.sleep(1.5)
# 时间戳转换 Fx = "=(A1+8*3600)/86400+70*365+19"
for uid in uids:
logger.info(f"* 正在进行准备工作,当前目标UID:{uid}")
paths = [f"./data/{uid}/", f"./data/{uid}/img/"]
for p in paths:
path_creat(p)
for i in ["data.txt", "data.csv"]:
with open(paths[0] + i, "w", encoding="utf-8")as f:
pass
asyncio.run(main(uid=uid, d_img=img))
logger.success(f"* 目标UID:{uid}已完成爬取\n")
time.sleep(1.5)
``` |
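The closing comment above notes the spreadsheet formula for converting the exported timestamps. A small post-processing sketch that does the same conversion in Python; the path assumes the default UID in the script and that a crawl has already produced `data.csv`:
```python
# Read the exported data.csv: column 1 is an epoch timestamp (0 when unknown),
# followed by content, '&'-joined image URLs, id and type.
import csv
import datetime
with open('./data/351609538/data.csv', encoding='utf-8-sig') as f:   # path is an assumption
    for upload_time, content, pics, id_, kind in csv.reader(f):
        ts = int(upload_time)
        when = datetime.datetime.fromtimestamp(ts) if ts else None
        print(when, kind, content[:30])
```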
{
"source": "96chh/chat-room",
"score": 3
} |
#### File: 96chh/chat-room/server.py
```python
import socket
import select
import sqlite3
def broadcast(sock, message):
    # iterate over a copy so that removing a dead client does not skip entries,
    # and avoid shadowing the socket module
    for client in list(sockets):
        if client != server_socket and client != sock:
            try:
                client.send(message.encode('utf-8'))
            except Exception as e:  # raised when a client disconnects unexpectedly
                print(str(e) + 'aa')
                client.close()
                sockets.remove(client)
if __name__ == "__main__":
# 数据库用来存放用户账号密码
conn = sqlite3.connect("accounts.db")
cs = conn.cursor()
cs.execute(
"""
create table if not exists accounts (
account varchar(20) primary key,
psw varchar(10)
)
"""
)
sockets = []
online = []
sock_account = {}
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("0.0.0.0", 6666))
server_socket.listen()
sockets.append(server_socket)
print("聊天室服务器启动,端口 6666")
while True:
# 通过 select 模型,获取状态变化的 socket
read_sockets, write_sockets, error_sockets = select.select(sockets, [], [])
for sock in read_sockets:
if sock == server_socket: # 新连接
sockfd, addr = server_socket.accept()
sockets.append(sockfd)
else: # 某个客户端发信息
try:
data = sock.recv(1024).decode('utf-8')
test = data.split('/')
if test[0] == 'reg': # 用户在注册,发过来是的用户名和密码
cs.execute("select * from accounts")
result = cs.fetchall()
for i in result:
if i[0] == test[1]: # 用户名重复
sock.send('fail_reg'.encode('utf-8'))
break
else:
cs.execute(
"insert into accounts (account, psw) values (?, ?)",
(test[1], test[2]),
)
conn.commit()
sock.send('true'.encode('utf-8'))
account = test[1]
sock_account[sock] = account
online.append(account)
print('【%s】注册成功!' % account)
broadcast(sock, "\r【服务器】 %s 进入聊天室\n" % account)
elif test[0] == 'login': # 用户在登录,发过来的是用户名和密码
cs.execute("select * from accounts")
result = cs.fetchall()
if (test[1], test[2]) in result:
if test[1] in online:
sock.send('logon'.encode('utf-8'))
continue
sock.send('true'.encode('utf-8'))
account = test[1]
sock_account[sock] = account
online.append(account)
print("【%s】登陆成功!" % account)
broadcast(sock, "\r【服务器】 %s 进入聊天室\n" % account)
else:
sock.send('fail_login'.encode('utf-8'))
elif data[-5:] == 'show\n': # 显示在线用户
test = ''
for i in online:
test += i + ' '
sock.send("\r【服务器】 在线用户:{}\n".format(test).encode('utf-8'))
elif test[0] == 'change': # 修改用户名、密码
old_account = sock_account[sock]
cs.execute("delete from accounts where account=?", (old_account,))
conn.commit()
sock_account[sock] = test[1]
online.remove(old_account)
cs.execute("select * from accounts")
result = cs.fetchall()
for i in result:
if i[0] == test[1]: # 用户名重复
sock.send('fail_reg'.encode('utf-8'))
break
else:
cs.execute(
"insert into accounts (account, psw) values (?, ?)",
(test[1], test[2]),
)
conn.commit()
sock.send('true'.encode('utf-8'))
online.append(sock_account[sock])
print('{}更名为{}成功!'.format(old_account, sock_account[sock]))
broadcast(sock, "\r【服务器】 {}已更名为{}\n".format(old_account, sock_account[sock]))
else: # 用户发信息
broadcast(sock, data)
except Exception as e:
# Windows 下,有时突然关闭客户端,会抛出 "Connection reset by peer" 异常
broadcast(sock, ("\r【服务器】 %s 已离线!\n" % sock_account[sock]))
online.remove(sock_account[sock])
print("【%s】已离线" % sock_account[sock])
sock.close()
sockets.remove(sock)
del sock_account[sock]
continue
server_socket.close()
``` |
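A minimal client sketch for the plain-text protocol the server above expects; host and credentials are placeholders:
```python
# Minimal client for the chat server above: login uses "login/<user>/<pass>",
# registration "reg/<user>/<pass>", and any other text is broadcast as chat.
import socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 6666))
client.send('login/alice/secret'.encode('utf-8'))
print(client.recv(1024).decode('utf-8'))          # 'true', 'fail_login' or 'logon'
client.send('【alice】 hello\n'.encode('utf-8'))   # forwarded to the other clients
client.close()
```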
{
"source": "96chh/crawl2pdf",
"score": 3
} |
#### File: 96chh/crawl2pdf/lxf.py
```python
import requests
from bs4 import BeautifulSoup
import pdfkit
from concurrent.futures import ThreadPoolExecutor
import os
import re
import proxy
domain = "https://www.liaoxuefeng.com"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWeb"
"Kit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
}
html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
</head>
<body>
{body}
</body>
</html>
"""
count = 0
retry = 0
ban = 0
def parse_url():
python = "/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000"
rsp = requests.get(domain + python, headers=headers)
soup = BeautifulSoup(rsp.text, "html.parser")
    divs = soup.find_all(depth=re.compile(r"\d"))  # raw string avoids the invalid escape warning
urls = []
for div in divs:
urls.append(domain + div.a.get("href"))
return urls
def parse_body(url):
global retry
try:
rsp = requests.get(url, headers=headers, proxies=proxy.get(), timeout=15)
except Exception:
retry += 1
print("重爬次数:{}".format(retry))
return parse_body(url)
if rsp.status_code == 503:
global ban
ban += 1
print("封IP次数:{}".format(ban))
return parse_body(url)
soup = BeautifulSoup(rsp.text, "html.parser")
title = soup.find_all("h4")[-1].text
body = soup.find(class_="x-wiki-content x-main-content")
# 标题居中,h1 样式,添加到 body
center_tag = soup.new_tag("center")
title_tag = soup.new_tag("h1")
title_tag.string = title
center_tag.insert(0, title_tag)
body.insert(0, center_tag)
html = html_template.format(body=str(body))
global count
count += 1
print("爬取页面成功数:{}".format(count))
html = html.replace("data-src", "src")
return html
def make_pdf(htmls):
html_files = []
for index, html in enumerate(htmls):
file = str(index) + ".html"
html_files.append(file)
with open(file, "w", encoding="utf-8") as f:
f.write(html)
options = {
"page-size": "Letter",
"margin-top": "0.75in",
"margin-right": "0.75in",
"margin-bottom": "0.75in",
"margin-left": "0.75in",
"encoding": "UTF-8",
"custom-header": [("Accept-Encoding", "gzip")],
"cookie": [
("cookie-name1", "cookie-value1"), ("cookie-name2", "cookie-value2")
],
"outline-depth": 10,
}
try:
pdfkit.from_file(html_files, "python.pdf", options=options)
except Exception:
pass
for file in html_files:
os.remove(file)
print("已制作电子书 python.pdf 在当前目录!")
if __name__ == "__main__":
urls = parse_url()
with ThreadPoolExecutor(max_workers=16) as executor:
htmls = executor.map(parse_body, urls)
make_pdf(htmls)
``` |
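pdfkit only wraps the external wkhtmltopdf binary, so a quick smoke test before a long crawl can save time; the output file name below is arbitrary:
```python
# Verify that wkhtmltopdf is installed and reachable by pdfkit.
import pdfkit
html = '<html><body><h1>wkhtmltopdf smoke test</h1></body></html>'
pdfkit.from_string(html, 'smoke_test.pdf', options={'encoding': 'UTF-8'})
```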
{
"source": "96chh/gdut-check-results",
"score": 3
} |
#### File: 96chh/gdut-check-results/login.py
```python
import requests
from bs4 import BeautifulSoup
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWeb'
'Kit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36',
}
ss = requests.Session()
def sso():
url = 'http://authserver.gdut.edu.cn/authserver/login'
payload = {'service': 'http://jxfw.gdut.edu.cn/new/ssoLogin'}
rsp = ss.get(url, params=payload, headers=headers)
soup = BeautifulSoup(rsp.text, 'html.parser')
try:
lt = soup.form.find_all('input')[2].get('value')
except Exception:
return
execution = soup.form.find_all('input')[4].get('value')
data = {
'username': 'xxxx',
'password': '<PASSWORD>',
'lt': lt,
'dllt': 'userNamePasswordLogin',
'execution': execution,
'_eventId': 'submit',
'rmShown': '1',
}
ss.post(url, params=payload, headers=headers, data=data)
if __name__ == '__main__':
sso()
``` |
{
"source": "96Gabriel/BIR-Bar-Inventory-Register",
"score": 3
} |
#### File: 96Gabriel/BIR-Bar-Inventory-Register/db_operations.py
```python
import mysql.connector
#Error for wrong length
class BarcodeLengthError(Exception):
pass
#Functions for operations
def insertProduct(my_cursor, productName, barcode, price):
try:
if len(barcode) != 13:
raise BarcodeLengthError('Barcode length is not correct')
insertQuery = 'INSERT INTO PRODUCTS VALUES(%s, %s, %s)'
productValues = (barcode, productName, price)
my_cursor.execute(insertQuery, productValues)
return 'Item successfully stored in the database'
except (mysql.connector.Error, BarcodeLengthError) as err:
return f'Something went wrong: {err}'
def searchProduct(my_cursor, barcode):
try:
selectQuery = 'SELECT PRICE FROM PRODUCTS WHERE BARCODE = %s'
my_cursor.execute(selectQuery, (barcode,))
return my_cursor.fetchone()[0]
except TypeError:
return None
def checkChange(amount, price):
change = amount - float(price)
return change
def removeProduct(my_cursor, barcode):
removeQuery = 'DELETE FROM PRODUCTS WHERE BARCODE = %s'
my_cursor.execute(removeQuery, (barcode,))
``` |
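A wiring sketch for the helpers above; the connection parameters are placeholders and the PRODUCTS(barcode, name, price) layout is inferred from the queries:
```python
# Wiring sketch for db_operations; connection details are placeholders.
import mysql.connector
from db_operations import insertProduct, searchProduct, checkChange
conn = mysql.connector.connect(host='localhost', user='bar',
                               password='***', database='bir')
cursor = conn.cursor()
print(insertProduct(cursor, 'Lager 33cl', '4006381333931', 2.50))
conn.commit()                                   # the helpers do not commit themselves
price = searchProduct(cursor, '4006381333931')
if price is not None:
    print('change:', checkChange(5.00, price))
```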
{
"source": "96jhwei/Genetic-U-Net",
"score": 4
} |
#### File: code/metrics/average_meter.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class AverageMeter(object):
"""
Class to be an average meter
"""
def __init__(self):
self.current_value = 0
self.average_value = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
"""
Reset average meter.
"""
self.current_value = 0
self.average_value = 0
self.sum = 0
self.count = 0
def update(self, current_value, increment=1):
"""
Update average meter by given current value and number of increment.
"""
self.current_value = current_value
self.sum += current_value * increment
self.count += increment
self.average_value = self.sum / self.count
@property
def val(self):
"""
Return average value of the average meter
"""
return self.average_value
```
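A quick usage sketch for `AverageMeter`, assuming the class above is in scope:
```python
# Track a size-weighted running mean over mini-batches.
meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(batch_loss, increment=batch_size)
print(meter.val)   # (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74
```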
#### File: code/metrics/pr_curve.py
```python
from sklearn.metrics import precision_recall_curve
import sys
sys.path.append('../')
from util.numpy_utils import flatten_tensor
def get_pr_curve(preds, targets):
"""
Get precision recall curve
Arguments:
preds(torch tensor): raw probability outputs
targets(torch tensor): ground truth
Returns:
precisions
recalls
thresholds
"""
preds, targets = list(map(flatten_tensor, [preds, targets]))
precisions, recalls, thresholds = precision_recall_curve(
y_true=targets,
probas_pred=preds,
pos_label=None,
sample_weight=None
)
return precisions, recalls, thresholds
```
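A usage sketch for `get_pr_curve`; the tensors stand in for sigmoid outputs and binary masks, and it is assumed that the project's `flatten_tensor` accepts plain CPU tensors:
```python
# Feed a toy batch through get_pr_curve.
import torch
preds = torch.tensor([[0.1, 0.9], [0.4, 0.8]])
targets = torch.tensor([[0, 1], [0, 1]])
precisions, recalls, thresholds = get_pr_curve(preds, targets)
print(precisions, recalls, thresholds)
```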
#### File: code/train/train_model.py
```python
import numpy
from torch.utils.data import DataLoader
from tqdm import tqdm
from loss.FocalLoss import FocalLossForSigmoid
import torch
from metrics.calculate_metrics import calculate_metrics
import shutil
from metrics.average_meter import AverageMeter
import torch.multiprocessing
from torch.nn.utils.clip_grad import clip_grad_norm_
import os
import sys
import numpy as np
import random
from thop import profile
from .util.get_optimizer import get_optimizer
from dataset.util.get_datasets import get_datasets
import multiprocessing as mp
sys.path.append('../')
def train_one_model(optimizer_name, learning_rate, l2_weight_decay, gen_num, ind_num, model, batch_size, epochs, device,
train_set_name, valid_set_name,
train_set_root, valid_set_root, exp_name,
mode='train'):
seed = 12
torch.cuda.empty_cache()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = True
model.to(device)
model.train()
loss_func = FocalLossForSigmoid(reduction='mean').to(device)
optimizer = get_optimizer(optimizer_name, filter(lambda p: p.requires_grad, model.parameters()), learning_rate, l2_weight_decay)
train_set, num_return = get_datasets(train_set_name, train_set_root, True)
valid_set, _ = get_datasets(valid_set_name, valid_set_root, False)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, num_workers=3)
valid_loader = DataLoader(dataset=valid_set, batch_size=1, shuffle=False, num_workers=1)
best_f1_score = 0
flag = 0
count = 0
valid_epoch = 80
metrics_name = ['flops', 'param', 'accuracy', 'recall', 'specificity', 'precision', 'f1_score', 'auroc', 'iou']
metrics = {}
for metric_name in metrics_name:
if metric_name == 'flops' or metric_name == 'param':
metrics.update({metric_name: 100})
else:
metrics.update({metric_name: 0})
try:
for i in range(epochs):
train_tqdm_batch = tqdm(iterable=train_loader, total=numpy.ceil(len(train_set) / batch_size))
for images, targets in train_tqdm_batch:
images, targets = images.to(device), targets.to(device)
optimizer.zero_grad()
preds = model(images)
loss = loss_func(preds, targets)
loss.backward()
clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
train_tqdm_batch.close()
print('gens_{} individual_{}_epoch_{} train end'.format(gen_num, ind_num, i))
epoch_acc = AverageMeter()
epoch_recall = AverageMeter()
epoch_precision = AverageMeter()
epoch_specificity = AverageMeter()
epoch_f1_score = AverageMeter()
epoch_iou = AverageMeter()
epoch_auroc = AverageMeter()
if (i >= valid_epoch):
with torch.no_grad():
model.eval()
valid_tqdm_batch = tqdm(iterable=valid_loader, total=numpy.ceil(len(valid_set) / 1))
for images, targets in valid_tqdm_batch:
images = images.to(device)
targets = targets.to(device)
preds = model(images)
(acc, recall, specificity, precision,
f1_score, iou, auroc) = calculate_metrics(preds=preds, targets=targets, device=device)
epoch_acc.update(acc)
epoch_recall.update(recall)
epoch_precision.update(precision)
epoch_specificity.update(specificity)
epoch_f1_score.update(f1_score)
epoch_iou.update(iou)
epoch_auroc.update(auroc)
if i == valid_epoch:
flops, param = profile(model=model, inputs=(images,), verbose=False)
flops = flops / 1e11
param = param / 1e6
print('gens_{} individual_{}_epoch_{} validate end'.format(gen_num, ind_num, i))
print('acc:{} | recall:{} | spe:{} | pre:{} | f1_score:{} | auroc:{}'
.format(epoch_acc.val,
epoch_recall.val,
epoch_specificity.val,
epoch_precision.val,
epoch_f1_score.val,
epoch_auroc.val))
if epoch_f1_score.val > best_f1_score:
best_f1_score = epoch_f1_score.val
flag = i
count = 0
for key in list(metrics):
if key == 'flops':
metrics[key] = flops
elif key == 'param':
metrics[key] = param
elif key == 'accuracy':
metrics[key] = epoch_acc.val
elif key == 'recall':
metrics[key] = epoch_recall.val
elif key == 'specificity':
metrics[key] = epoch_specificity.val
elif key == 'precision':
metrics[key] = epoch_precision.val
elif key == 'f1_score':
metrics[key] = epoch_f1_score.val
elif key == 'auroc':
metrics[key] = epoch_auroc.val
elif key == 'iou':
metrics[key] = epoch_iou.val
else:
raise NotImplementedError
import pandas as pd
from os.path import join
performance_df = pd.DataFrame(
data=[[gen_num, ind_num, epoch_acc.val, epoch_recall.val, epoch_specificity.val,
epoch_precision.val,
epoch_f1_score.val, epoch_iou.val, epoch_auroc.val]],
columns=['epoch', 'individual', 'acc', 'recall',
'specificity', 'precision', 'f1_score', 'iou',
'auroc', ]
)
performance_csv_path = join(os.path.abspath('.'), 'exps/{}/csv'.format(exp_name),
'gens_{} individual_{} performance.csv'.format(gen_num, ind_num))
performance_df.to_csv(performance_csv_path)
else:
if i >= valid_epoch:
count += 1
end = None
if i > valid_epoch + 15 and best_f1_score < 0.50:
end = True
if (count >= 70) or end:
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
print('gens_{} individual_{} train early stop'.format(gen_num, ind_num))
print('=======================================================================')
valid_tqdm_batch.close()
return metrics, True
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
valid_tqdm_batch.close()
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
print('=======================================================================')
except RuntimeError as exception:
images.detach_()
del images
del model
del targets
return metrics, False
return metrics, True
```
#### File: code/util/create_dir.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
def create_dirs(dirs):
"""
Utility function for creating directories
Args:
dirs (list of string): A list of directories to create if these
directories are not found.
"""
logger = logging.getLogger('Create Directories')
for dir_ in dirs:
try:
os.makedirs(dir_)
except FileExistsError:
logger.warning('Directories already exist: %s', dir_)
```
#### File: code/util/get_analytic_plot.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import sys
sys.path.append('../')
from ..metrics.binary_confusion_matrix import get_binary_confusion_matrix
def get_analytic_plot(input_, target, device, pixel = None, threshold=0.5):
"""
Get analytic plot
Arguments:
        preds (torch tensor): raw probability outputs
targets (torch tensor): ground truth
threshold: (float): threshold value, default: 0.5
Returns:
plots (torch tensor): analytic plots
"""
(true_positive, false_positive,
_, false_negative) = get_binary_confusion_matrix(
input_=input_, target=target, device = device, pixel = pixel,
threshold=threshold, reduction='none')
plots = torch.cat([false_positive, true_positive, false_negative], dim=1)
return plots
``` |
{
"source": "96junghwan/ARSocketServer",
"score": 3
} |
#### File: ARSocketServer/scripts/packet_struct.py
```python
import socket
import numpy as np
import datetime
import struct
from typing import NamedTuple
from packet_constants import *
class PacketHeader(NamedTuple):
msgType: np.uint16
packetStructSize: np.uint16
packetDataSize: np.uint16
# static variable
form_str = '<3H10x'
def to_bytes(self):
return struct.pack(self.form_str, self.msgType, self.packetStructSize, self.packetDataSize)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class WarningPacketStruct(NamedTuple):
warningType: np.uint16
# static variable
form_str = '<H'
def to_bytes(self):
return struct.pack(self.form_str, self.warningType)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class ErrorPacketStruct(NamedTuple):
errorType: np.uint16
# static variable
form_str = '<H'
def to_bytes(self):
return struct.pack(self.form_str, self.errorType)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class NotifyPacketStruct(NamedTuple):
notifyType: np.uint16
# static variable
form_str = '<H'
def to_bytes(self):
return struct.pack(self.form_str, self.notifyType)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class RequestAccessPacketStruct(NamedTuple):
accessCode: str
# static variable
form_str = '<11s'
def to_bytes(self):
return struct.pack(self.form_str, self.accessCode)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class RequestServerStatusPacketStruct(NamedTuple):
temp: int
class RequestNNCalPacketStruct(NamedTuple):
frameID: np.int32
imageWholeSize: np.uint32
dataSize: np.uint16
offset: np.uint32
order: np.int32
nnType: np.int32
# static variable
form_str = '<iIHIii'
def to_bytes(self):
return struct.pack(self.form_str, self.frameID, self.imageWholeSize, self.dataSize, self.offset, self.order, self.nnType)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class ResponseAccessPacketStruct(NamedTuple):
accessResult: np.uint16
# static variable
form_str = '<H'
def to_bytes(self):
return struct.pack(self.form_str, self.accessResult)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class ResponseServerStatusPacketStruct(NamedTuple):
ccu: np.uint32
serverBufferStatus: np.uint16
# static variable
form_str = '<IH'
def to_bytes(self):
return struct.pack(self.form_str, self.ccu, self.serverBufferStatus)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class ResponseSegmentationPacketStruct(NamedTuple):
frameID: np.int32
maskWholeSize: np.uint32
dataSize: np.uint16
result: np.uint16
nnType: np.int32
offset: np.uint32
order: np.int32
width: np.uint16
height: np.uint16
# static variable
form_str = '<iIHHiIiHH'
def to_bytes(self):
return struct.pack(self.form_str, self.frameID, self.maskWholeSize, self.dataSize, self.result,
self.nnType, self.offset, self.order, self.width, self.height)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
class Response2DPosePacketStruct(NamedTuple):
frameID: np.int32
jointWholeSize: np.uint32
dataSize: np.uint16
result: np.uint16
nnType: np.int32
offset: np.uint32
order: np.int32
jointNumbers: np.uint16
# static variable
form_str = '<iIHHiIiH'
def to_bytes(self):
return struct.pack(self.form_str, self.frameID, self.jointWholeSize, self.dataSize,
self.result, self.nnType, self.offset, self.order, self.jointNumbers)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
# needs additions: to be agreed with the C# side
class Response3DPosePacketStruct(NamedTuple):
frameID: np.int32
jointWholeSize: np.uint32
dataSize: np.uint16
result: np.uint16
nnType: np.int32
offset: np.uint32
order: np.int32
people: np.uint16
# static variable
form_str = '<iIHHiIiH'
def to_bytes(self):
return struct.pack(self.form_str, self.frameID, self.jointWholeSize, self.dataSize,
self.result, self.nnType, self.offset, self.order, self.people)
@classmethod
def from_bytes(cls, bytes):
return cls._make(struct.unpack(cls.form_str, bytes))
packetStructDict = {
MsgType.WARNING: WarningPacketStruct,
MsgType.ERROR: ErrorPacketStruct,
MsgType.NOTIFY: NotifyPacketStruct,
MsgType.REQUEST_ACCESS: RequestAccessPacketStruct,
MsgType.REQUEST_SERVER_STATUS: RequestServerStatusPacketStruct,
MsgType.REQUEST_NNCAL: RequestNNCalPacketStruct,
MsgType.RESPONSE_ACCESS: ResponseAccessPacketStruct,
MsgType.RESPONSE_SERVER_STATUS: ResponseServerStatusPacketStruct,
MsgType.RESPONSE_SEGMENTATION: ResponseSegmentationPacketStruct,
MsgType.RESPONSE_2DPOSE: Response2DPosePacketStruct,
MsgType.RESPONSE_3DPOSE: Response3DPosePacketStruct
}
```
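A round-trip sketch for the struct helpers above: the header format `'<3H10x'` packs to 16 bytes, and `MsgType` values are assumed to be plain integers since `struct.pack` requires ints:
```python
# Pack a REQUEST_ACCESS packet and parse it back.
body = RequestAccessPacketStruct(accessCode=b'01.05.01')    # padded to 11 bytes by '<11s'
body_bytes = body.to_bytes()
header = PacketHeader(msgType=MsgType.REQUEST_ACCESS,       # assumed int-valued constant
                      packetStructSize=len(body_bytes),
                      packetDataSize=0)
packet = header.to_bytes() + body_bytes
parsed_header = PacketHeader.from_bytes(packet[:16])
parsed_body = RequestAccessPacketStruct.from_bytes(
    packet[16:16 + parsed_header.packetStructSize])
print(parsed_header, parsed_body)
```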
#### File: ARSocketServer/scripts/ServerConfig.py
```python
class ServerCfg:
    # locate files relative to the current script, e.g. os.path.dirname(__file__) + '\\..\\..\\..\\AlphaPose\\'
def __init__(self):
        # enable/disable each neural network on the server
self.UseFastPose = True
self.UseAlphaPose = False
self.UseYolact = False
self.UseBMC = False
        # number of processes assigned to each neural network
self.FastPoseProcessNum = 1
self.AlphaPoseProcessNum = 1
self.YolactProcessNum = 1
self.BMCProcessNum = 1
        # queue size settings
self.Q_MAX_SIZE = 10
        self.FPS = 10  # later this should be measured instead of hard-coded
        # server IP/port; when port forwarding, use the internal IP and the forwarded port
self.Server_IP = '127.0.0.1'
self.Server_Port = 9999
        # maximum number of concurrent clients
self.CLIENT_MAX = 100
        # server access codes
self.developer_code = "99.99.99"
self.standard_code = "01.05.01"
```
#### File: ARSocketServer/scripts/Socket_Buffer.py
```python
from multiprocessing import Queue
import ServerConfig
import cv2
class buffer:
def __init__(self, config):
        # inner helper: create one buffer per process
def create_buffers(proc_num, Q_MAX_SIZE):
buffers = []
for i in range(proc_num):
buffer = Queue(maxsize=Q_MAX_SIZE)
init_buffer(buffer)
buffers.append(buffer)
return buffers
        # inner helper: put one image into the buffer so it starts initialized
def init_buffer(buffer):
init_img = cv2.imread('init.jpg')
buffer.put({
'client_socket': 0,
'frame_id': 0,
'frame': init_img,
'option': 0})
        self.FastBuffers = create_buffers(config.FastPoseProcessNum, config.Q_MAX_SIZE)
        self.AlphaBuffers = create_buffers(config.AlphaPoseProcessNum, config.Q_MAX_SIZE)
        self.YOLACTBuffers = create_buffers(config.YolactProcessNum, config.Q_MAX_SIZE)  # attribute name as defined in ServerCfg
    # close all buffers
    def close(self, config):
        def closeEachBuffers(buffers, proc_num, Q_MAX_SIZE):
            for i in range(proc_num):
                buffers[i].close()
                buffers[i].join_thread()  # multiprocessing.Queue exposes join_thread(), not joint_thread()
        closeEachBuffers(self.FastBuffers, config.FastPoseProcessNum, config.Q_MAX_SIZE)
        closeEachBuffers(self.AlphaBuffers, config.AlphaPoseProcessNum, config.Q_MAX_SIZE)
        closeEachBuffers(self.YOLACTBuffers, config.YolactProcessNum, config.Q_MAX_SIZE)
```
#### File: ARSocketServer/scripts/SocketQ.py
```python
from multiprocessing import Queue
import cv2
import os
class SocketQ:
def __init__(self, config):
self.QList = []
        # helper that preloads images into a queue
def InitQ(Q, count):
init_img = cv2.imread(os.path.dirname(__file__) + '\\..\\data\\init.jpg')
            # preload one init image per process; the count is only approximate
for i in range(count):
Q.put({
'client_socket': 0,
'frame_id': 0,
'frame': init_img,
'option': 0})
        # create the socket I/O queues
self.FastPoseInputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.FastPoseOutputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.AlphaPoseInputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.AlphaPoseOutputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.YolactInputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.YolactOutputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.BMCInputQ = Queue(maxsize=config.Q_MAX_SIZE)
self.BMCOutputQ = Queue(maxsize=config.Q_MAX_SIZE)
        # feed each queue with as many init images as it has processes
InitQ(self.FastPoseInputQ, config.FastPoseProcessNum)
InitQ(self.AlphaPoseInputQ, config.AlphaPoseProcessNum)
InitQ(self.YolactInputQ, config.YolactProcessNum)
InitQ(self.BMCInputQ, config.BMCProcessNum)
        # keep all queues in a list for simple housekeeping
self.QList.append(self.FastPoseInputQ)
self.QList.append(self.FastPoseOutputQ)
self.QList.append(self.AlphaPoseInputQ)
self.QList.append(self.AlphaPoseOutputQ)
self.QList.append(self.YolactInputQ)
self.QList.append(self.YolactOutputQ)
self.QList.append(self.BMCInputQ)
self.QList.append(self.BMCOutputQ)
    # close every socket queue
def close(self, config):
        # release the whole queue list
for i in range(len(self.QList)):
self.QList[i].close()
self.QList[i].join_thread()
``` |
{
"source": "96kevrud/PiFermentor",
"score": 3
} |
#### File: 96kevrud/PiFermentor/spreadsheet_connector.py
```python
import gspread
#from oauth2client.service_account import ServiceAccountCredentials
import string
import datetime
from time import sleep
_sheetid = '1EestCfXucUt5knqdGMlnKy7LsTB4Oj8BkkzHQw_DCbc'
_cred_file = '<PASSWORD>.json'
_worksheet = "pifermentor"
def connect_sheet(sheetid, cred_file):
sa = gspread.service_account(filename=cred_file)
return sa.open_by_key(sheetid)
# Create new nonnection everytime for now
# TODO: Only connect when connection is lost
def connect_worksheet(sheetid, cred_file):
sheet = connect_sheet(sheetid, cred_file)
return sheet, sheet.worksheet(_worksheet)
def get_empty_row_idx_whole(worksheet):
col_vals = worksheet.col_values(1)
return len(col_vals)+1
def get_empty_row_idx_24h(worksheet):
col_vals = worksheet.col_values(7)
return len(col_vals)+1
def get_sheet_formated_datetime(dt):
return dt.strftime("%Y-%m-%d %H.%M.%S")
def get_range_str_whole(row_idx, row_len):
from_ = 'A'+str(row_idx)
to_ = 'E'+str(row_idx)
return from_+':'+to_
def get_range_str_24h(row_idx, row_len):
from_ = 'G'+str(row_idx)
to_ = 'K'+str(row_idx)
return from_+':'+to_
# Filter out so only 24h remains
# Assume that alldata is sorted on time
# [[Time, temp, target, delta, on/off],
# [...]]
def filter_24h(alldata):
if len(alldata) == 0 or len(alldata) == 1:
return alldata
top_time = datetime.datetime.strptime(alldata[0][0], '%Y-%m-%d %H.%M.%S')
latest_time = datetime.datetime.strptime(alldata[-1][0], '%Y-%m-%d %H.%M.%S')
day_delta = datetime.timedelta(hours=24)
#If top time plus 24h is smaller then remove top time
cmp_time = top_time + day_delta
if cmp_time < latest_time:
del alldata[0]
return filter_24h(alldata)
return alldata
def append_row_24h(data_row, sheet=None, worksheet=None):
if sheet == None and worksheet == None:
sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
data_row = data_row[0]
data_row[0] = get_sheet_formated_datetime(data_row[0])
row_idx = get_empty_row_idx_24h(worksheet)
range_ = 'G2:K'+str(row_idx-1 if row_idx != 2 else 2)
alldata = worksheet.get(range_)
alldata.append(data_row)
alldata = filter_24h(alldata)
sheet.values_clear(_worksheet+'!'+range_)
range_ = 'G2:K'+str(len(alldata)+1)
worksheet.update(range_, alldata, value_input_option='USER_ENTERED')
return sheet, worksheet
# Data row contains latest data to log
# Time, temp, target, delta, on/off
def append_row_all(data_row, sheet=None, worksheet=None):
if sheet == None and worksheet == None:
sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
data_row[0][0] = get_sheet_formated_datetime(data_row[0][0])
row_idx = get_empty_row_idx_whole(worksheet)
range_str = get_range_str_whole(row_idx, len(data_row[0]))
worksheet.update(range_str, data_row, value_input_option='USER_ENTERED')
return sheet, worksheet
def clear_all():
sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
sheet.values_clear(_worksheet+'!A2:E2000')
sheet.values_clear(_worksheet+'!G2:K1000')
def change_beer_name(name):
sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
worksheet.update("M12", name)
def get_beer_name():
sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
return worksheet.acell("M12").value
#worksheet = connect_worksheet(worksheet_name, _sheetid, _cred_file)
#sheet, worksheet = connect_worksheet(_sheetid, _cred_file)
#_append_row_24h(sheet, worksheet, ["2021-06-30 23.56.11", 23, 23, 23, "ON"])
#count = 0
#while True:
# count += 1
# temp = 20+(count%6)
# row = [[datetime.datetime.now(),temp, 21.5, abs(21.5-temp), 'ON']]
# append_row(row)
# sleep(1)
```
#### File: 96kevrud/PiFermentor/spreadsheet_logger.py
```python
import threading
import time
from datetime import datetime
import spreadsheet_connector
class SpreadsheetLogger:
def __init__(self, sensor, controller, fridge):
self.thread = threading.Thread(target=self._log)
self.sensor = sensor
self.controller = controller
self.fridge = fridge
self.sleep_time_24h = 60*5
self.sleep_time_all = 60*60*24
self.thread.start()
def _log(self):
counter_24h = 0
counter_all = 0
while(True):
temp = self.sensor.temp()
target = self.controller.target_temp
delta = self.controller.temp_delta
dt_now = datetime.now()
on_off = self.fridge.on_off_str()
sheet_row_1 = [[dt_now, temp, target, delta, on_off]]
sheet_row_2 = [[dt_now, temp, target, delta, on_off]]
if counter_all >= self.sleep_time_all:
s, ws = spreadsheet_connector.append_row_all(sheet_row_1)
spreadsheet_connector.append_row_24h(sheet_row_2, s, ws)
counter_24h = 0
counter_all = 0
elif counter_24h >= self.sleep_time_24h:
spreadsheet_connector.append_row_24h(sheet_row_1)
counter_24h = 0
counter_24h += 3
counter_all += 3
time.sleep(3)
```
#### File: 96kevrud/PiFermentor/webserver.py
```python
import io
import urllib
from http.server import BaseHTTPRequestHandler
import spreadsheet_connector
class WebServer(BaseHTTPRequestHandler):
sensor = None
fridge = None
controller = None
temp = 0
def get_response_body(self):
print(self.sensor.temp())
with io.open("webpage.html","r", encoding="utf-8") as f:
text = f.read()
text = text.replace("%TAR_TEMP%", "%.2f" % self.controller.target_temp)
text = text.replace("%CUR_TEMP%", "%.2f" % self.sensor.temp())
text = text.replace("%ON_OFF_FRIDGE%", self.fridge.on_off_str())
text = text.replace("%NAME_TEMP%", self.controller.name)
return bytes(text, "utf-8")
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
if("temp" in post_data):
try:
t = int(post_data["temp"][0])
self.controller.set_target_temp(t)
except:
print("Invalid input")
elif("clear" in post_data):
spreadsheet_connector.clear_all()
elif("name" in post_data):
try:
name = str(post_data["name"][0])
self.controller.name = name
spreadsheet_connector.change_beer_name(name)
except:
print("Invalid input")
self.do_GET()
def do_GET(self):
self.send_response(200)
if self.path.endswith(".css"):
self.send_header("Content-type", "text/css")
self.end_headers()
f = open("mystyle.css")
body = bytes(f.read(), "utf-8")
f.close()
else:
self.send_header("Content-type", "text/html")
self.end_headers()
body = self.get_response_body()
self.wfile.write(body)
``` |
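`WebServer` reads its collaborators from class attributes, so they must be assigned before the handler is passed to `http.server`; the dummy classes below are stand-ins for the project's real sensor, fridge and controller objects, and the project's dependencies (gspread etc.) are assumed to be installed:
```python
# Wiring sketch for WebServer with stand-in collaborators.
from http.server import HTTPServer
from webserver import WebServer
class DummySensor:
    def temp(self):
        return 21.3
class DummyFridge:
    def on_off_str(self):
        return 'OFF'
class DummyController:
    target_temp = 20.0
    temp_delta = 1.3
    name = 'Test brew'
    def set_target_temp(self, t):
        self.target_temp = t
WebServer.sensor = DummySensor()
WebServer.fridge = DummyFridge()
WebServer.controller = DummyController()
HTTPServer(('0.0.0.0', 8080), WebServer).serve_forever()
```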
{
"source": "96liuzhixin/DFRobot_bmm150",
"score": 2
} |
#### File: examples/data_ready_interrupt/data_readly_interrupt.py
```python
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from DFRobot_bmm150 import *
bmm150 = DFRobot_bmm150_I2C (0x01 ,0x13) # bus default use I2C1 , iic address is 0x13
#bmm150 = DFRobot_bmm150_SPI (27) # default use spi0 ,cs pin is 27
def setup():
while ERROR == bmm150.sensor_init():
print "sensor init error ,please check connect"
'''
POWERMODE_NORMAL
POWERMODE_FORCED
POWERMODE_SLEEP
POWERMODE_SUSPEND
'''
bmm150.set_operation_mode(POWERMODE_NORMAL)
'''
PRESETMODE_LOWPOWER
PRESETMODE_REGULAR
PRESETMODE_HIGHACCURACY
PRESETMODE_ENHANCED
'''
bmm150.set_preset_mode(PRESETMODE_LOWPOWER)
'''
Enable or disable the pin :
ENABLE_DRDY
DISABLE_DRDY (default mode)
polarity Active level
POKARITY_HIGH (default active high level )
POKARITY_LOW
'''
bmm150.set_data_readly_interrupt_pin(ENABLE_DRDY ,POKARITY_HIGH)
def loop():
if bmm150.get_data_readly_state() == 1:
rslt = bmm150.get_geomagnetic()
print "mag x = %d ut"%rslt[0]
print "mag y = %d ut"%rslt[1]
print "mag z = %d ut"%rslt[2]
print ""
else:
time.sleep(1)
time.sleep(1)
if __name__ == "__main__":
setup()
while True:
loop()
``` |
{
"source": "96liuzhixin/DFRobot_MGC3130",
"score": 2
} |
#### File: python/raspberrypi/DFRobot_MGC3130.py
```python
import sys
import smbus
import logging
import numpy as np
from ctypes import *
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
logger = logging.getLogger()
logger.setLevel(logging.INFO)    # show all log messages
#logger.setLevel(logging.FATAL)  # use this option to log errors only and suppress the rest
ph = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - [%(filename)s %(funcName)s]:%(lineno)d - %(levelname)s: %(message)s")
ph.setFormatter(formatter)
logger.addHandler(ph)
DFRobot_MGC3130_IIC_ADDR = 0x42
class DFRobot_MGC3130(object):
def __init__(self,d_pin,mclr_pin,bus = 1):
self.now_touch = 0
self.last_touch = 0
self.gesture_info = 0
self.touch_info = 0
self.air_wheel_info = 0
self.x_position = 0
self.y_position = 0
self.z_position = 0
self.NOERROR = 0x0000
self.UNKONWN_COMMAND = 0x0001
self.WRONG_PARAMETER_VALUE = 0x0014
self.UNKNOWN_PARAMETER_ID = 0x0015
self.WAKEUP_HAPPEND = 0x001A
self.NO_GESTURE = 0
self.GARBAGE_MODEL = 1
self.FILCK_R = 2
self.FILCK_L = 3
self.FILCK_U = 4
self.FILCK_D = 5
self.CIRCLE_CLOCKWISE = 6
self.CIRCLE_COUNTERCLOCKWISE = 7
self.TOUCH_DOWN = 1
self.TOUCH_LEFT = 2
self.TOUCH_UP = 4
self.TOUCH_RIGHT = 8
self.TPUCH_CENTER = 16
self.TAP_DOWN = 32
self.TAP_LEFT = 64
self.TAP_UP = 128
self.TAP_RIGHT = 256
self.TAP_CENTER = 512
self.DOUBLE_TAP_DOWN = 1024
self.DOUBLE_TAP_LEFT = 2048
self.DOUBLE_TAP_UP = 4096
self.DOUBLE_TAP_RIGHT = 8192
self.DOUBLE_TAP_CENTER = 16384
self._ts_pin = d_pin
self._reset_pin = mclr_pin
self.position = False
self.last_time_stamp = 0
self.now_time_stamp = 0
self.i2cbus=smbus.SMBus(bus)
self.i2c_addr = DFRobot_MGC3130_IIC_ADDR
'''
    @brief Init function
    @return True means initialization succeeded, False means it failed
'''
def begin(self):
ret = False
self._ts_input()
self.reset()
while(self.disable_touch_detection()!=0):
ret = True
while(self.disable_approach_detection()!=0):
ret = True
while(self.disable_air_wheel()!=0):
ret = True
while(self.disable_gestures()!=0):
ret = True
while(self._enable_data_output()!=0):
ret = True
while(self._lock_data_output()!=0):
ret = True
return ret
'''
    @brief Reset the sensor
'''
def reset(self):
GPIO.setup(self._reset_pin, GPIO.OUT)
GPIO.output(self._reset_pin, GPIO.LOW)
time.sleep(0.25)
GPIO.output(self._reset_pin, GPIO.HIGH)
time.sleep(2)
'''
    @brief Enable touch detection
    @return -1 means the setting failed, 0 means it succeeded
'''
def enable_touch_detection(self):
ret = -1
buf=[0x00,0x00,0xA2,0x97,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
'''
    @brief Disable touch detection
    @return -1 means the setting failed, 0 means it succeeded
'''
def disable_touch_detection(self):
ret = -1
buf=[0x00,0x00,0xA2,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
'''
    @brief Enable approach detection
    @return -1 means the setting failed, 0 means it succeeded
'''
def enable_approach_detection(self):
ret = -1
buf=[0x00,0x00,0xA2,0x97,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
'''
    @brief Disable approach detection
    @return -1 means the setting failed, 0 means it succeeded
'''
def disable_approach_detection(self):
ret = -1
buf=[0x00,0x00,0xA2,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
'''
    @brief Enable the AirWheel function
    @return -1 means the setting failed, 0 means it succeeded
'''
def enable_air_wheel(self):
ret = -1
buf=[0x00,0x00,0xA2,0x90,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x20,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Disable the AirWheel function
    @return -1 if the setting fails, 0 if it succeeds
  '''
def disable_air_wheel(self):
ret = -1
buf=[0x00,0x00,0xA2,0x90,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Enable the gesture recognition function
    @return -1 if the setting fails, 0 if it succeeds
  '''
def enable_gestures(self):
ret = -1
buf=[0x00,0x00,0xA2,0x85,0x00, 0x00,0x00, 0x7F,0x00,0x00,0x00, 0x7F,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Disable the gesture recognition function
    @return -1 if the setting fails, 0 if it succeeds
  '''
def disable_gestures(self):
ret = -1
buf=[0x00,0x00,0xA2,0x85,0x00, 0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Get the X-axis position
    @return X-axis position
  '''
def get_x_position(self):
return self.x_position
  '''
    @brief Get the Y-axis position
    @return Y-axis position
  '''
def get_y_position(self):
return self.y_position
  '''
    @brief Get the Z-axis position
    @return Z-axis position
  '''
def get_z_position(self):
return self.z_position
  '''
    @brief Get the touch information
    @return Touch information:
                 DOUBLE_TAP_CENTER/DOUBLE_TAP_RIGHT/DOUBLE_TAP_UP/DOUBLE_TAP_LEFT/DOUBLE_TAP_DOWN
                 TAP_CENTER/TAP_RIGHT/TAP_UP/TAP_LEFT/TAP_DOWN
                 TPUCH_CENTER/TOUCH_RIGHT/TOUCH_UP/TOUCH_LEFT/TOUCH_DOWN
  '''
def get_touch_info(self):
data = self.touch_info & 0xFFFF
if(self.touch_info & 0x3E0):
if((self.now_touch == self.last_touch) and (self.now_time_stamp == self.last_time_stamp)):
data = self.touch_info & 0xFC1F
if(self.touch_info & 0x7C00):
if((self.now_touch == self.last_touch) and (self.now_time_stamp == self.last_time_stamp)):
data = self.touch_info & 0x83FF
self.last_touch = self.now_touch
self.last_time_stamp = self.now_time_stamp
return data
  '''
    @brief Get the gesture information
    @return Gesture information
  '''
def get_gesture_info(self):
return self.gesture_info & 0xFF
  '''
    @brief Check whether position information is available
    @return True if position information is available, False otherwise
  '''
def have_position_info(self):
return self.position
  '''
    @brief Read data from the sensor
  '''
def sensor_data_recv(self):
self.position = False
self.x_position = 0
self.y_position = 0
self.z_position = 0
self.air_wheel_info = 0
self.gesture_info = 0
self.touch_info = 0
buf=self._read(24)
if(buf!=0):
if((buf[3] == 0x91) and (buf[4] == 0x1E)):
self.gesture_info = buf[8] | buf[9]<<8 | buf[10]<<16 | buf[11]<<24
self.touch_info = buf[12] | buf[13]<<8 | buf[14]<<16 | buf[15]<<24
self.now_time_stamp = buf[14] | buf[15]<<8
self.now_touch = buf[12] | buf[13]<<8
if(buf[7] & 0x02):
self.air_wheel_info = buf[16] | buf[17]<<8
if(buf[7] & 0x01):
self.position = True
self.x_position = buf[18] | buf[19]<<8
self.y_position = buf[20] | buf[21]<<8
self.z_position = buf[22] | buf[23]<<8
elif(buf[4] == 0x1F):
while(self._enable_data_output()!=0):
ret = True
while(self._lock_data_output()!=0):
ret = True
  '''
    @brief Set the sensor's output data format
    @return -1 if the setting fails, 0 if it succeeds
  '''
def _enable_data_output(self):
ret = -1
buf=[0x00,0x00,0xA2,0xA0,0x00, 0x00,0x00, 0x1E,0x00,0x00,0x00, 0xFF,0xFF,0xFF,0xFF]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Lock the sensor's output data format
    @return -1 if the setting fails, 0 if it succeeds
  '''
def _lock_data_output(self):
ret = -1
buf=[0x00,0x00,0xA2,0xA1,0x00, 0x00,0x00, 0x1E,0x00,0x00,0x00, 0xFF,0xFF,0xFF,0xFF]
recv_buf=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self._set_runtime_parameter(buf,16)
recv_buf = self._read(16)
if(recv_buf != 0):
if(recv_buf[4] == 0xA2):
ret = recv_buf[7]>>8 | recv_buf[6]
return ret
  '''
    @brief Set the host's ts_pin to input mode (TS: transfer status line)
  '''
def _ts_input(self):
GPIO.setup(self._ts_pin, GPIO.IN)
  '''
    @brief Set the host's ts_pin to output mode (TS: transfer status line)
  '''
def _ts_output(self):
GPIO.setup(self._ts_pin, GPIO.OUT)
  '''
    @brief Set the output level of the host's ts_pin (TS: transfer status line)
    @param mode Output level, HIGH/LOW
  '''
def _ts_write(self,mode):
if(mode):
GPIO.output(self._ts_pin, GPIO.HIGH)
else:
GPIO.output(self._ts_pin, GPIO.LOW)
  '''
    @brief Get the state of the transfer status line
    @return HIGH/LOW
  '''
def _ts_read(self):
return GPIO.input(self._ts_pin)
  '''
    @brief Write data to the sensor over I2C
    @param buf Buffer holding the data to write
    @param len Length of the data to write
  '''
def _set_runtime_parameter(self,buf,len):
self.i2cbus.write_i2c_block_data(self.i2c_addr, 0x10, buf)
  '''
    @brief Read data from the sensor over I2C
    @param len Length of the data to read
    @return The data read as a list, or 0 if the read fails
  '''
def _read(self,len):
#self._ts_input()
if(self._ts_read() != 0):
return 0
self._ts_output()
self._ts_write(0)
data = self.i2cbus.read_i2c_block_data(self.i2c_addr, 0x00, len)
self._ts_write(1)
self._ts_input()
time.sleep(0.05)
return data
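# Illustrative usage sketch: a minimal polling loop. The pin numbers are
# placeholders (BCM 16 for the TS line, BCM 17 for MCLR/reset); adjust them to
# your wiring and make sure GPIO.setmode() has been called beforehand.
#
#   sensor = DFRobot_MGC3130(d_pin=16, mclr_pin=17, bus=1)
#   sensor.begin()
#   sensor.enable_gestures()
#   sensor.enable_touch_detection()
#   while True:
#     sensor.sensor_data_recv()
#     if sensor.have_position_info():
#       print(sensor.get_x_position(), sensor.get_y_position(), sensor.get_z_position())
#     time.sleep(0.1)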
``` |
{
"source": "96liuzhixin/DFRobot_URM13",
"score": 2
} |
#### File: raspberrypi/example/URM13_work_in_I2C.py
```python
from __future__ import print_function
import sys
sys.path.append('../')
from DFRobot_URM13 import *
'''
  # Switching between UART (Modbus-RTU) and I2C/TRIG modes
  # The URM13 sensor ships in UART mode by default. It can be switched between I2C and UART by shorting different pins before power-up:
  # I2C/TRIG: short TRIG and ECHO before powering the sensor; after power-up the LED blinks twice, indicating the sensor is now in I2C mode.
  # UART (Modbus-RTU): short TRIG and BUSY before powering the sensor; after power-up the LED blinks once, indicating the sensor is now in UART (Modbus-RTU) mode.
  # Once the switch succeeds, the shorted pins can be disconnected; the selected mode is stored by the sensor and persists across power cycles.
  # Instantiate an object to drive our sensor.
'''
sensor = DFRobot_URM13_I2C(i2c_addr = 0x12, bus = 1)
def setup():
while (sensor.begin() == False):
print ('Please check that the device is properly connected')
time.sleep(3)
print("sensor begin successfully!!!\n")
'''
  * Read basic information from the module
  * Returns a list of the data read; in I2C mode:
  *      first element:  the module's communication address
  *      second element: the module's PID
  *      third element:  the module's VID (firmware version)
'''
buf = sensor.read_basic_info()
if 3 == len(buf):
  # The module's I2C slave address, default 0x12; valid device addresses are 1~127
  print("I2C address: 0x%x" %buf[0])
  # The module's PID, default 0x02; used for product verification (allows detecting the sensor type)
  print("PID: 0x0%x" %buf[1])
  # The module's VID, firmware version: 0x10 means V1.0
  print("VID: 0x%x" %buf[2])
'''
  * Set the module's communication address; saved across power cycles, takes effect after restart
  * addr  device address to set, I2C address range 1~127 (0x01~0x7F)
'''
sensor.set_addr(0x12)
'''
  * Set the measurement-related modes
  * mode  the measurement modes to set; sum the desired options to build mode:
  *       E_INTERNAL_TEMP: use onboard temperature compensation, E_EXTERNAL_TEMP: use external temperature compensation (the user must write the external temperature)
  *       E_TEMP_COMP_MODE_EN: enable temperature compensation, E_TEMP_COMP_MODE_DIS: disable temperature compensation
  *       E_AUTO_MEASURE_MODE_EN: automatic ranging, E_AUTO_MEASURE_MODE_DIS: passive ranging
  *       E_MEASURE_RANGE_MODE_LONG: long-range measurement (40 - 900cm), E_MEASURE_RANGE_MODE_SHORT: short-range measurement (15-150cm)
'''
sensor.set_measure_mode(sensor.E_INTERNAL_TEMP +
sensor.E_TEMP_COMP_MODE_EN +
sensor.E_AUTO_MEASURE_MODE_DIS +
sensor.E_MEASURE_RANGE_MODE_LONG)
'''
  * Write the ambient temperature used for external temperature compensation
  * temp  ambient temperature to write, in °C, resolution 0.1°C, signed value
'''
sensor.set_external_tempreture_C(30.0)
'''
  * Set the ranging sensitivity, 0x00-0x0A: sensitivity level 0-10
  * mode  sets the ranging sensitivity of the sensor's long-range segment (40-900cm); the smaller the value, the higher the sensitivity; saved across power cycles, takes effect immediately
'''
sensor.set_measure_sensitivity(0x00)
print()
time.sleep(1.5)
def loop():
  '''
    * Trigger function for passive measurement mode
    * In passive measurement mode, each call to this function sends one ranging command; the module measures the distance once and stores the result in the distance register
  '''
sensor.passive_measurement_TRIG()
  '''
    * Get the power-supply noise level, 0x00-0x0A corresponds to noise level 0-10
    * This parameter reflects how much the supply voltage and the environment affect the sensor. The lower the noise level, the more accurate the measured distance.
  '''
noise_level = sensor.get_noise_level()
print("Current ambient noise level: 0x0%x" %noise_level)
  '''
    * Read the current onboard temperature
    * Current onboard temperature, in °C, resolution 0.1°C, signed value
  '''
internal_tempreture_C = sensor.get_internal_tempreture_C()
print("The onboard temperature: %d C" %internal_tempreture_C)
  '''
    * Read the current distance measurement
    * Note: the reading is meaningless when the target is outside the sensor's measuring range
    * Current distance measurement in cm; long-range mode covers 40 - 900cm, short-range mode covers 15-150cm
  '''
distance_cm = sensor.get_distance_cm()
print("Current distance measurement: %d cm" %distance_cm)
print()
time.sleep(1)
if __name__ == "__main__":
setup()
while True:
loop()
```
#### File: raspberrypi/example/URM13_work_in_TRIG.py
```python
from __future__ import print_function
import sys
import RPi.GPIO as GPIO
sys.path.append('../')
from DFRobot_URM13 import *
'''
  # Switching between UART (Modbus-RTU) and I2C/TRIG modes
  # The URM13 sensor ships in UART mode by default. It can be switched between I2C and UART by shorting different pins before power-up:
  # I2C/TRIG: short TRIG and ECHO before powering the sensor; after power-up the LED blinks twice, indicating the sensor is now in I2C mode.
  # UART (Modbus-RTU): short TRIG and BUSY before powering the sensor; after power-up the LED blinks once, indicating the sensor is now in UART (Modbus-RTU) mode.
  # Once the switch succeeds, the shorted pins can be disconnected; the selected mode is stored by the sensor and persists across power cycles.
  # An I2C interface object can be instantiated to configure the sensor's measurement parameters.
'''
# sensor = DFRobot_URM13_IIC(i2c_addr = 0x12, bus = 1)
global flag, echo_pin, echo_pin_high_start_ticks, echo_pin_high_end_ticks, speed_of_sound
flag = 0
echo_pin_high_start_ticks = 0
echo_pin_high_end_ticks = 0
speed_of_sound = 0
trig_pin = 20
echo_pin = 21
def int_callback(channel):
  global flag, echo_pin, echo_pin_high_start_ticks, echo_pin_high_end_ticks   # declare globals
if 1 == GPIO.input(echo_pin) and 0 == flag:
echo_pin_high_start_ticks = time.time()
flag = 1
if 0 == GPIO.input(echo_pin) and 1 == flag:
echo_pin_high_end_ticks = time.time()
flag = 2
def delay_microsecond(microsecond):   # microsecond-level delay function
  start, end = 0, 0                   # declare variables
  start = time.time()                 # record the start time
  microsecond = (microsecond - 3) / 1000000   # convert the input to seconds; the -3 compensates for the call overhead
  while end-start < microsecond:      # loop until the elapsed time reaches the requested delay
    end = time.time()                 # record the end time
def setup():
  global echo_pin, trig_pin, speed_of_sound   # declare globals
  GPIO.setwarnings(False)   # suppress pin-configuration warnings
  GPIO.setmode(GPIO.BCM)    # use BCM pin numbering
  GPIO.setup(trig_pin, GPIO.OUT, initial=0)   # measurement trigger pin as output, initially low
  GPIO.setup(echo_pin, GPIO.IN)               # measurement data pin as input
  GPIO.add_event_detect(echo_pin, GPIO.BOTH, callback=int_callback)   # Use GPIO port to monitor sensor interrupt
  environment_temperature = 30.0   # current ambient temperature
  speed_of_sound = (331.5 + 0.6 * environment_temperature ) * 100   # speed of sound computed from the given ambient temperature, in cm/s
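  # For example, at 30 °C this gives (331.5 + 0.6 * 30) * 100 = 34950 cm/s.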
# while (sensor.begin() == False):
# print ('Please check that the device is properly connected')
# time.sleep(3)
# print("sensor begin successfully!!!\n")
  '''
    * Set the measurement-related modes
    * mode  the measurement modes to set; sum the desired options to build mode:
    *       E_INTERNAL_TEMP: use onboard temperature compensation, E_EXTERNAL_TEMP: use external temperature compensation (the user must write the external temperature)
    *       E_TEMP_COMP_MODE_EN: enable temperature compensation, E_TEMP_COMP_MODE_DIS: disable temperature compensation
    *       E_AUTO_MEASURE_MODE_EN: automatic ranging, E_AUTO_MEASURE_MODE_DIS: passive ranging
    *       E_MEASURE_RANGE_MODE_LONG: long-range measurement (40 - 900cm), E_MEASURE_RANGE_MODE_SHORT: short-range measurement (15-150cm)
  '''
# sensor.set_measure_mode(sensor.E_INTERNAL_TEMP +
# sensor.E_TEMP_COMP_MODE_EN +
# sensor.E_AUTO_MEASURE_MODE_DIS +
# sensor.E_MEASURE_RANGE_MODE_LONG)
  '''
    * Write the ambient temperature used for external temperature compensation
    * temp  ambient temperature to write, in °C, resolution 0.1°C, signed value
  '''
# sensor.set_external_tempreture_C(30.0)
  '''
    * Set the ranging sensitivity, 0x00-0x0A: sensitivity level 0-10
    * mode  sets the ranging sensitivity of the sensor's long-range segment (40-900cm); the smaller the value, the higher the sensitivity; saved across power cycles, takes effect immediately
  '''
# sensor.set_measure_sensitivity(0x00)
print()
time.sleep(1.5)
def loop():
  global flag, echo_pin, trig_pin, speed_of_sound, echo_pin_high_start_ticks, echo_pin_high_end_ticks   # declare globals
GPIO.output(trig_pin, GPIO.HIGH) # Set the trig_pin High
delay_microsecond(50) # Delay of 50 microseconds
GPIO.output(trig_pin, GPIO.LOW) # Set the trig_pin Low
for i in range(1000):
if flag == 2:
break
if flag == 2:
# Measure echo high level time, the output high level time represents the ultrasonic flight time (unit: us)
measuring_time = echo_pin_high_end_ticks - echo_pin_high_start_ticks
flag = 0
# print(measuring_time)
# print(speed_of_sound)
    '''
      * Compute the current distance measurement
      * Note: the reading is meaningless when the target is outside the sensor's measuring range
      * Current distance measurement in cm; long-range mode covers 40 - 900cm, short-range mode covers 15-150cm
      * The distance can be calculated according to the flight time of the ultrasonic wave,
      * and the ultrasonic sound speed can be compensated according to the actual ambient temperature
    '''
measuring_distance = speed_of_sound * measuring_time / 2.0
print("Current distance measurement: %d cm" %measuring_distance)
print()
time.sleep(1)
if __name__ == "__main__":
setup()
while True:
loop()
``` |
{
"source": "96lives/matrixlstm",
"score": 3
} |
#### File: classification/layers/EventDropout.py
```python
import torch
from torch import nn
from libs import utils
class EventDropout(nn.Module):
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
def forward(self, tensor, lengths=None):
if self.training:
batch_size, time_size, features_size = tensor.shape
keep_mask = torch.rand(batch_size, time_size, device=tensor.device) > self.drop_prob
if lengths is not None:
valid_mask = utils.padding_mask(lengths, batch_size, time_size)
keep_mask *= valid_mask
lengths = keep_mask.sum(dim=-1)
events = utils.select_padded(tensor, keep_mask)
return events, lengths
else:
return tensor, lengths
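# Illustrative usage sketch: randomly drop ~20% of the events in a padded
# [batch, time, features] tensor, given the per-sample valid lengths.
#
#   drop = EventDropout(drop_prob=0.2)
#   drop.train()
#   events, lengths = drop(padded_events, lengths=lengths)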
```
#### File: libs/readers/eventchunkdataset.py
```python
import os
import glob
import numpy as np
from tqdm import tqdm
from torch.utils.data.dataset import Dataset
class EventChunkDataset(Dataset):
def __init__(self, filereader, files_source, format=None, transforms=None, chunk_params=None):
super().__init__()
self.filereader = filereader
self.format = format
self.transforms = transforms
if isinstance(files_source, str) and os.path.isfile(files_source):
self.chunks = self.parse_chunk_file(files_source)
elif isinstance(files_source, str) and os.path.isdir(files_source):
files = glob.glob(os.path.join(files_source, "*/*" + self.filereader.ext))
self.chunks = self._chunk_files(files, chunk_params)
elif isinstance(files_source, list):
if os.path.isfile(files_source[0]):
self.chunks = self._chunk_files(files_source, chunk_params)
elif isinstance(files_source[0], tuple):
self.chunks = files_source
else:
raise ValueError("If a list is provided for the 'files_source' it must contain either a list "
"of files or a list of tuples")
else:
raise ValueError("The 'files_source' argument must be either a list or a path to a txt file!")
self.classes = sorted(np.unique([os.path.basename(os.path.dirname(f)) for f, _, _ in self.chunks]))
self.name_to_id = {cls: i for i, cls in enumerate(self.classes)}
@property
def num_classes(self):
return len(self.classes)
@staticmethod
def gen_chunk_indices(events_ts, delta_t):
start_ts = events_ts[0]
last_ts = events_ts[-1]
chunks = []
        # start_ts <= t < start_ts + delta_t
while start_ts < last_ts:
events_inside = np.logical_and(start_ts <= events_ts, events_ts < start_ts + delta_t)
start_idx = np.argmax(events_inside)
end_idx = events_inside.size - np.argmax(events_inside[::-1]) - 1
count = end_idx - start_idx + 1
# argmax returns 0 if the array only contains False values
# we check if there is at least one element inside the delta_t interval
if bool(events_inside[start_idx]) is True:
chunks.append((start_idx, count))
start_ts = start_ts + delta_t
return chunks
@staticmethod
def filter_chunks(chunks, ts, min_delta_t, min_n_events):
fchunks = []
prev_idx, _ = chunks[-1]
for start_idx, count in chunks:
ok_min_delta_t = min_delta_t is None or \
ts[start_idx + count - 1] - ts[start_idx] >= min_delta_t
ok_min_n_events = min_n_events is None or count >= min_n_events
if ok_min_delta_t and ok_min_n_events:
fchunks.append((start_idx, count))
return fchunks
@staticmethod
def parse_chunk_file(filename):
with open(filename, "r") as f:
lines = f.read().splitlines()
chunks = []
for line in lines:
values = line.split(" ")
chunks.append((values[0], int(values[1]), int(values[2])))
return chunks
def _chunk_files(self, files_list, params):
out_chunks = []
for file in tqdm(files_list, desc="Computing chunk indices"):
_, _, _, ts, _ = self.filereader.read_example(file)
chunks = self.gen_chunk_indices(ts, params.delta_t)
chunks = self.filter_chunks(chunks, ts, params.min_delta_t, params.min_n_events)
out_chunks += [(file, idx, count) for idx, count in chunks]
return out_chunks
def _path_to_class(self, path):
cls_name = os.path.basename(os.path.dirname(path))
return self.name_to_id[cls_name]
def read_example(self, filename, start=0, count=-1):
return self.filereader.read_example(filename, start=start, count=count)
def __getitem__(self, index):
path, start, count = self.chunks[index]
l, x, y, ts, p = self.read_example(path, start=start, count=count)
lbl = self._path_to_class(path)
events = np.column_stack([x, y, ts, p])
if self.transforms is not None:
events = self.transforms(events)
return events, lbl
def __len__(self):
return len(self.chunks)
class EventDetectionChunkDataset(EventChunkDataset):
def __init__(self, filereader, files_source, format=None,
transforms=None, mixed_transforms=None, chunk_params=None):
super(EventChunkDataset, self).__init__()
self.filereader = filereader
self.format = format
self.transforms = transforms
self.mixed_transforms = mixed_transforms
if isinstance(files_source, str) and os.path.isfile(files_source):
self.chunks = self.parse_chunk_file(files_source)
elif isinstance(files_source, str) and os.path.isdir(files_source):
# Detection datasets are not organized in directories based on the class
            # all files and annotations are in the same directory
files = glob.glob(os.path.join(files_source, "*" + self.filereader.ext))
self.chunks = self._chunk_files(files, chunk_params)
elif isinstance(files_source, list):
if os.path.isfile(files_source[0]):
self.chunks = self._chunk_files(files_source, chunk_params)
elif isinstance(files_source[0], tuple):
self.chunks = files_source
else:
raise ValueError("If a list is provided for the 'files_source' it must contain either a list "
"of files or a list of tuples")
else:
raise ValueError("The 'files_source' argument must be either a list or a path to a txt file!")
def read_annotation(self, filename, ts_start=None, ts_end=None):
return self.filereader.read_annotation(filename, ts_start=ts_start, ts_end=ts_end)
def _chunk_files(self, files_list, params):
out_chunks = []
for file in tqdm(files_list, desc="Computing chunk indices"):
_, _, _, ts, _ = self.filereader.read_example(file)
ann = self.filereader.read_annotation(self.filereader.get_ann_path(file))
ann_ts = ann[:, 4]
chunks = self.gen_chunk_indices(ts, ann_ts, params.delta_t)
chunks = self.filter_chunks(chunks, ts, params.min_delta_t, params.min_n_events)
            out_chunks += [(file,) + chunk for chunk in chunks]
return out_chunks
@staticmethod
def _get_prev_exp(n):
exp = 0
while 2 ** (exp + 32) - 1 < n:
exp += 32
return exp
@staticmethod
def parse_chunk_file(filename):
with open(filename, "r") as f:
lines = f.read().splitlines()
chunks = []
for line in lines:
values = line.split(" ")
chunk = [values[i] if i == 0 else int(values[i])
for i, v in enumerate(values)]
chunks.append(tuple(chunk))
return chunks
@staticmethod
def gen_chunk_indices(events_ts, bboxes_ts, delta_t):
unique_bboxes_ts = np.unique(bboxes_ts)
chunks = []
if len(unique_bboxes_ts) > 1:
start_ts = -1
for end_ts in unique_bboxes_ts[1:]:
# Limits each chunk to be at most delta_t len
if end_ts - start_ts > delta_t:
start_ts = end_ts - delta_t
events_inside = np.logical_and(events_ts > start_ts, events_ts <= end_ts)
start_idx = np.argmax(events_inside)
end_idx = events_inside.size - np.argmax(events_inside[::-1]) - 1
count = end_idx - start_idx + 1
# argmax returns 0 if the array only contains False values
# we check if there is at least one element inside the delta_t interval
if bool(events_inside[start_idx]) is True:
exp_base = EventDetectionChunkDataset._get_prev_exp(events_ts[start_idx])
chunks.append((start_idx, count, end_ts, end_ts, exp_base))
start_ts = end_ts
else:
start_ts = events_ts[0] - 1
end_ts = unique_bboxes_ts[0]
# Limits each chunk to be at most delta_t len
if end_ts - start_ts > delta_t:
start_ts = end_ts - delta_t
events_inside = np.logical_and(events_ts > start_ts, events_ts <= end_ts)
start_idx = np.argmax(events_inside)
end_idx = events_inside.size - np.argmax(events_inside[::-1]) - 1
count = end_idx - start_idx + 1
exp_base = EventDetectionChunkDataset._get_prev_exp(events_ts[start_idx])
chunks = [(start_idx, count, end_ts, end_ts, exp_base)]
return chunks
@staticmethod
def filter_chunks(chunks, ts, min_delta_t, min_n_events):
fchunks = []
for start_idx, count, end_ts, end_ts, exp_base in chunks:
ok_min_delta_t = min_delta_t is None or \
ts[start_idx + count - 1] - ts[start_idx] >= min_delta_t
ok_min_n_events = min_n_events is None or count >= min_n_events
if ok_min_delta_t and ok_min_n_events:
fchunks.append((start_idx, count, end_ts, end_ts, exp_base))
return fchunks
def __getitem__(self, index):
path, start, count, bbox_ts_start, bbox_ts_end, base_exp = self.chunks[index]
ann_path = self.filereader.get_ann_path(path)
l, x, y, ts, p = self.read_example(path, start=start, count=count)
if base_exp > 0:
ts += 2 ** base_exp
ann = self.read_annotation(ann_path, ts_start=bbox_ts_start, ts_end=bbox_ts_end+1)
events = np.column_stack([x, y, ts, p])
if self.transforms is not None:
events = self.transforms(events)
if self.mixed_transforms is not None:
events, ann = self.mixed_transforms(events=events, bboxes=ann)
return events, ann
def __len__(self):
return len(self.chunks)
@property
def num_classes(self):
return NotImplementedError("This functionality is not available for detection datasets")
def _path_to_class(self, path):
return NotImplementedError("This functionality is not available for detection datasets")
```
#### File: classification/libs/utils.py
```python
import torch
import numpy as np
import re
import itertools
from textwrap import wrap
import matplotlib.pyplot as plt
def padding_mask(lengths, batch_size, time_size=None):
"""
Computes a [batch_size, time_size] binary mask which selects all and only the
non padded values in the input tensor
:param torch.tensor lengths: a [batch_size] tensor containing the actual length
(before padding) of every sample in the batch
:param int batch_size: the number of samples in the batch
:param int time_size: the length of the padded sequences
:retype: torch.tensors
"""
max_len = torch.max(lengths) if time_size is None else time_size
mask = torch.arange(max_len, device=lengths.device, dtype=lengths.dtype)
mask = mask.expand(batch_size, max_len) < lengths.unsqueeze(1)
return mask.type(torch.uint8)
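# Example: padding_mask(torch.tensor([2, 4]), batch_size=2, time_size=4) gives
#   tensor([[1, 1, 0, 0],
#           [1, 1, 1, 1]], dtype=torch.uint8)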
def cat_arange(counts, dtype=torch.int32):
"""
Concatenate results of multiple arange calls
E.g.: cat_arange([2,1,3]) = [0, 1, 0, 0, 1, 2]
Credits: https://stackoverflow.com/a/20033438
:param torch.tensor counts: a 1D tensor
:return: equivalent to torch.cat([torch.arange(c) for c in counts])
"""
counts1 = counts[:-1].type(dtype)
reset_index = torch.cumsum(counts1, dim=0).type(torch.int64)
incr = torch.ones(counts.sum(), dtype=dtype, device=counts.device)
incr[0] = 0
incr[reset_index] = 1 - counts1
# Reuse the incr array for the final result.
return torch.cumsum(incr, dim=0)
def repeat_arange(counts, dtype=torch.int32):
"""
Repeat each element of arange multiple times
E.g.: repeat_arange([2,1,3]) = [0, 0, 1, 2, 2, 2]
:param counts: a 1D tensor having the same length of 'tensor'
:return: equivalent to torch.cat([torch.tensor([v]).expand(n) for v, n in enumerate(counts)])
"""
incr = torch.zeros(counts.sum(), dtype=dtype, device=counts.device)
set_index = torch.cumsum(counts[:-1], dim=0).type(torch.int64)
incr[set_index] = 1
return torch.cumsum(incr, dim=0)
def select_padded(source, mask):
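    """
    Gather the entries of `source` selected by a [batch_size, time_size] binary
    `mask`, left-align them within each sample and zero-pad every sample up to
    the size of the largest selection

    :param torch.tensor source: a [batch_size, time_size, feature_size] tensor
    :param torch.tensor mask: a binary mask selecting the values to keep
    :retype: torch.tensor of shape [batch_size, max_selected_length, feature_size]
    """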
lengths = mask.sum(-1)
max_length = lengths.max()
batch_size, time_size, feature_size = source.shape
out_tensor = source.new_zeros([batch_size, max_length, feature_size])
batch_idx = repeat_arange(lengths, torch.int64)
time_idx = cat_arange(lengths, torch.int64)
out_tensor[batch_idx, time_idx] = source[mask]
return out_tensor
def confusion_matrix_fig(cm, labels, normalize=False):
if normalize:
cm = cm.astype('float') * 10 / cm.sum(axis=1)[:, np.newaxis]
cm = np.nan_to_num(cm, copy=True)
cm = cm.astype('int')
fig = plt.figure(figsize=(7, 7), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
im = ax.imshow(cm, cmap='Oranges')
classes = ['\n'.join(wrap(l, 40)) for l in labels]
tick_marks = np.arange(len(classes))
ax.set_xlabel('Predicted', fontsize=7)
ax.set_xticks(tick_marks)
c = ax.set_xticklabels(classes, fontsize=4, rotation=-90, ha='center')
ax.xaxis.set_label_position('bottom')
ax.xaxis.tick_bottom()
ax.set_ylabel('True Label', fontsize=7)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, fontsize=4, va='center')
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], 'd') if cm[i, j] != 0 else '.',
horizontalalignment="center", fontsize=6,
verticalalignment='center', color="black")
return fig
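# Illustrative usage sketch:
#   fig = confusion_matrix_fig(np.array([[5, 1], [2, 7]]), labels=['class_a', 'class_b'])
#   fig.savefig('confusion_matrix.png')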
```
#### File: opticalflow/src/vis_utils.py
```python
import tensorflow as tf
import numpy as np
import math
import cv2
"""
Generates an RGB image where each point corresponds to flow in that direction from the center,
as visualized by flow_viz_tf.
Output: color_wheel_rgb: [1, width, height, 3]
"""
def draw_color_wheel_tf(width, height):
color_wheel_x = tf.lin_space(-width / 2.,
width / 2.,
width)
color_wheel_y = tf.lin_space(-height / 2.,
height / 2.,
height)
color_wheel_X, color_wheel_Y = tf.meshgrid(color_wheel_x, color_wheel_y)
color_wheel_flow = tf.stack([color_wheel_X, color_wheel_Y], axis=2)
color_wheel_flow = tf.expand_dims(color_wheel_flow, 0)
color_wheel_rgb, flow_norm, flow_ang = flow_viz_tf(color_wheel_flow)
return color_wheel_rgb
def draw_color_wheel_np(width, height):
color_wheel_x = np.linspace(-width / 2.,
width / 2.,
width)
color_wheel_y = np.linspace(-height / 2.,
height / 2.,
height)
color_wheel_X, color_wheel_Y = np.meshgrid(color_wheel_x, color_wheel_y)
color_wheel_rgb = flow_viz_np(color_wheel_X, color_wheel_Y)
return color_wheel_rgb
"""
Visualizes optical flow in HSV space using TensorFlow, with orientation as H, magnitude as V.
Returned as RGB.
Input: flow: [batch_size, width, height, 2]
Output: flow_rgb: [batch_size, width, height, 3]
"""
def flow_viz_tf(flow):
flow_norm = tf.norm(flow, axis=3)
flow_ang_rad = tf.atan2(flow[:, :, :, 1], flow[:, :, :, 0])
flow_ang = (flow_ang_rad / math.pi) / 2. + 0.5
const_mat = tf.ones(tf.shape(flow_norm))
hsv = tf.stack([flow_ang, const_mat, flow_norm], axis=3)
flow_rgb = tf.image.hsv_to_rgb(hsv)
return flow_rgb, flow_norm, flow_ang_rad
def flow_viz_np(flow_x, flow_y):
import cv2
flows = np.stack((flow_x, flow_y), axis=2)
mag = np.linalg.norm(flows, axis=2)
ang = np.arctan2(flow_y, flow_x)
ang += np.pi
ang *= 180. / np.pi / 2.
ang = ang.astype(np.uint8)
hsv = np.zeros([flow_x.shape[0], flow_x.shape[1], 3], dtype=np.uint8)
hsv[:, :, 0] = ang
hsv[:, :, 1] = 255
hsv[:, :, 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
flow_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return flow_rgb
``` |
{
"source": "96moustafa/sim_gazebo_bringup",
"score": 2
} |
#### File: sim_gazebo_bringup/launch/sim_gazebo.launch.py
```python
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import SetLaunchConfiguration
from launch.actions import IncludeLaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import LogInfo
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import os
from time import sleep
import json
import numpy as np
import shlex
import sys
import subprocess
launch_path = os.path.realpath(__file__).replace("sim_gazebo.launch.py", "")
# path of the default config file
json_path = os.path.realpath(os.path.relpath(
os.path.join(launch_path, "../config")))
ros2_ws = os.path.realpath(os.path.relpath(
os.path.join(launch_path, "../../../")))
# this path is where the PX4 and tii_gazebo repos will be cloned
ros2_ws_parent = os.path.realpath(os.path.relpath(
os.path.join(launch_path, "../../../..")))
# this path contains a directory for each scenario, holding its config file and plan files
scenarios_path = os.path.realpath(os.path.relpath(
os.path.join(launch_path, "../config/scenarios")))
print("launch_path: " + launch_path)
print("json_path: " + json_path)
print("ros2_ws: " + ros2_ws)
print("scenarios_path: " + scenarios_path)
# parse the scenarios directory for the available scenarios
available_scenarios = sorted(os.listdir(scenarios_path))
# list the available scenarios
print("The available scenarios are:")
for num, scenario in enumerate(available_scenarios):
print("{:d}) {:s}".format(num, scenario))
# prompt for the chosen scenario
scenario_num = 0
while True:
scenario_num = int(input("Enter the number of the required scenario: "))
if scenario_num in range(len(available_scenarios)):
break
# change json_path to point at the chosen scenario
json_path = "{:s}/{:s}".format(scenarios_path,
available_scenarios[scenario_num])
print("chosen scenario path: " + json_path)
gazebo_model_reset_env = False
gazebo_plugin_reset_env = False
with open('{:s}/gen_params.json'.format(json_path)) as json_file:
json_params = json.load(json_file)
setup_autopilot = json_params["setup"]["autopilot"]
setup_gazebo = json_params["setup"]["gazebo"]
models = json_params["models"]
world_params = json_params["world_params"]
generate_world_params = world_params["generate_params"]
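# Illustrative skeleton of gen_params.json, inferred from the keys read below; the
# config files shipped with the package are the authoritative reference and may
# contain additional fields:
#
# {
#   "setup": {
#     "autopilot": {
#       "px4": {"name": "...", "repo": "...", "version": "...", "clone": true,
#               "build_type": "...", "build_prefix": "...", "build_postfix": "...",
#               "gazebo_plugins": true}
#     },
#     "gazebo": {
#       "gazebo_models": {
#         "tii_gazebo": {"name": "...", "repo": "...", "version": "..."}
#       }
#     }
#   },
#   "models": {
#     "model_0": {"gazebo_name": "...", "autopilot_name": "...",
#                 "autopilot_build_type": "...", "spawn_pose": [0, 0, 0, 0, 0, 0],
#                 "generate_params": {"base_model": "...", "model_name": "NotSet"}}
#   },
#   "world_params": {
#     "generate_world": true, "gazebo_name": "...",
#     "generate_params": {"world_name": "...", "latitude": 0.0, "longitude": 0.0,
#                         "altitude": 0.0}
#   }
# }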
for repo in setup_gazebo["gazebo_models"]:
gazebo_repo = setup_gazebo["gazebo_models"][repo]
gazebo_repo_path = '{:s}/{:s}'.format(ros2_ws_parent,
gazebo_repo["name"])
if not os.path.isdir(gazebo_repo_path):
clone_cmd = 'git clone -b {:s} {:s} {:s}'.format(
gazebo_repo["version"],gazebo_repo["repo"], gazebo_repo_path)
clone_cmd_popen=shlex.split(clone_cmd)
clone_popen = subprocess.Popen(clone_cmd_popen,
stdout=subprocess.PIPE, text=True)
while True:
output = clone_popen.stdout.readline()
if output == '' and clone_popen.poll() is not None:
break
if output:
print(output.strip())
clone_popen.wait()
if not gazebo_model_reset_env:
os.environ['GAZEBO_MODEL_PATH'] = '{:s}/models'.format(
gazebo_repo_path)
gazebo_model_reset_env=True
elif '{:s}/models'.format(gazebo_repo_path) not in os.getenv('GAZEBO_MODEL_PATH'):
os.environ['GAZEBO_MODEL_PATH'] = '{:s}/models:{:s}'.format(
gazebo_repo_path, os.getenv('GAZEBO_MODEL_PATH'))
for build in setup_autopilot:
autopilot_build = setup_autopilot[build]
autopilot_path = '{:s}/{:s}'.format(ros2_ws_parent,
autopilot_build["name"])
autopilot_build_path = '{:s}/build/{:s}'.format(autopilot_path,
autopilot_build["build_type"])
if (not os.path.isdir(autopilot_path)) and (autopilot_build["clone"]):
clone_cmd = 'git clone -b {:s} {:s} {:s}'.format(
autopilot_build["version"],autopilot_build["repo"], autopilot_path)
clone_cmd_popen=shlex.split(clone_cmd)
clone_popen = subprocess.Popen(clone_cmd_popen,
stdout=subprocess.PIPE, text=True)
while True:
output = clone_popen.stdout.readline()
if output == '' and clone_popen.poll() is not None:
break
if output:
print(output.strip())
clone_popen.wait()
if (os.path.isdir(autopilot_path)) and (not os.path.isdir(autopilot_build_path)):
build_cmd = 'make clean && DONT_RUN=1 make {:s} {:s} {:s}'.format(
autopilot_build["build_prefix"],autopilot_build["build_type"],
autopilot_build["build_postfix"])
build_cmd_popen=shlex.split(build_cmd)
build_popen = subprocess.Popen(build_cmd_popen, stdout=subprocess.PIPE,
cwd=autopilot_path, text=True)
while True:
output = build_popen.stdout.readline()
if output == '' and build_popen.poll() is not None:
break
if output:
print(output.strip())
build_popen.wait()
if os.getenv('LD_LIBRARY_PATH') is None:
os.environ['LD_LIBRARY_PATH'] = '{:s}/build_gazebo'.format(
autopilot_build_path)
elif '{:s}/build_gazebo'.format(autopilot_build_path) not in os.getenv('LD_LIBRARY_PATH'):
os.environ['LD_LIBRARY_PATH'] = '{:s}/build_gazebo:{:s}'.format(
autopilot_build_path, os.getenv('LD_LIBRARY_PATH'))
if not gazebo_plugin_reset_env and autopilot_build["gazebo_plugins"]:
os.environ['GAZEBO_PLUGIN_PATH'] = '{:s}/build_gazebo'.format(
autopilot_build_path)
gazebo_plugin_reset_env=True
elif autopilot_build["gazebo_plugins"] and ('{:s}/build_gazebo'.format(autopilot_build_path)
not in os.getenv('GAZEBO_PLUGIN_PATH')):
os.environ['GAZEBO_PLUGIN_PATH'] = '{:s}/build_gazebo:{:s}'.format(
autopilot_build_path, os.getenv('GAZEBO_PLUGIN_PATH'))
if os.getenv('GAZEBO_PLUGIN_PATH') is None:
os.environ['GAZEBO_PLUGIN_PATH'] = "/usr/lib/x86_64-linux-gnu/gazebo-11/plugins"
elif "/usr/lib/x86_64-linux-gnu/gazebo-11/plugins" not in os.getenv('GAZEBO_PLUGIN_PATH'):
os.environ['GAZEBO_PLUGIN_PATH'] = '{:s}:{:s}'.format(
"/usr/lib/x86_64-linux-gnu/gazebo-11/plugins",
os.getenv('GAZEBO_PLUGIN_PATH'))
os.environ['GAZEBO_RESOURCE_PATH'] = "/usr/share/gazebo-11"
if world_params["generate_world"]:
generate_world_args=""
for params in generate_world_params:
generate_world_args += ' --{:s} "{:s}"'.format(params,
str(generate_world_params[params]))
generate_world_cmd = 'python3 {:s}/{:s}/scripts/jinja_world_gen.py{:s}'.format(
ros2_ws_parent, world_params["gazebo_name"], generate_world_args
).replace("\n","").replace(" ","")
world_cmd_popen=shlex.split(generate_world_cmd)
world_popen = subprocess.Popen(world_cmd_popen, stdout=subprocess.PIPE, text=True)
while True:
output = world_popen.stdout.readline()
if output == '' and world_popen.poll() is not None:
break
if output:
print(output.strip())
world_popen.wait()
world_file_path='/tmp/{:s}.world'.format(generate_world_params["world_name"])
else:
world_file_path='{:s}/{:s}/worlds/{:s}.world'.format(ros2_ws_parent,
world_params["gazebo_name"],generate_world_params["world_name"])
latitude = generate_world_params["latitude"]
longitude = generate_world_params["longitude"]
altitude = generate_world_params["altitude"]
def generate_launch_description():
ld = LaunchDescription([
# World path argument
DeclareLaunchArgument(
'world_path', default_value= world_file_path,
description='Provide full world file path and name'),
LogInfo(msg=LaunchConfiguration('world_path')),
])
# Get path to gazebo package
gazebo_package_prefix = get_package_share_directory('gazebo_ros')
    # Launch the gazebo server with the world file from world_path
gazebo_server = IncludeLaunchDescription(
PythonLaunchDescriptionSource([gazebo_package_prefix,'/launch/gzserver.launch.py']),
launch_arguments={'world': LaunchConfiguration('world_path')}.items(),
)
ld.add_action(gazebo_server)
instance = 0
for model_params in models:
generate_model_params = models[model_params]["generate_params"]
if generate_model_params["model_name"] == "NotSet":
generate_model_params["model_name"] = 'sitl_{:s}_{:d}'.format(
generate_model_params["base_model"],instance)
# Path for PX4 binary storage
sitl_output_path = '/tmp/{:s}'.format(generate_model_params["model_name"])
generate_model_args = ""
for params in generate_model_params:
generate_model_args += ' --{:s} "{:s}"'.format(
params, str(generate_model_params[params]))
generate_model = ['python3 {:s}/{:s}/scripts/jinja_model_gen.py{:s}'.format(
ros2_ws_parent, models[model_params]["gazebo_name"],
generate_model_args).replace("\n","").replace(" ","")]
# Command to make storage folder
sitl_folder_cmd = ['mkdir -p \"{:s}\"'.format(sitl_output_path)]
# Calculate spawn locations
spawn_pose = models[model_params]["spawn_pose"]
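        # The x/y spawn offsets (metres East/North of the world origin) are converted
        # below to geodetic longitude/latitude with a local flat-Earth approximation
        # (Earth radius 6378137 m); z is simply added to the origin altitude.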
latitude_vehicle = float(latitude) + ((float(spawn_pose[1])/6378137.0)*(180.0/np.pi))
longitude_vehicle = float(longitude) + ((float(spawn_pose[0])/
(6378137.0*np.cos((np.pi*float(latitude))/180.0)))*(180.0/np.pi))
altitude_vehicle = float(altitude) + float(spawn_pose[2])
# Set each xterm with PX4 environment variables
px4_env = '''export PX4_SIM_MODEL=\"{:s}\"; export PX4_HOME_LAT={:s};
export PX4_HOME_LON={:s}; export PX4_HOME_ALT={:s};'''.format(
generate_model_params["base_model"], str(latitude_vehicle),
str(longitude_vehicle), str(altitude_vehicle)
).replace("\n","").replace(" ","")
# Set path for PX4 build
px4_path = '{:s}/{:s}/build/{:s}'.format(ros2_ws_parent,
models[model_params]["autopilot_name"],
models[model_params]["autopilot_build_type"])
# Command to export model and run PX4 binary
px4_cmd = '''{:s} eval \"\"{:s}/bin/px4\"
-w {:s} \"{:s}/etc\" -s etc/init.d-posix/rcS -i {:d}\"; bash'''.format(
px4_env, px4_path, sitl_output_path, px4_path, instance)
# Xterm command to name xterm window and run px4_cmd
xterm_px4_cmd = ['''xterm -hold -T \"PX4 NSH {:s}\"
-n \"PX4 NSH {:s}\" -e \'{:s}\''''.format(
sitl_output_path, sitl_output_path,
px4_cmd).replace("\n","").replace(" ","")]
# Execute jinja generator
jinja_model_generate = ExecuteProcess(
cmd=generate_model,
name='jinja_gen_{:s}'.format(generate_model_params["model_name"]),
shell=True,
output='screen')
ld.add_action(jinja_model_generate)
# Make storage command
make_sitl_folder = ExecuteProcess(
cmd=sitl_folder_cmd,
name='make_sitl_folder_{:s}'.format(generate_model_params["model_name"]),
shell=True)
ld.add_action(make_sitl_folder)
# Run PX4 binary
px4_posix = ExecuteProcess(
cmd=xterm_px4_cmd,
name='xterm_px4_nsh_{:s}'.format(generate_model_params["model_name"]),
shell=True)
ld.add_action(px4_posix)
# Spawn entity
spawn_entity = Node(package='gazebo_ros', executable='spawn_entity.py',
arguments=['-entity', '{:s}'.format(generate_model_params["model_name"]),
'-x', str(spawn_pose[0]), '-y', str(spawn_pose[1]), '-z', str(spawn_pose[2]),
'-R', str(spawn_pose[3]), '-P', str(spawn_pose[4]), '-Y', str(spawn_pose[5]),
'-file', '/tmp/{:s}.sdf'.format(generate_model_params["model_name"])],
name='spawn_{:s}'.format(generate_model_params["model_name"]), output='screen')
ld.add_action(spawn_entity)
# Increment instance
instance += 1
# Launch gazebo client
gazebo_client = IncludeLaunchDescription(
PythonLaunchDescriptionSource([gazebo_package_prefix,'/launch/gzclient.launch.py']))
LogInfo(msg="\nWaiting to launch Gazebo Client...\n")
sleep(2)
ld.add_action(gazebo_client)
return ld
``` |
{
"source": "96RadhikaJadhav/CLMM",
"score": 3
} |
#### File: clmm/modbackend/generic.py
```python
import numpy as np
from astropy import units
from astropy.cosmology import LambdaCDM
from ..constants import Constants as const
import warnings
__all__ = ['get_reduced_shear_from_convergence']
# functions that are general to all backends
def get_reduced_shear_from_convergence(shear, convergence):
""" Calculates reduced shear from shear and convergence
Parameters
----------
shear : array_like
Shear
convergence : array_like
Convergence
Returns
-------
reduced_shear : array_like
Reduced shear
"""
shear, convergence = np.array(shear), np.array(convergence)
reduced_shear = shear/(1.-convergence)
return reduced_shear
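# Quick illustrative check: a shear of 0.1 with convergence 0.2 gives a reduced
# shear of 0.1 / (1 - 0.2) = 0.125, i.e.
#   get_reduced_shear_from_convergence(0.1, 0.2)  # -> 0.125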
```
#### File: CLMM/clmm/plotting.py
```python
import matplotlib.pyplot as plt
from .galaxycluster import GalaxyCluster
def plot_profiles(cluster=None, rbins=None, tangential_component=None, tangential_component_error=None,
cross_component=None, cross_component_error=None, r_units=None, table_name='profile',
xscale='linear',yscale='linear'):
"""Plot shear profiles
This function can be called by either passing in an instance of `GalaxyCluster` or as an
attribute of an instance of a `GalaxyCluster` object assuming that that instance has had
a shear profile computed and saved as a `.profile` attribute. This function can also be
called by passing in `rbins` along with the respective shears.
We require at least `rbins` information and `tangential_component` information.
Parameters
----------
cluster: GalaxyCluster, optional
Instance of `GalaxyCluster()` that contains a `.profile` attribute.
rbins: array_like, optional
The centers of the radial bins that was used to compute the shears.
tangential_component: array_like, optional
The tangential component at the radii of `rbins`, or the name of the column in the galcat Table corresponding to the tangential component of the shear or reduced shear (Delta Sigma not yet implemented). Default: 'gt'
tangential_component_error: array_like, optional
The uncertainty in the tangential component or the name of the column in the galcat Table corresponding to the uncertainty in tangential component of the shear or reduced shear. Default: 'gt_err'
cross_component: array_like, optional
The cross component at the radii of `rbins` or the name of the column in the galcat Table corresponding to the cross component of the shear or reduced shear. Default: 'gx'
cross_component_error: array_like, optional
The uncertainty in the cross component or the name of the column in the galcat Table corresponding to the uncertainty in the cross component of the shear or reduced shear. Default: 'gx_err'
r_units: str, optional
Units of `rbins` for x-axis label
table_name: str, optional
Name of the GalaxyCluster() `.profile` attribute. Default: 'profile'
xscale:
matplotlib.pyplot.xscale parameter to set x-axis scale (e.g. to logarithmic axis)
yscale:
matplotlib.pyplot.yscale parameter to set y-axis scale (e.g. to logarithmic axis)
Returns
-------
fig:
The matplotlib figure object that has been plotted to.
axes:
The matplotlib axes object that has been plotted to.
"""
# If a cluster object was passed, use these arrays
if cluster is not None and hasattr(cluster, table_name):
cluster_profile = getattr(cluster,table_name)
rbins = cluster_profile['radius']
r_units = cluster_profile.meta['bin_units']
if tangential_component != 'gt':
ValueError("The function requires a column called 'gt' to run.")
if cross_component != 'gx':
ValueError("The function requires a column called 'gx' to run.")
if 'gt' not in cluster_profile.colnames:
ValueError("The function requires a column called 'gt' to run.")
if 'gx' not in cluster_profile.colnames:
ValueError("The function requires a column called 'gx' to run.")
if type(tangential_component)==str:
tangential_component = cluster_profile[tangential_component]
else:
tangential_component = cluster_profile['gt']
try:
if type(tangential_component_error)==str:
tangential_component_error = cluster_profile[tangential_component_error]
else:
tangential_component_error = cluster_profile['gt_err']
except:
pass
try:
if type(cross_component)==str:
cross_component = cluster_profile[cross_component]
else:
cross_component = cluster_profile['gx']
except:
pass
try:
if type(cross_component_error)==str:
cross_component_error = cluster_profile[cross_component_error]
else:
cross_component_error = cluster_profile['gx_err']
except:
pass
# Plot the tangential shears
fig, axes = plt.subplots()
axes.errorbar(rbins, tangential_component,
yerr=tangential_component_error,
fmt='bo-', label="Tangential component")
# Plot the cross shears
try:
axes.errorbar(rbins, cross_component,
yerr=cross_component_error,
fmt='ro-', label="Cross component")
except:
pass
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.legend()
axes.set_xlabel(f'Radius [{r_units}]')
axes.set_ylabel(r'$\gamma$')
return fig, axes
GalaxyCluster.plot_profiles = plot_profiles
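# Illustrative usage sketch, assuming `cl` is a GalaxyCluster whose `.profile`
# table has already been computed and contains 'gt'/'gx' columns:
#   fig, axes = cl.plot_profiles(xscale='log')
#   fig.savefig('shear_profile.png')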
```
#### File: CLMM/tests/test_gcdata.py
```python
from numpy.testing import assert_raises, assert_equal
from clmm import GCData
from clmm import Cosmology
def test_init():
gcdata = GCData()
assert_equal(None, gcdata.meta['cosmo'])
def test_update_cosmo():
# Define inputs
cosmo1 = Cosmology(H0=70.0, Omega_dm0=0.3-0.045, Omega_b0=0.045)
desc1 = cosmo1.get_desc()
gcdata = GCData()
    # check it has __str__ and __repr__
gcdata.__str__()
gcdata.__repr__()
# manual update
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
# check that adding cosmo metadata manually is forbidden
assert_raises(ValueError, gcdata.meta.__setitem__, 'cosmo', None)
assert_raises(ValueError, gcdata.meta.__setitem__, 'cosmo', cosmo1)
# update_cosmo funcs
# input_cosmo=None, data_cosmo=None
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, None, overwrite=False)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, None, overwrite=True)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(None, overwrite=False)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(None, overwrite=True)
assert_equal(None, gcdata.meta['cosmo'])
# input_cosmo!=None, data_cosmo=None
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
# input_cosmo=data_cosmo!=None
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
# input_cosmo(!=None) != data_cosmo(!=None)
cosmo2 = Cosmology(H0=60.0, Omega_dm0=0.3-0.045, Omega_b0=0.045)
desc2 = cosmo2.get_desc()
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
assert_raises(TypeError, gcdata.update_cosmo_ext_valid, gcdata, cosmo2, overwrite=False)
assert_raises(TypeError, gcdata.update_cosmo_ext_valid, gcdata, cosmo2)
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo2, overwrite=True)
assert_equal(desc2, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
assert_raises(TypeError, gcdata.update_cosmo, cosmo2, overwrite=False)
assert_raises(TypeError, gcdata.update_cosmo, cosmo2)
# test_creator = 'Mitch'
# test_creator_diff = 'Witch'
# test_dict = {'test%d'%i:True for i in range(3)}
# test_dict_diff = {'test%d'%i:False for i in range(3)}
# test_dict_sub = {'test%d'%i:True for i in range(2)}
# test_table = []
# test_data = GCData(test_creator, test_dict, test_table)
# test_data_diff = GCData(test_creator, test_dict_diff, test_table)
# def test_check_subdict():
#
# assert check_subdict(test_dict_sub, test_dict)
# assert not check_subdict(test_dict, test_dict_sub)
# assert not check_subdict(test_dict_sub, test_dict_diff)
#
# def test_find_in_datalist():
#
# tst.assert_equal([test_data], find_in_datalist(test_dict, [test_data]))
# tst.assert_equal([test_data], find_in_datalist(test_dict_sub, [test_data]))
# tst.assert_equal([], find_in_datalist(test_dict_diff, [test_data]))
#
# tst.assert_equal([test_data], find_in_datalist(test_dict, [test_data], exact=True))
# tst.assert_equal([], find_in_datalist(test_dict_sub, [test_data], exact=True))
# tst.assert_equal([], find_in_datalist(test_dict_diff, [test_data], exact=True))
# def test_find_data():
#
# gc = GalaxyCluster('test_cluster', test_data)
#
# tst.assert_equal([], gc.find_data(test_creator_diff, test_dict))
#
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict))
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict_sub))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_diff))
#
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict, exact=True))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_sub, exact=True))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_diff, exact=True))
# def test_add_data():
# gc = GalaxyCluster('test_cluster')
# tst.assert_raises(TypeError, gc.add_data, '')
# tst.assert_raises(TypeError, gc.add_data, '', force=True)
# tst.assert_equal(None, gc.add_data(test_data, force=True))
# gc = GalaxyCluster('test_cluster')
# tst.assert_equal(None, gc.add_data(test_data))
# tst.assert_equal(None, gc.add_data(test_data_diff))
# tst.assert_raises(ValueError, gc.add_data, test_data)
# tst.assert_equal(None, gc.add_data(test_data, force=True))
#
# def test_remove_data():
#
# gc = GalaxyCluster('test_cluster', test_data)
# tst.assert_raises(ValueError, gc.remove_data, test_creator_diff, test_dict)
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict_sub)
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict_diff)
# tst.assert_equal(None, gc.remove_data(test_creator, test_dict))
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict)
#
# def test_read_GC():
# pass
# def test_write_GC():
# pass
```
#### File: CLMM/tests/test_load_clusters_with_gcr.py
```python
import os
from numpy import testing
import clmm
def test_types():
testing.assert_raises(TypeError, clmm.lsst.load_from_dc2, 1.5, 'cosmoDC2_v1.1.4_small',
'.', _reader='test')
testing.assert_raises(TypeError, clmm.lsst.load_from_dc2, 5, 'cosmoDC2_v1.1.4_small',
10, _reader='test')
def test_ranges():
testing.assert_raises(ValueError, clmm.lsst.load_from_dc2, -1, 'cosmoDC2_v1.1.4_small', '.',
_reader='test')
testing.assert_raises(ValueError, clmm.lsst.load_from_dc2, int(1e10), 'cosmoDC2_v1.1.4_small', '.',
_reader='test')
testing.assert_raises(ValueError, clmm.lsst.load_from_dc2, 5, 'cosmoDC2_v1.1.4_small', '.',
(-0.3, 0.3), (-0.3, 0.3, 0.1), (0.1, 1.5), _reader='test')
testing.assert_raises(ValueError, clmm.lsst.load_from_dc2, 5, 'cosmoDC2_v1.1.4_small', '.',
(0.3, -0.3), (-0.3, 0.3), (0.1, 1.5), _reader='test')
def test_values():
clmm.lsst.load_from_dc2(10, 'cosmoDC2_v1.1.4_small', '.', _reader='test')
c = clmm.GalaxyCluster.load('./3.p')
testing.assert_equal(len(c.galcat), 10)
testing.assert_equal(c.galcat.columns.keys(),
['galaxy_id', 'ra', 'dec', 'e1', 'e2', 'z', 'kappa'])
testing.assert_equal(c.galcat[5]['e1'], 2.)
testing.assert_equal(c.galcat[4]['z'], 0.4)
for file in os.listdir('.'):
if file[-2:]=='.p':
os.remove(file)
```
#### File: CLMM/tests/test_modeling.py
```python
import json
import numpy as np
from numpy.testing import assert_raises, assert_allclose, assert_equal
from astropy.cosmology import FlatLambdaCDM, LambdaCDM
import clmm.modeling as md
from clmm.constants import Constants as clc
from clmm.galaxycluster import GalaxyCluster
from clmm import GCData
TOLERANCE = {'rtol': 1.0e-6, 'atol': 1.0e-6}
# ----------- Some Helper Functions for the Validation Tests ---------------
def compute_sigmac_physical_constant(lightspeed, gnewt, msun, pc_to_m):
""" Computes physical constant used to in Sigma_crit
Parameters
----------
lightspeed,: float
Lightspeed in km/s
gnewt: float
Gravitational constant in m^3/(km s^2)
msun: float
Solar mass in kg
pc_to_m: float
Value of 1 parsec in meters
Returns
-------
float
lightspeed^2/G[Msun/pc]
"""
return (lightspeed*1000./pc_to_m)**2/(gnewt*msun/pc_to_m**3)
def load_validation_config():
""" Loads values precomputed by numcosmo for comparison """
numcosmo_path = 'tests/data/numcosmo/'
with open(numcosmo_path+'config.json', 'r') as fin:
testcase = json.load(fin)
numcosmo_profile = np.genfromtxt(numcosmo_path+'radial_profiles.txt', names=True)
# Physical Constants
CLMM_SIGMAC_PCST = compute_sigmac_physical_constant(clc.CLIGHT_KMS.value,
clc.GNEWT.value,
clc.SOLAR_MASS.value,
clc.PC_TO_METER.value)
testcase_SIGMAC_PCST = compute_sigmac_physical_constant(testcase['lightspeed[km/s]'],
testcase['G[m3/km.s2]'],
testcase['Msun[kg]'],
testcase['pc_to_m'])
# Cosmology
cosmo = md.Cosmology(H0=testcase['cosmo_H0'], Omega_dm0=testcase['cosmo_Om0']-testcase['cosmo_Ob0'], Omega_b0=testcase['cosmo_Ob0'])
# Sets of parameters to be used by multiple functions
RHO_PARAMS = {
'r3d': np.array(numcosmo_profile['r3d']),
'mdelta':testcase['cluster_mass'],
'cdelta':testcase['cluster_concentration'],
'z_cl':testcase['z_cluster'],
}
SIGMA_PARAMS = {
'r_proj': np.array(numcosmo_profile['r3d']),
'mdelta':testcase['cluster_mass'],
'cdelta':testcase['cluster_concentration'],
'z_cl':testcase['z_cluster'],
'delta_mdef':testcase['mass_Delta'],
'halo_profile_model':testcase['density_profile_parametrization'],
}
GAMMA_PARAMS = {
'r_proj': np.array(numcosmo_profile['r3d']),
'mdelta': testcase['cluster_mass'],
'cdelta': testcase['cluster_concentration'],
'z_cluster': testcase['z_cluster'],
'z_source': testcase['z_source'],
'delta_mdef': testcase['mass_Delta'],
'halo_profile_model': testcase['density_profile_parametrization'],
'z_src_model': 'single_plane',
}
return {'TEST_CASE': testcase, 'z_source': testcase['z_source'],
'cosmo': cosmo,
'RHO_PARAMS': RHO_PARAMS, 'SIGMA_PARAMS': SIGMA_PARAMS, 'GAMMA_PARAMS': GAMMA_PARAMS,
'numcosmo_profiles': numcosmo_profile, 'TEST_CASE_SIGMAC_PCST': testcase_SIGMAC_PCST,
'CLMM_SIGMAC_PCST': CLMM_SIGMAC_PCST}
# --------------------------------------------------------------------------
def test_physical_constants(modeling_data):
""" Test physical values of physical_constants
Notes
-----
    The precision targets for these tests are currently somewhat arbitrary and
    should be tightened to the values provided by CCL.
"""
cfg = load_validation_config()
assert_allclose(cfg['TEST_CASE']['lightspeed[km/s]'], clc.CLIGHT_KMS.value, 1e-3)
assert_allclose(cfg['TEST_CASE']['G[m3/km.s2]'], clc.GNEWT.value, 1e-3)
assert_allclose(cfg['TEST_CASE']['pc_to_m'], clc.PC_TO_METER.value, 1e-6)
assert_allclose(cfg['TEST_CASE']['Msun[kg]'], clc.SOLAR_MASS.value, 1e-2)
assert_allclose(cfg['TEST_CASE_SIGMAC_PCST'], cfg['CLMM_SIGMAC_PCST'], 1e-2)
def test_cclify_astropy_cosmo(modeling_data):
""" Unit tests for md.cllify_astropy_cosmo """
# Make some base objects
truth = {'H0': 70., 'Om0': 0.3, 'Ob0': 0.05}
apycosmo_flcdm = FlatLambdaCDM(**truth)
apycosmo_lcdm = LambdaCDM(Ode0=1.0-truth['Om0'], **truth)
cclcosmo = {'Omega_c': truth['Om0']-truth['Ob0'], 'Omega_b': truth['Ob0'],
'h': truth['H0']/100., 'H0': truth['H0']}
# Test for exception if missing baryon density (everything else required)
#missbaryons = FlatLambdaCDM(H0=truth['H0'], Om0=truth['Om0'])
#assert_raises(KeyError, md.cclify_astropy_cosmo, missbaryons)
# Test output if we pass FlatLambdaCDM and LambdaCDM objects
#assert_equal(md.cclify_astropy_cosmo(apycosmo_flcdm), cclcosmo)
#assert_equal(md.cclify_astropy_cosmo(apycosmo_lcdm), cclcosmo)
# Test output if we pass a CCL object (a dict right now)
#assert_equal(md.cclify_astropy_cosmo(cclcosmo), cclcosmo)
# Test for exception if anything else is passed in
#assert_raises(TypeError, md.cclify_astropy_cosmo, 70.)
#assert_raises(TypeError, md.cclify_astropy_cosmo, [70., 0.3, 0.25, 0.05])
def test_astropyify_ccl_cosmo(modeling_data):
""" Unit tests for astropyify_ccl_cosmo """
    # Make a base object
truth = {'H0': 70., 'Om0': 0.3, 'Ob0': 0.05}
apycosmo_flcdm = FlatLambdaCDM(**truth)
apycosmo_lcdm = LambdaCDM(Ode0=1.0-truth['Om0'], **truth)
cclcosmo = {'Omega_c': truth['Om0']-truth['Ob0'], 'Omega_b': truth['Ob0'],
'h': truth['H0']/100., 'H0': truth['H0']}
# Test output if we pass FlatLambdaCDM and LambdaCDM objects
#assert_equal(md.astropyify_ccl_cosmo(apycosmo_flcdm), apycosmo_flcdm)
#assert_equal(md.astropyify_ccl_cosmo(apycosmo_lcdm), apycosmo_lcdm)
# Test output if we pass a CCL object, compare the dicts
#assert_equal(md.cclify_astropy_cosmo(md.astropyify_ccl_cosmo(cclcosmo)),
# md.cclify_astropy_cosmo(apycosmo_lcdm))
# Test for exception if anything else is passed in
#assert_raises(TypeError, md.astropyify_ccl_cosmo, 70.)
#assert_raises(TypeError, md.astropyify_ccl_cosmo, [70., 0.3, 0.25, 0.05])
def test_get_reduced_shear(modeling_data):
""" Unit tests for get_reduced_shear """
# Make some base objects
shear = [0.5, 0.75, 1.25, 0.0]
convergence = [0.75, -0.2, 0.0, 2.3]
truth = [2., 0.625, 1.25, 0.0]
# Test for exception if shear and convergence are not the same length
assert_raises(ValueError, md.get_reduced_shear_from_convergence, shear[:3], convergence[:2])
assert_raises(ValueError, md.get_reduced_shear_from_convergence, shear[:2], convergence[:3])
# Check output including: float, list, ndarray
assert_allclose(md.get_reduced_shear_from_convergence(shear[0], convergence[0]),
truth[0], **TOLERANCE)
assert_allclose(md.get_reduced_shear_from_convergence(shear, convergence),
truth, **TOLERANCE)
assert_allclose(md.get_reduced_shear_from_convergence(np.array(shear), np.array(convergence)),
np.array(truth), **TOLERANCE)
def helper_profiles(func):
""" A helper function to repeat a set of unit tests on several functions
that expect the same inputs.
Tests the following functions: get_3d_density, predict_surface_density,
predict_excess_surface_density
Tests that the functions:
1. Throw an error if an invalid profile model is passed
2. Test each default parameter to ensure that the defaults are not changed.
"""
# Make some base objects
r3d = np.logspace(-2, 2, 100)
mdelta = 1.0e15
cdelta = 4.0
z_cl = 0.2
cclcosmo = md.Cosmology(Omega_dm0=0.25, Omega_b0=0.05)
# Test for exception if other profiles models are passed
assert_raises(ValueError, func, r3d, mdelta, cdelta, z_cl, cclcosmo, 200, 'bleh')
# Test defaults
defaulttruth = func(r3d, mdelta, cdelta, z_cl, cclcosmo, delta_mdef=200,
halo_profile_model='nfw')
assert_allclose(func(r3d, mdelta, cdelta, z_cl, cclcosmo, halo_profile_model='nfw'),
defaulttruth, **TOLERANCE)
assert_allclose(func(r3d, mdelta, cdelta, z_cl, cclcosmo, delta_mdef=200),
defaulttruth, **TOLERANCE)
def test_profiles(modeling_data):
""" Tests for profile functions, get_3d_density, predict_surface_density,
and predict_excess_surface_density """
helper_profiles(md.get_3d_density)
helper_profiles(md.predict_surface_density)
helper_profiles(md.predict_excess_surface_density)
# Validation tests
# NumCosmo makes different choices for constants (Msun). We make this conversion
# by passing the ratio of SOLAR_MASS in kg from numcosmo and CLMM
cfg = load_validation_config()
cosmo = cfg['cosmo']
assert_allclose(md.get_3d_density(cosmo=cosmo, **cfg['RHO_PARAMS']),
cfg['numcosmo_profiles']['rho'], 2.0e-9)
assert_allclose(md.predict_surface_density(cosmo=cosmo, **cfg['SIGMA_PARAMS']),
cfg['numcosmo_profiles']['Sigma'], 2.0e-9)
assert_allclose(md.predict_excess_surface_density(cosmo=cosmo, **cfg['SIGMA_PARAMS']),
cfg['numcosmo_profiles']['DeltaSigma'], 2.0e-9)
def test_get_critical_surface_density(modeling_data):
""" Validation test for critical surface density """
cfg = load_validation_config()
assert_allclose(md.get_critical_surface_density(cfg['cosmo'],
z_cluster=cfg['TEST_CASE']['z_cluster'],
z_source=cfg['TEST_CASE']['z_source']),
cfg['TEST_CASE']['nc_Sigmac'], 1.2e-8)
# Check behaviour when sources are in front of the lens
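# (a source in front of the lens is not lensed, so the critical surface density
# is expected to be returned as np.inf here)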
z_cluster = 0.3
z_source = 0.2
assert_allclose(md.get_critical_surface_density(cfg['cosmo'],z_cluster=z_cluster, z_source=z_source),
np.inf, 1.0e-10)
z_source = [0.2,0.12,0.25]
assert_allclose(md.get_critical_surface_density(cfg['cosmo'],z_cluster=z_cluster, z_source=z_source),
[np.inf,np.inf, np.inf], 1.0e-10)
# Check usage with cluster object function
z_src = np.array([cfg['TEST_CASE']['z_source']])
cluster = GalaxyCluster(unique_id='blah', ra=0, dec=0, z=cfg['TEST_CASE']['z_cluster'],
galcat=GCData([0*z_src, 0*z_src, z_src],
names=('ra', 'dec', 'z')))
cluster.add_critical_surface_density(cfg['cosmo'])
assert_allclose(cluster.galcat['sigma_c'],
cfg['TEST_CASE']['nc_Sigmac'], 1.2e-8)
def helper_physics_functions(func):
""" A helper function to repeat a set of unit tests on several functions
that expect the same inputs.
Tests the following functions: predict_tangential_shear, predict_convergence,
predict_reduced_tangential_shear
Tests that the functions:
1. Keep their documented default parameter values (i.e. the defaults have not changed).
2. Throw exceptions for unsupported z_src_model values and halo profiles.
"""
# Make some base objects
rproj = np.logspace(-2, 2, 100)
mdelta = 1.0e15
cdelta = 4.0
z_cl = 0.2
z_src = 0.45
cosmo = md.Cosmology(Omega_dm0=0.25, Omega_b0=0.05, H0=70.0)
# Test defaults
defaulttruth = func(rproj, mdelta, cdelta, z_cl, z_src, cosmo, delta_mdef=200,
halo_profile_model='nfw', z_src_model='single_plane')
assert_allclose(func(rproj, mdelta, cdelta, z_cl, z_src, cosmo, halo_profile_model='nfw',
z_src_model='single_plane'), defaulttruth, **TOLERANCE)
assert_allclose(func(rproj, mdelta, cdelta, z_cl, z_src, cosmo, delta_mdef=200,
z_src_model='single_plane'), defaulttruth, **TOLERANCE)
assert_allclose(func(rproj, mdelta, cdelta, z_cl, z_src, cosmo, delta_mdef=200,
halo_profile_model='nfw'), defaulttruth, **TOLERANCE)
# Test for exception on unsupported z_src_model and halo profiles
assert_raises(ValueError, func, rproj, mdelta, cdelta, z_cl, z_src, cosmo,
200, 'bleh', 'single_plane')
assert_raises(ValueError, func, rproj, mdelta, cdelta, z_cl, z_src, cosmo,
200, 'nfw', 'bleh')
def test_shear_convergence_unittests(modeling_data):
""" Unit and validation tests for the shear and convergence calculations """
helper_physics_functions(md.predict_tangential_shear)
helper_physics_functions(md.predict_convergence)
helper_physics_functions(md.predict_reduced_tangential_shear)
helper_physics_functions(md.predict_magnification)
# Validation Tests -------------------------
# NumCosmo makes different choices for constants (Msun). We make this conversion
# by passing the ratio of SOLAR_MASS in kg from numcosmo and CLMM
cfg = load_validation_config()
constants_conversion = clc.SOLAR_MASS.value/cfg['TEST_CASE']['Msun[kg]']
# First compute SigmaCrit to correct cosmology changes
cosmo = cfg['cosmo']
sigma_c = md.get_critical_surface_density(cosmo, cfg['GAMMA_PARAMS']['z_cluster'],
cfg['z_source'])
# Compute sigma_c in the new cosmology and get a correction factor
sigma_c_undo = md.get_critical_surface_density(cosmo, cfg['GAMMA_PARAMS']['z_cluster'],
cfg['z_source'])
sigmac_corr = (sigma_c_undo/sigma_c)
# Check that an error is raised if the radius is too small
assert_raises(ValueError, md.predict_tangential_shear, 1.e-12, 1.e15, 4, 0.2, 0.45, cosmo)
# Validate tangential shear
gammat = md.predict_tangential_shear(cosmo=cosmo, **cfg['GAMMA_PARAMS'])
assert_allclose(gammat*sigmac_corr, cfg['numcosmo_profiles']['gammat'], 1.0e-8)
# Validate convergence
kappa = md.predict_convergence(cosmo=cosmo, **cfg['GAMMA_PARAMS'])
assert_allclose(kappa*sigmac_corr, cfg['numcosmo_profiles']['kappa'], 1.0e-8)
# Validate reduced tangential shear
assert_allclose(md.predict_reduced_tangential_shear(cosmo=cosmo, **cfg['GAMMA_PARAMS']),
gammat/(1.0-kappa), 1.0e-10)
assert_allclose(gammat*sigmac_corr/(1.-(kappa*sigmac_corr)), cfg['numcosmo_profiles']['gt'], 1.0e-6)
# Validate magnification
assert_allclose(md.predict_magnification(cosmo=cosmo, **cfg['GAMMA_PARAMS']),
1./((1-kappa)**2-abs(gammat)**2), 1.0e-10)
assert_allclose(1./((1-kappa)**2-abs(gammat)**2), cfg['numcosmo_profiles']['mu'], 4.0e-7)
# Check that shear, reduced shear and convergence return zero, and magnification returns one, if the source is in front of the cluster
# First, check for an array of radii and a single source redshift
r = np.logspace(-2,2,10)
z_cluster = 0.3
z_source = 0.2
assert_allclose(md.predict_convergence(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster,
z_source=z_source, cosmo=cosmo),
np.zeros(len(r)), 1.0e-10)
assert_allclose(md.predict_tangential_shear(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster,
z_source=z_source, cosmo=cosmo),
np.zeros(len(r)), 1.0e-10)
assert_allclose(md.predict_reduced_tangential_shear(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster,
z_source=z_source, cosmo=cosmo),
np.zeros(len(r)), 1.0e-10)
assert_allclose(md.predict_magnification(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster,
z_source=z_source, cosmo=cosmo),
np.ones(len(r)), 1.0e-10)
# Second, check a single radius and an array of source redshifts
r = 1.
z_source = [0.25, 0.1, 0.14, 0.02]
assert_allclose(md.predict_convergence(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster, z_source=z_source, cosmo=cosmo),
np.zeros(len(z_source)), 1.0e-10)
assert_allclose(md.predict_tangential_shear(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster, z_source=z_source, cosmo=cosmo),
np.zeros(len(z_source)), 1.0e-10)
assert_allclose(md.predict_reduced_tangential_shear(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster, z_source=z_source, cosmo=cosmo),
np.zeros(len(z_source)), 1.0e-10)
assert_allclose(md.predict_magnification(r, mdelta=1.e15, cdelta=4., z_cluster=z_cluster, z_source=z_source, cosmo=cosmo),
np.ones(len(z_source)), 1.0e-10)
``` |
{
"source": "96RadhikaJadhav/empress",
"score": 2
} |
#### File: tests/python/test_cli.py
```python
import os
import unittest
import biom
from click.testing import CliRunner
import pandas as pd
from qiime2 import Artifact
from skbio.stats.ordination import OrdinationResults
from skbio.tree import TreeNode
from empress.scripts._cli import empress
def files_present(output_dir):
files = os.listdir(output_dir)
assert "empress.html" in files
assert os.path.isdir(f"{output_dir}/support_files")
class TestCLI(unittest.TestCase):
@classmethod
def setUpClass(cls):
q2_tree_loc = "docs/moving-pictures/rooted-tree.qza"
q2_table_loc = "docs/moving-pictures/table.qza"
q2_sm_loc = "docs/moving-pictures/sample_metadata.tsv"
q2_fm_loc = "docs/moving-pictures/taxonomy.qza"
q2_pcoa_loc = "docs/moving-pictures/biplot.qza"
cls.tree_loc = "rooted-tree.nwk"
cls.table_loc = "table.biom"
cls.sm_loc = "sample_metadata.tsv"
cls.fm_loc = "taxonomy.tsv"
cls.pcoa_loc = "pcoa.txt"
# convert tree to .nwk
nwk_tree = Artifact.load(q2_tree_loc).view(TreeNode)
# convert table to .biom
biom_tbl = Artifact.load(q2_table_loc).view(biom.table.Table)
# remove comment rows from sample metadata
sm = pd.read_csv(q2_sm_loc, sep="\t", index_col=0, skiprows=[1])
# convert feature metadata to .tsv
fm = Artifact.load(q2_fm_loc).view(pd.DataFrame)
# convert biplot to skbio OrdinationResults
pcoa = Artifact.load(q2_pcoa_loc).view(OrdinationResults)
# create isolated filesystem for tests in this file
# manually using __enter__ so that we can run all tests and close in
# tearDownClass rather than use 'with runner.isolated_filesystem():'
cls.runner = CliRunner()
cls.iso_fs = cls.runner.isolated_filesystem()
cls.iso_fs.__enter__()
nwk_tree.write(cls.tree_loc)
with biom.util.biom_open(cls.table_loc, "w") as f:
biom_tbl.to_hdf5(f, "test")
sm.to_csv(cls.sm_loc, index=True, sep="\t")
fm.to_csv(cls.fm_loc, index=True, sep="\t")
pcoa.write(cls.pcoa_loc)
@classmethod
def tearDownClass(cls):
# https://stackoverflow.com/questions/51706836/manually-open-context-manager
cls.iso_fs.__exit__(None, None, None)
def test_tree_plot_basic(cls):
output_dir = "tree_plot_basic"
result = cls.runner.invoke(
empress,
["tree-plot", "--tree", cls.tree_loc, "--output-dir", output_dir]
)
assert result.exit_code == 0
files_present(output_dir)
def test_comm_plot_basic(cls):
output_dir = "comm_plot_basic"
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir]
)
assert result.exit_code == 0
files_present(output_dir)
def test_comm_plot_pcoa(cls):
output_dir = "comm_plot_pcoa"
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir, "--pcoa", cls.pcoa_loc,
"--filter-extra-samples", "--feature-metadata", cls.fm_loc]
)
assert result.exit_code == 0
files_present(output_dir)
assert os.path.isdir(f"{output_dir}/emperor-resources")
def test_existing_directory(cls):
output_dir = "existing_dir"
os.mkdir("existing_dir")
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir]
)
assert result.exit_code == 1
error_class, value, _ = result.exc_info
assert error_class == OSError
assert str(value) == "Output directory already exists!"
assert not os.path.isdir(f"{output_dir}/support_files")
assert "empress.html" not in os.listdir(output_dir)
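# The invocations above roughly correspond to a shell command of the form
# (illustrative; the installed console-script name is assumed to be 'empress'):
#   empress community-plot --tree rooted-tree.nwk --table table.biom \
#       --sample-metadata sample_metadata.tsv --output-dir existing_dir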
``` |
{
"source": "96tm/simple-messenger",
"score": 3
} |
#### File: app/api_1_0/chats.py
```python
from . import api
from .authentication import auth
from flask import abort, current_app, g, jsonify, request, url_for
@api.route('/chats')
@auth.login_required
def get_chats():
page = request.args.get('page', 1, type=int)
prev_page = None
next_page = None
pagination = (g.current_user
.get_available_chats_query()
.paginate(page,
current_app.config['CHATS_PER_PAGE'],
error_out=False))
if pagination.has_prev:
prev_page = url_for('api.get_chats', page=page-1, _external=True)
if pagination.has_next:
next_page = url_for('api.get_chats', page=page+1, _external=True)
chats = [chat.to_json(g.current_user) for chat in pagination.items]
return jsonify({'chats': chats,
'previous': prev_page,
'next': next_page,
'count': pagination.total})
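# Illustrative shape of the JSON payload returned above (field values are hypothetical):
# {"chats": [...], "previous": null,
#  "next": "http://.../api/chats?page=2", "count": 42}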
@api.route('/chats/<int:chat_id>')
@auth.login_required
def get_chat(chat_id):
chat = (g.current_user
.get_available_chats_query()
.filter_by(id=chat_id).first())
if not chat:
abort(404)
return jsonify(chat.to_json(g.current_user))
```
#### File: app/api_1_0/messages.py
```python
from . import api, errors
from .authentication import auth
from .. import database
from ..models import Message, User
from flask import abort, current_app, g, jsonify, request, url_for
@api.route('/chats/<int:chat_id>/messages')
@auth.login_required
def get_messages(chat_id):
page = request.args.get('page', 1, type=int)
prev_page = None
next_page = None
chat = (g.current_user
.get_available_chats_query()
.filter_by(id=chat_id).first())
if not chat:
abort(404)
pagination = (chat
.messages
.paginate(page,
current_app.config['MESSAGES_PER_PAGE'],
error_out=False))
if pagination.has_prev:
prev_page = url_for('api.get_messages',
chat_id=chat_id,
page=page-1,
_external=True)
if pagination.has_next:
next_page = url_for('api.get_messages',
chat_id=chat_id,
page=page+1,
_external=True)
messages = [message.to_json(g.current_user)
for message
in pagination.items]
return jsonify({'messages': messages,
'previous': prev_page,
'next': next_page,
'count': pagination.total})
@api.route('/chats/<int:chat_id>/messages/<int:message_id>')
@auth.login_required
def get_message(chat_id, message_id):
chat = (g.current_user
.get_available_chats_query()
.filter_by(id=chat_id).first())
if not chat:
abort(404)
message = chat.messages.filter_by(id=message_id).first()
if not message:
abort(404)
return jsonify(message.to_json(g.current_user))
@api.route('/chats/<int:chat_id>/messages/', methods=['POST'])
@auth.login_required
def new_message(chat_id):
chat = (g.current_user
.get_available_chats_query()
.filter_by(id=chat_id).first())
if not chat:
abort(404)
recipient = None
if not chat.is_group_chat:
recipient = (chat
.users
.filter(User.id != g.current_user.id)
.first())
else:
abort(404)
message = Message.from_json(request.json)
if not message:
return errors.generate_error(errors.CONFLICT,
'cannot process client data')
message.sender = g.current_user
message.chat = chat
message.recipient = recipient
database.session.add(message)
database.session.commit()
return (jsonify(message.to_json(g.current_user)),
201,
{'Location': url_for('api.get_message',
chat_id=chat.id,
message_id=message.id,
_external=True)})
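# Assumed minimal request body: Message.from_json only requires a non-empty
# 'text' field, e.g. {"text": "hello"} (illustrative); anything else yields the
# CONFLICT error response above.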
```
#### File: app/auth/before_auth_request.py
```python
from flask_login import current_user
from flask import current_app, redirect, request, url_for
def before_auth_request():
if (current_user.is_authenticated
and not current_user.confirmed):
view_name = current_app.view_functions[request.endpoint].__name__
if view_name == 'unconfirmed':
pass
elif (request.blueprint not in ('auth', 'static')
or view_name not in ('confirm',
'resend_confirmation',
'logout')):
return redirect(url_for('auth.unconfirmed'))
```
#### File: app/auth/decorators.py
```python
from flask import redirect, url_for
from flask_login import current_user
from functools import wraps
def disable_if_user_confirmed(f):
@wraps(f)
def decorated(*args, **kwargs):
if current_user.is_authenticated and current_user.confirmed:
return redirect(url_for('main.index'))
return f(*args, **kwargs)
return decorated
```
#### File: app/errors/handlers.py
```python
from flask import render_template, jsonify, request
from . import errors_blueprint
@errors_blueprint.app_errorhandler(404)
def page_not_found(e):
if (request.accept_mimetypes.accept_json
and not request.accept_mimetypes.accept_html):
response = jsonify({'error': 'not found'})
response.status_code = 404
return response
return render_template('errors/404.html'), 404
@errors_blueprint.app_errorhandler(500)
def internal_server_error(e):
if (request.accept_mimetypes.accept_json
and not request.accept_mimetypes.accept_html):
response = jsonify({'error': 'server error'})
response.status_code = 500
return response
return render_template('errors/500.html'), 500
```
#### File: app/main/decorators.py
```python
from functools import wraps
from flask import redirect, request, url_for
from flask_login import current_user
from flask_socketio import disconnect
def authenticated_only(f):
@wraps(f)
def decorated(*args, **kwargs):
if not (current_user.is_authenticated and current_user.confirmed):
disconnect()
else:
return f(*args, **kwargs)
return decorated
def disable_if_unconfirmed(f):
@wraps(f)
def decorated(*args, **kwargs):
if not (current_user.is_authenticated and current_user.confirmed):
return redirect(url_for('auth.unconfirmed'))
else:
return f(*args, **kwargs)
return decorated
```
#### File: simple-messenger/app/models.py
```python
from . import login_manager
from . import database
from .exceptions import ValidationError
from datetime import datetime, timezone
from itsdangerous import BadHeader, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import and_, not_
from flask import current_app, url_for
from flask_login import UserMixin, AnonymousUserMixin
from functools import partial
from werkzeug.security import generate_password_hash, check_password_hash
utc_now = partial(datetime.now, tz=timezone.utc)
@login_manager.user_loader
def load_user(user_id):
"""
Set up current user.
"""
return User.query.get(int(user_id))
def format_date(date):
"""
Return a string representation of the given date.
:param date: DateTime instance
:returns: string
"""
date_format = '%A, %B %d %Y %H:%M'
return date.strftime(date_format)
def add_test_users():
"""
Load data to the database
for testing purposes.
"""
database.create_all()
arthur = User(username='Arthur', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
morgain = User(username='Morgain', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
clair = User(username='Clair', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
merlin = User(username='Merlin', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
ophelia = User(username='Ophelia', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
lancelot = User(username='Lancelot', email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
guinevere = User(username='Guinevere',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
uther = User(username='Uther',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
mordred = User(username='Mordred',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
percival = User(username='Percival',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
dinadan = User(username='Dinadan',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
gingalain = User(username='Gingalain',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
galahad = User(username='Galahad',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
pelleas = User(username='Pelleas',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
pellinore = User(username='Pellinore',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
tristan = User(username='Tristan',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
branor = User(username='Branor',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
accolon = User(username='Accolon',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
blanchefleur = User(username='Blanchefleur',
email='<EMAIL>ur',
password='<PASSWORD>', confirmed=True)
brangaine = User(username='Brangaine',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
cailia = User(username='Caelia',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
dindrane = User(username='Dindrane',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
enide = User(username='Enide',
email='<EMAIL>',
password='<PASSWORD>', confirmed=True)
database.session.add_all([arthur, morgain, clair, merlin, ophelia,
lancelot, guinevere, mordred, percival,
dinadan, gingalain, galahad, pelleas,
pellinore, tristan, branor, accolon, cailia,
blanchefleur, brangaine, dindrane, enide,
uther])
database.session.commit()
# association table for many-to-many relationship
# between User model and Chat model
UserChatTable = database.Table(
'user_chat_link',
database.Column('user_id',
database.Integer,
database.ForeignKey('users.id',
ondelete="CASCADE"),
primary_key=True),
database.Column('chat_id',
database.Integer,
database.ForeignKey('chats.id',
ondelete="CASCADE"),
primary_key=True)
)
class RemovedChat(database.Model):
"""
Association table
to keep track of which users
mark which chats as removed.
"""
__tablename__ = 'removed_chats'
user_id = database.Column(database.Integer,
database.ForeignKey('users.id',
ondelete="CASCADE"),
primary_key=True)
chat_id = database.Column(database.Integer,
database.ForeignKey('chats.id',
ondelete="CASCADE"),
primary_key=True)
class Role(database.Model):
"""
Represents user role for managing
permission.
Static methods defined here:
insert_roles(roles=None)
"""
__tablename__ = 'roles'
id = database.Column(database.Integer, primary_key=True)
name = database.Column(database.String(64), unique=True)
is_default = database.Column(database.Boolean,
default=False,
index=True,
nullable=False)
permissions = database.Column(database.Integer)
users = database.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return f'Role(id={self.id}, name={self.name}'
# TODO
# change admin permissions to the disjunction of all the available
# permissions
@staticmethod
def insert_roles(roles=None):
"""
Insert the given roles to the database.
Insert a default set of roles if
called with no parameters.
:param roles: dictionary of roles
in the form
{'role_name': {'permissions': int,
'is_default': bool}}
"""
if roles is None:
roles = {'Admin': {'permissions': 0xff,
'is_default': False},
'User': {'permissions': 0,
'is_default': True}
}
for role in roles:
new_role = Role.query.filter_by(name=role).first()
if new_role is None:
new_role = Role(name=role)
new_role.permissions = roles[role]['permissions']
new_role.is_default = roles[role]['is_default']
database.session.add(new_role)
database.session.commit()
class Contact(database.Model):
"""
Association table
representing many-to-many relationship
among User model instances.
"""
__tablename__ = 'contacts'
user_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
primary_key=True)
contact_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
primary_key=True)
contact_group = database.Column(database.String(16), nullable=True)
_date_created = database.Column(database.DateTime(timezone=True),
default=utc_now)
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
class Chat(database.Model):
"""
Represents a chat,
which is defined as a collection of
users and messages.
Methods defined here:
to_json(user)
get_name(user)
add_users(users)
delete_users(users)
Static methods defined here:
from_json(json_object)
search_chats_query(chat_name, user)
Class methods defined here:
get_chat(users)
"""
__tablename__ = 'chats'
id = database.Column(database.Integer, primary_key=True)
name = database.Column(database.String(64))
is_group_chat = database.Column(database.Boolean, default=False)
_date_created = database.Column(database.DateTime(timezone=True),
default=utc_now)
_date_modified = database.Column(database.DateTime(timezone=True),
default=utc_now,
onupdate=utc_now)
removed_users = database.relationship('RemovedChat',
backref='chat',
lazy='dynamic',
cascade='all, delete-orphan')
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
@hybrid_property
def date_modified(self):
return self._date_modified.astimezone(timezone.utc)
@date_modified.expression
def date_modified(self):
return self._date_modified
@date_modified.setter
def date_modified(self, value):
self._date_modified = value
def to_json(self, user):
"""
Return a JSON representation
of current chat.
:param user: current user (needed to get chat name)
:returns: Chat model instance turned to dictionary
"""
chat = {'chat_name': self.get_name(user),
'is_group_chat': self.is_group_chat,
'date_created': self.date_created,
'date_modified': self.date_modified,
'messages': url_for('api.get_messages',
chat_id=self.id,
_external=True)
}
return chat
def get_name(self, user):
"""
Return current chat's 'name' if present,
otherwise return 'username'
of the first user of current chat's 'users' attribute
which is not equal to the given user's username.
:param user: User model instance
:returns: string
"""
if self.name:
return self.name
recipient = (self
.users
.filter(User.username != user.username)
.first())
return recipient.username
def add_users(self, users):
"""
Add the given users to current chat.
:param users: sequence of User model instances
"""
for user in users:
if user not in self.users.all():
self.users.append(user)
database.session.commit()
def remove_users(self, users):
"""
Delete the given users from current chat.
:param users: sequence of User model instances
"""
for user in users:
self.users.remove(user)
database.session.commit()
@staticmethod
def from_json(json_chat, current_user):
"""
Return a Chat model instance
created from the given json_chat dictionary.
:param json_chat: dictionary
:param current_user: current user (needed to get chat name)
:returns: Chat model instance
"""
chat = Chat()
chat_name = json_chat.get('chat_name')
usernames = json_chat.get('users')
users = User.query.filter(User.username.in_(usernames)).all()
if len(users) > 1:
chat.is_group_chat = True
if not chat_name:
raise ValidationError('Chat name or recipient name '
'must be present.')
chat.add_users(users)
chat.add_users([current_user])
return chat
@staticmethod
def search_chats_query(chat_name, user):
"""
Return a query of chats
where each chat either:
- contains the given chat_name in 'name' column;
- has only two users ('is_group_chat' is False and 'name' is None),
and the user with 'username' not equal to (the given user).username
contains the given chat_name in 'username'.
:param chat_name: string to search for
:param user: user whose 'username' is excluded from search,
i.e. if
chat.users == [User(username='bob'),
User(username='arthur')]
and chat.name == None,
then search_chats('bob', User(username='bob')) does not return this chat,
while search_chats('arthur', User(username='bob')) does
:returns: Chat model query
"""
subquery_current = (User
.query
.filter(User
.username == user.username)
.subquery())
subquery_pattern = (User
.query
.filter(User.username != user.username,
User
.username
.ilike('%' + chat_name + '%'))
.subquery())
subquery_current_chats = (database
.session
.query(UserChatTable.c.chat_id)
.join(subquery_current,
UserChatTable
.c
.user_id == subquery_current.c.id)
.subquery())
subquery_pattern_chats = (database
.session
.query(UserChatTable.c.chat_id)
.join(subquery_pattern,
UserChatTable
.c
.user_id == subquery_pattern.c.id)
.subquery())
chats = (database
.session
.query(Chat)
.join(subquery_current_chats,
Chat.id == subquery_current_chats.c.chat_id)
.join(subquery_pattern_chats,
subquery_current_chats
.c
.chat_id == subquery_pattern_chats.c.chat_id))
return (database
.session
.query(Chat)
.filter(Chat.users.contains(user),
Chat.name.ilike('%' + chat_name + '%'))
.union(chats))
@classmethod
def get_chat(cls, users):
"""
Return the chat of users in the given sequence.
:param user: sequence of User model instances
:returns: Chat model instance
"""
chat = cls.query
for user in users:
chat = chat.filter(cls.users.contains(user))
return chat.first()
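# Example (illustrative names): Chat.get_chat([bob, arthur]) returns the first
# chat whose 'users' collection contains both given users, or None if there is none.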
class User(UserMixin, database.Model):
"""
User model.
Implements UserMixin,
used as a default authentication model by Flask-Login.
Methods defined here:
get_updated_chats(current_user, session)
get_chat_query(user_ids)
get_removed_query(chat_query=None)
get_removed_chats_query(user_ids)
mark_chats_as_removed(chats)
unmark_chats_as_removed(chats)
has_permission(permission)
verify_password(password)
generate_auth_token(expiration=3600)
generate_confirmation_token(expiration=3600)
confirm(token)
has_contact(user)
is_contacted_by(user)
add_contacts(users, contact_group=None)
delete_contacts(users)
get_other_users_query()
get_available_chats_query()
get_messages(chat)
get_unread_messages_query(chat)
search_users_query(username, users_query)
Static methods defined here:
verify_auth_token(token)
"""
__tablename__ = 'users'
id = database.Column(database.Integer, primary_key=True)
confirmed = database.Column(database.Boolean, default=False)
last_seen = database.Column(database.DateTime(timezone=True),
nullable=True)
role_id = database.Column(database.Integer,
database.ForeignKey('roles.id'))
_date_created = database.Column(database.DateTime(timezone=True),
nullable=False,
default=utc_now)
username = database.Column(database.String(64),
unique=True,
index=True,
nullable=False)
email = database.Column(database.String(64),
unique=True,
index=True,
nullable=False)
password_hash = database.Column(database.String(128), nullable=False)
contacts = database.relationship('Contact',
foreign_keys=[Contact.user_id],
backref=database.backref('user',
lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
contacted = database.relationship('Contact',
foreign_keys=[Contact.contact_id],
backref=database.backref('contact',
lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
messages_from = (database
.relationship('Message',
primaryjoin='User.id==Message.sender_id'))
messages_to = (database
.relationship('Message',
primaryjoin='User.id==Message.recipient_id'))
chats = database.relationship('Chat',
secondary=UserChatTable,
backref=database.backref('users',
lazy='dynamic'))
removed_chats = database.relationship('RemovedChat',
backref='user',
lazy='dynamic',
cascade='all, delete-orphan')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_MAIL']:
self.role = Role.query.filter_by(permissions = 0xff).first()
else:
self.role = Role.query.filter_by(is_default=True).first()
def __repr__(self):
return (f'User(id={self.id}, username={self.username}, '
+ f'date_created={self.date_created}, '
+ f'confirmed={self.confirmed})')
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
@property
def is_admin(self):
"""
Check if current user has admin permissions.
:returns: True if current user has admin permission,
False otherwise
"""
return self.role and self.has_permission(Permission.ADMINISTRATION)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
"""
Assign the given password to current user.
:param password: string
"""
self.password_hash = generate_password_hash(password)
def get_updated_chats(self, current_user, session):
"""
Return information about the user's updated chats,
if there are any.
:param current_user: the user currently logged in
:param session: flask session
:returns: dictionary with the keys
'chats', 'current_chat_messages', 'current_username'
or None
"""
available_chats = self.get_available_chats_query().all()
chats = []
messages = []
for chat in available_chats:
unread_messages_query = self.get_unread_messages_query(chat)
count = unread_messages_query.count()
if count:
chats.append({'chat_id': str(chat.id),
'unread_messages_count': count,
'chat_name': chat.get_name(self)})
current_chat_id = session.get((current_user.id,
'current_chat_id'))
if current_chat_id == chat.id:
messages = (Message
.get_messages_list(unread_messages_query))
if chats:
data = {'chats': chats,
'current_chat_messages': messages,
'current_username': self.username}
return data
def get_chat_query(self, user_ids):
"""
Return a query of current user's chats
with users identified by the given user_ids.
:param user_ids: sequence of integers
:returns: Chat model query
"""
return (Chat
.query
.filter(Chat.users.contains(self))
.join(UserChatTable,
and_(UserChatTable.c.chat_id == Chat.id,
UserChatTable.c.user_id.in_(user_ids)
)
)
)
def get_removed_query(self, chat_query=None):
"""
Return RemovedChat query for current user
(based on the chat query, if given).
:param chat_query: Chat model query
:returns: RemovedChat model query
"""
if chat_query:
return (RemovedChat
.query
.filter(RemovedChat.user == self,
RemovedChat
.chat_id
.in_([chat.id
for chat
in chat_query.all()]
)
)
)
else:
return (RemovedChat
.query
.filter(RemovedChat.user == self))
def get_removed_chats_query(self, user_ids):
"""
Return a query of chats
with users having
the given user_ids
which are marked as removed by current user.
:param user_ids: sequence of integers
:returns: Chat model query
"""
chat_query = self.get_chat_query(user_ids)
removed_chat_query = self.get_removed_query(chat_query)
result = (chat_query
.join(removed_chat_query.subquery(),
Chat
.id
.in_([removed.chat_id
for removed
in removed_chat_query]
)
)
)
return result
def mark_chats_as_removed(self, chats):
"""
Add RemovedChat record
for each chat in the given chats.
:param chats: sequence of Chat model instances
"""
for chat in chats:
removed_chat = RemovedChat()
removed_chat.user = self
removed_chat.chat = chat
database.session.add(removed_chat)
database.session.commit()
def unmark_chats_as_removed(self, chats):
"""
Delete RemovedChat record
for each chat in the given chats.
:param chats: sequence of Chat model instances
"""
chat_ids = [chat.id for chat in chats]
removed_chats_query = (RemovedChat
.query
.filter(RemovedChat.chat_id.in_(chat_ids),
RemovedChat.user_id == self.id))
removed_chats_query.delete(synchronize_session='fetch')
def has_permission(self, permission):
"""
Check if current user has the given permission.
:param permission: integer representing permission
:returns: True if current user has a role
and the role has permission,
False otherwise
"""
return (self.role is not None
and (self.role.permissions & permission == permission))
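# e.g. the default Admin role has permissions == 0xff, so
# has_permission(Permission.ADMINISTRATION) is True because 0xff & 1 == 1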
def verify_password(self, password):
"""
Check if the given password matches current user's password.
:param password: string
:returns: True if password matches current user's password,
False otherwise
"""
return check_password_hash(self.password_hash, password)
def generate_auth_token(self, expiration=3600):
"""
Return an authentication token for current user.
:param expiration: Time in seconds after which token expires
:returns: TimedJSONWebSignature
"""
serializer = Serializer(current_app.config['SECRET_KEY'], expiration)
return serializer.dumps({'id': self.id})
def generate_confirmation_token(self, expiration=3600):
"""
Return a confirmation token for current user.
:param expiration: Time in seconds after which token expires
:returns: TimedJSONWebSignature
"""
serializer = Serializer(current_app.config['SECRET_KEY'], expiration)
return serializer.dumps({'confirm': self.id})
def confirm(self, token):
"""
Check that the given token belongs to current user
and set current user's 'confirmed' column to True
if it does.
:param token: TimedJSONWebSignature instance or string
:returns: True if the given token belongs to current user,
False otherwise
"""
serializer = Serializer(current_app.config['SECRET_KEY'])
try:
data = serializer.loads(token)
except (BadHeader, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
database.session.add(self)
return True
def has_contact(self, user):
"""
Check if current user has the given user as a contact.
:param user: User model instance
:returns: True if current user has user as a contact,
False otherwise
"""
return bool(self.contacts.filter_by(contact_id=user.id).first())
def is_contacted_by(self, user):
"""
Check if the given user has current user as a contact.
:param user: User model instance
:returns: True if user has current user as a contact,
False otherwise
"""
return bool(self.contacted.filter_by(user_id=user.id).first())
def add_contacts(self, users, contact_group=None):
"""
Add the given users to current user's contacts.
:param users: list of User model instances
:param contact_group: name of contact group
"""
for user in users:
if not self.has_contact(user):
relation = Contact(user=self,
contact=user,
contact_group=contact_group)
database.session.add(relation)
def delete_contacts(self, users):
"""
Delete the given users from contacts of current user.
:param users: sequence of User model instances
"""
for user in users:
if self.has_contact(user):
relation = self.contacts.filter_by(contact_id=user.id).first()
database.session.delete(relation)
def get_other_users_query(self):
"""
Return a query of users not including current user
ordered by column 'username' in ascending order.
:returns: User model query
"""
return (User
.query
.filter(User.id != self.id)
.order_by(User.username))
def get_available_chats_query(self):
"""
Return a query of current user's chats
not marked as removed ordered by modification date
in descending order.
:returns: Chat model query
"""
removed_chats = self.get_removed_query()
return (Chat
.query
.filter(Chat.users.contains(self))
.filter(not_(Chat
.id
.in_(removed_chats
.with_entities(RemovedChat.chat_id))))
.order_by(Chat.date_modified.desc()))
def get_messages(self, chat):
"""
Return a list of dictionaries with keys
'text', 'date_created', 'sender_username', 'recipient_username'
sorted by creation date in ascending order.
:param chat: Chat model instance
:returns: list of dictionaries
"""
messages = chat.messages.order_by(Message.date_created).all()
message_dict_list = []
for message in messages:
sender = message.sender
recipient = message.recipient
sender_name = sender.username if sender else None
recipient_name = recipient.username if recipient else None
message_dict = {'text': message.text,
'date_created': message.date_created.isoformat(),
'sender_username': sender_name,
'recipient_username': recipient_name}
message_dict_list.append(message_dict)
return message_dict_list
def get_unread_messages_query(self, chat):
"""
Return a query of unread messages from the given chat.
:param chat: Chat model instance
:returns: Message model query
"""
return (chat
.messages
.filter(Message.sender != self,
not_(Message.was_read)))
def search_users_query(self, username, users_query):
"""
Return a query of users (except current user)
from the given users_query
containing the given username string in the 'username' column
in ascending lexicographical order by 'username'.
:param username: string to search in 'username' columns
:param users_query: User model query to search
:returns: User model query
"""
query = (users_query
.filter(User.username.ilike('%' + username + '%')))
return query
@staticmethod
def verify_auth_token(token):
"""
Check the validity of the given token.
:param token: <PASSWORD>
:returns: True if token is valid,
False otherwise
"""
print('user verify function')
serializer = Serializer(current_app.config['SECRET_KEY'])
try:
print('hey')
data = serializer.loads(token)
print('data', data)
except Exception as exception:
print(exception)
return None
print(data)
return User.query.get(data['id'])
class Message(database.Model):
"""
Message model.
Methods defined here:
to_json(user)
Static methods defined here:
get_messages_list(message_query)
from_json(json_message)
flush_messages(message_query)
"""
__tablename__ = 'messages'
id = database.Column(database.Integer, primary_key = True)
was_read = database.Column(database.Boolean,
default=False)
text = database.Column(database.Text)
_date_created = database.Column(database.DateTime(timezone=True),
nullable=False,
default=utc_now)
sender_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
nullable=False)
recipient_id = database.Column(database.Integer,
database.ForeignKey('users.id'),
nullable=False)
chat_id = database.Column(database.Integer,
database.ForeignKey('chats.id'),
nullable=False)
sender = database.relationship('User', foreign_keys=[sender_id])
recipient = database.relationship('User', foreign_keys=[recipient_id])
chat = database.relationship('Chat',
backref=database.backref('messages',
lazy='dynamic'),
foreign_keys=[chat_id])
def __repr__(self):
return (f'Message(id={self.id}, text={self.text}, '
+ f'sender={self.sender}, '
+ f'recipient={self.recipient}, '
+ f'was_read={self.was_read}, '
+ f'text={self.text}, '
+ f'chat={self.chat}, '
+ f'date_created={self.date_created})'
)
@hybrid_property
def date_created(self):
return self._date_created.astimezone(timezone.utc)
@date_created.expression
def date_created(self):
return self._date_created
@date_created.setter
def date_created(self, value):
self._date_created = value
def to_json(self, user):
"""
Return a dictionary representation
of current message.
:param user: current user (needed to get the chat name)
:returns: Message model instance turned into a dictionary
"""
if not self.recipient:
recipient_username = ''
else:
recipient_username = self.recipient.username
message = {'id': self.id,
'chat_id': self.chat_id,
'was_read': self.was_read,
'date_created': self.date_created,
'text': self.text,
'sender_username': self.sender.username,
'recipient_username': recipient_username,
'chat_name': self.chat.get_name(user)}
return message
@staticmethod
def get_messages_list(message_query):
"""
Return a list of dictionaries with keys
'text', 'sender_username', 'date_created'
for the messages from the given message_query
sorted by modification date in ascending order.
:param message_query: Message model query
:returns: list of dictionaries
"""
message_dict_list = []
for message in message_query.order_by(Message.date_created).all():
sender = message.sender
sender_username = sender.username if sender else None
date_created = message.date_created
message_dict = {'text': message.text,
'sender_username': sender_username,
'date_created': date_created.isoformat()}
message_dict_list.append(message_dict)
return message_dict_list
@staticmethod
def from_json(json_message):
"""
Return a Message model instance
created from the given json_message dictionary.
:param json_message: dictionary
:returns: Message model instance
"""
try:
text = str(json_message.get('text')).rstrip()
if text:
message = Message()
message.text = text[:current_app.config['MAX_STRING_LENGTH']]
return message
except (LookupError, ValueError):
pass
@staticmethod
def flush_messages(message_query):
"""
Set 'was_read' column to True for all messages
from the given message_query.
:param message_query: Message model query
"""
message_query.update({'was_read': True}, synchronize_session=False)
database.session.commit()
class AnonymousUser(AnonymousUserMixin):
def has_permission(self, permission):
return False
@property
def is_admin(self):
return False
class Permission:
ADMINISTRATION = 1
login_manager.anonymous_user = AnonymousUser
```
#### File: simple-messenger/tests/test_message_model.py
```python
from app import create_app, database
from app.models import Chat, Message
from app.models import User, Role
from app.exceptions import ValidationError
import unittest
class MessageModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.app_context = self.app.app_context()
self.app_context.push()
database.create_all()
Role.insert_roles()
self.bob = User(username='bob', password='<PASSWORD>',
email='<EMAIL>', confirmed=True)
self.arthur = User(username='arthur', password='<PASSWORD>',
email='<EMAIL>', confirmed=True)
self.chat_bob_arthur = Chat()
self.chat_bob_arthur.add_users([self.bob, self.arthur])
self.message1 = Message(text='hi there1',
sender=self.arthur,
recipient=self.bob,
was_read=True,
chat=self.chat_bob_arthur)
self.message2 = Message(text='hi there2',
sender=self.arthur,
recipient=self.bob,
chat=self.chat_bob_arthur)
self.message3 = Message(text='hi there3',
sender=self.arthur,
recipient=self.bob,
chat=self.chat_bob_arthur)
self.message4 = Message(text='hi there4',
sender=self.bob,
recipient=self.arthur,
chat=self.chat_bob_arthur)
database.session.add_all([self.bob, self.arthur,
self.chat_bob_arthur,
self.message1,
self.message2,
self.message3,
self.message4])
database.session.commit()
def tearDown(self):
database.session.remove()
database.drop_all()
self.app_context.pop()
def test_flush_messages(self):
self.assertFalse(self.message2.was_read)
self.assertFalse(self.message3.was_read)
self.assertTrue(self.message1.was_read)
self.assertFalse(self.message4.was_read)
Message.flush_messages(Message
.query
.filter(Message.sender == self.arthur))
self.assertTrue(self.message2.was_read)
self.assertTrue(self.message3.was_read)
self.assertTrue(self.message1.was_read)
self.assertFalse(self.message4.was_read)
def test_to_json(self):
message = self.message1
json_message = message.to_json(self.bob)
self.assertEqual(json_message,
{'id': message.id,
'chat_id': message.chat_id,
'was_read': message.was_read,
'date_created': message.date_created,
'text': message.text,
'sender_username': message.sender.username,
'recipient_username': message.recipient.username,
'chat_name': message.chat.get_name(self.bob)
})
``` |
{
"source": "96tm/warehouse-management-test",
"score": 2
} |
#### File: warehouse-management-test/cargo/models.py
```python
from django.db import models
from django.utils.translation import gettext as _
from common.models import format_date
def get_cargo_total(obj):
return sum([row.stock.price * row.number
for row in obj.cargostock_set.all()])
class Cargo(models.Model):
"""
Deliveries table.
"""
class Meta:
verbose_name = _('Поставка')
verbose_name_plural = _('Поставки')
DONE = 'Исполнено'
IN_TRANSIT = 'В пути'
choices = [(DONE, DONE), (IN_TRANSIT, IN_TRANSIT)]
supplier = models.ForeignKey('supplier.Supplier',
on_delete=models.CASCADE,
verbose_name=_('Поставщик'))
status = models.CharField(max_length=10, choices=choices,
verbose_name=_('Статус'),
default=IN_TRANSIT)
date = models.DateTimeField(auto_now_add=True,
verbose_name=_('Дата поставки'))
stocks = models.ManyToManyField('warehouse.Stock',
through='common.CargoStock',
verbose_name=_('Товары'))
def __str__(self):
return (str(self.pk) + ', ' + str(self.supplier) + ', '
+ self.status + ', ' + str(format_date(self.date)))
class CargoDetails(models.Model):
"""
Table for registering several goods in a single delivery.
"""
order_number = models.ForeignKey('cargo.Cargo', on_delete=models.CASCADE)
name = models.CharField(max_length=20)
quantity = models.SmallIntegerField(default=1)
def __str__(self):
return (str(self.order_number.pk) + ', ' + str(self.name) + ', '
+ str(self.quantity))
```
#### File: warehouse-management-test/cargo/views.py
```python
from django import forms
from django.contrib import messages
from django.views.generic import View
from django.utils.translation import gettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.shortcuts import render, redirect
from warehouse.models import Stock
from warehouse.forms import StockForm
from common.models import CargoStock
from .forms import CargoNewForm
# do not cache the page so that the jQuery code
# that adds formsets works correctly
@method_decorator(never_cache, name='dispatch')
class CargoFormsetsView(View):
"""
Class-based view that handles the delivery page
where several goods can be added
in a single form.
"""
stock_formset = forms.formset_factory(form=StockForm,
max_num=50,
min_num=1,
extra=0)
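# assumed behaviour: at least one StockForm is always rendered (min_num=1),
# no additional blank forms are added (extra=0), and at most 50 are accepted (max_num=50)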
template = 'cargo/cargo_formsets.html'
def post(self, request):
form = CargoNewForm(request.POST)
formset = self.stock_formset(request.POST)
context = {'form': form, 'formset': formset}
if form.is_valid() and formset.is_valid() and formset.cleaned_data:
instance = form.save()
stocks = {}
for stock in formset.cleaned_data:
name = stock['name']
stocks[name] = stocks.get(name, 0) + stock['number']
for name, number in stocks.items():
stock = Stock.objects.get(name=name)
CargoStock.objects.create(cargo=instance,
stock=stock, number=number)
messages.info(request, _('Заявка отправлена'))
return redirect(to='mainpage:index')
else:
if not formset.cleaned_data:
context['formset'] = self.stock_formset()
return render(request, self.template, context)
def get(self, request):
form = CargoNewForm()
formset = self.stock_formset()
context = {'form': form, 'formset': formset}
return render(request, self.template, context)
```
#### File: warehouse-management-test/category/admin.py
```python
from django.contrib import admin
from .models import Category
from mptt.admin import DraggableMPTTAdmin
from common.models import subtotal_value
from django.utils.translation import gettext as _
@admin.register(Category)
class CategoryAdmin(DraggableMPTTAdmin):
"""
Displays the list of categories.
"""
tree_auto_open = True
# mptt_level_indent = 30
mptt_indent_field = 'name'
list_display = ('tree_actions', 'id', 'indented_title',
'num_of_subcategory', 'total_value', )
list_display_links = ('indented_title', )
search_fields = ('name', )
def num_of_subcategory(self, obj):
return obj.get_children().count()
def total_value(self, obj):
return subtotal_value(obj)
num_of_subcategory.short_description = _('Количество подкатегорий')
total_value.short_description = _('Общая стоимость')
``` |
{
"source": "96-Zachary/BERT-NER-Chinese",
"score": 3
} |
#### File: 96-Zachary/BERT-NER-Chinese/SequenceTagger.py
```python
from torchcrf import CRF
import torch.nn.functional as F
from transformers.modeling_bert import *
from torch.nn.utils.rnn import pad_sequence
class BertForSequenceTagging(BertPreTrainedModel):
def __init__(self, config):
super(BertForSequenceTagging, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_data, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, inputs_embeds=None, head_mask=None):
input_ids, input_token_starts = input_data
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds)
sequence_output = outputs[0]
# obtain original token representations from sub_words representations (by selecting the first sub_word)
origin_sequence_output = [
layer[starts.nonzero().squeeze(1)]
for layer, starts in zip(sequence_output, input_token_starts)]
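# e.g. for the sub-words ['play', '##ing'] with starts [1, 0], only the
# representation at the position of 'play' is kept for the original token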
padded_sequence_output = pad_sequence(origin_sequence_output, batch_first=True)
padded_sequence_output = self.dropout(padded_sequence_output)
logits = self.classifier(padded_sequence_output)
outputs = (logits,)
if labels is not None:
loss_mask = labels.gt(-1)
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if loss_mask is not None:
active_loss = loss_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
log_soft = F.log_softmax
class Bert_CRF(BertPreTrainedModel):
def __init__(self, config):
super(Bert_CRF, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.init_weights()
self.crf = CRF(self.num_labels, batch_first=True)
def forward(self, input_ids, attn_masks, labels=None):  # don't confuse this with _forward_alg above.
outputs = self.bert(input_ids, attn_masks)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
emission = self.classifier(sequence_output)
attn_masks = attn_masks.type(torch.uint8)
if labels is not None:
loss = -self.crf(log_soft(emission, 2), labels, mask=attn_masks, reduction='mean')
return loss
else:
prediction = self.crf.decode(emission, mask=attn_masks)
return prediction
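# Usage sketch (shapes are assumptions): with labels, the CRF log-likelihood is
# negated into a scalar training loss; without labels, crf.decode returns a list
# of best tag-index sequences, one per input sequence under the mask.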
``` |
{
"source": "96-Zachary/DialogueNLP",
"score": 2
} |
#### File: DialogueNLP/layer/Optim.py
```python
import math
import torch.optim as optim
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
import layer.modules
import logging
logger = logging.getLogger(__name__)
class Optim(object):
def set_parameters(self, params):
self.params = list(params) # careful: params may be a generator
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.lr)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.lr)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.lr)
elif self.method == 'adam':
# self.optimizer = optim.Adam(self.params, lr=self.lr)
self.optimizer = layer.modules.MyAdam(self.params, lr=self.lr)
else:
raise RuntimeError("Invalid optim method: " + self.method)
def __init__(self, method, lr, max_grad_norm, max_weight_value=None, lr_decay=1, start_decay_at=None,
decay_bad_count=6):
self.last_ppl = None
self.lr = lr
self.max_grad_norm = max_grad_norm
self.max_weight_value = max_weight_value
self.method = method
self.lr_decay = lr_decay
self.start_decay_at = start_decay_at
self.start_decay = False
self.decay_bad_count = decay_bad_count
self.best_metric = 0
self.bad_count = 0
def step(self):
# Compute gradients norm.
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
self.optimizer.step()
if self.max_weight_value:
for p in self.params:
p.data.clamp_(0 - self.max_weight_value, self.max_weight_value)
# decay learning rate if val perf does not improve or we hit the start_decay_at limit
def updateLearningRate(self, ppl, epoch):
# if self.start_decay_at is not None and epoch >= self.start_decay_at:
# self.start_decay = True
# if self.last_ppl is not None and ppl > self.last_ppl:
# self.start_decay = True
#
# if self.start_decay:
# self.lr = self.lr * self.lr_decay
# print("Decaying learning rate to %g" % self.lr)
# self.last_ppl = ppl
if ppl >= self.best_metric:
self.best_metric = ppl
self.bad_count = 0
else:
self.bad_count += 1
logger.info('Bad_count: {0}\tCurrent lr: {1}'.format(self.bad_count, self.lr))
logger.info('Best metric: {0}'.format(self.best_metric))
if self.bad_count >= self.decay_bad_count and self.lr >= 1e-6:
self.lr = self.lr * self.lr_decay
logger.info("Decaying learning rate to %g" % self.lr)
self.bad_count = 0
self.optimizer.param_groups[0]['lr'] = self.lr
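# e.g. with lr_decay=0.5 and decay_bad_count=6 (illustrative values), the learning
# rate is halved after six consecutive calls without improvement of the tracked
# metric, as long as the current lr is at least 1e-6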
```
#### File: DialogueNLP/layer/Translator.py
```python
import layer
import torch.nn as nn
import torch
from torch.autograd import Variable
try:
import ipdb
except ImportError:
pass
class Translator(object):
def __init__(self, opt, model=None, dataset=None):
self.opt = opt
if model is None:
checkpoint = torch.load(opt.model)
model_opt = checkpoint['opt']
self.src_dict = checkpoint['dicts']['src']
self.tgt_dict = checkpoint['dicts']['tgt']
self.enc_rnn_size = model_opt.enc_rnn_size
self.dec_rnn_size = model_opt.dec_rnn_size
encoder = layer.Models.Encoder(model_opt, self.src_dict)
decoder = layer.Models.Decoder(model_opt, self.tgt_dict)
decIniter = layer.Models.DecInit(model_opt)
model = layer.Models.NMTModel(encoder, decoder, decIniter)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size // model_opt.maxout_pool_size, self.tgt_dict.size()),
nn.LogSoftmax())
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
if opt.cuda:
model.cuda()
generator.cuda()
else:
model.cpu()
generator.cpu()
model.generator = generator
else:
self.src_dict = dataset['dicts']['src']
self.tgt_dict = dataset['dicts']['tgt']
self.enc_rnn_size = opt.enc_rnn_size
self.dec_rnn_size = opt.dec_rnn_size
self.opt.cuda = True if len(opt.gpus) >= 1 else False
self.opt.n_best = 1
self.opt.replace_unk = False
self.tt = torch.cuda if opt.cuda else torch
self.model = model
self.model.eval()
self.copyCount = 0
def buildData(self, srcBatch, goldBatch):
srcData = [self.src_dict.convertToIdx(b,
layer.Constants.UNK_WORD) for b in srcBatch]
tgtData = None
if goldBatch:
tgtData = [self.tgt_dict.convertToIdx(b,
layer.Constants.UNK_WORD,
layer.Constants.BOS_WORD,
layer.Constants.EOS_WORD) for b in goldBatch]
return layer.Dataset(srcData, tgtData, self.opt.batch_size, self.opt.cuda)
def buildTargetTokens(self, pred, src, attn):
pred_word_ids = [x.item() for x in pred]
tokens = self.tgt_dict.convertToLabels(pred_word_ids, layer.Constants.EOS)
tokens = tokens[:-1] # EOS
if self.opt.replace_unk:
for i in range(len(tokens)):
if tokens[i] == layer.Constants.UNK_WORD:
_, maxIndex = attn[i].max(0)
tokens[i] = src[maxIndex[0]]
return tokens
def translateBatch(self, srcBatch, tgtBatch):
batchSize = srcBatch[0].size(1)
beamSize = self.opt.beam_size
# (1) run the encoder on the src
encStates, context = self.model.encoder(srcBatch)
srcBatch = srcBatch[0] # drop the lengths needed for encoder
decStates = self.model.decIniter(encStates[1]) # batch, dec_hidden
# (3) run the decoder to generate sentences, using beam search
# Expand tensors for each beam.
context = context.data.repeat(1, beamSize, 1)
decStates = decStates.unsqueeze(0).data.repeat(1, beamSize, 1)
att_vec = self.model.make_init_att(context)
padMask = srcBatch.data.eq(layer.Constants.PAD).transpose(0, 1).unsqueeze(0).repeat(beamSize, 1, 1).float()
beam = [layer.Beam(beamSize, self.opt.cuda) for k in range(batchSize)]
batchIdx = list(range(batchSize))
remainingSents = batchSize
for i in range(self.opt.max_sent_length):
# Prepare decoder input.
input = torch.stack([b.getCurrentState() for b in beam
if not b.done]).transpose(0, 1).contiguous().view(1, -1)
g_outputs, decStates, attn, att_vec = self.model.decoder(input, decStates, context,
padMask.view(-1, padMask.size(2)), att_vec)
# g_outputs: 1 x (beam*batch) x numWords
g_outputs = g_outputs.squeeze(0)
g_out_prob = self.model.generator.forward(g_outputs)
# batch x beam x numWords
wordLk = g_out_prob.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous()
attn = attn.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous()
active = []
father_idx = []
for b in range(batchSize):
if beam[b].done:
continue
idx = batchIdx[b]
if not beam[b].advance(wordLk.data[idx], attn.data[idx]):
active += [b]
father_idx.append(beam[b].prevKs[-1]) # this is very annoying
if not active:
break
# to get the real father index
real_father_idx = []
for kk, idx in enumerate(father_idx):
real_father_idx.append(idx * len(father_idx) + kk)
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
activeIdx = self.tt.LongTensor([batchIdx[k] for k in active])
batchIdx = {beam: idx for idx, beam in enumerate(active)}
def updateActive(t, rnnSize):
# select only the remaining active sentences
view = t.data.view(-1, remainingSents, rnnSize)
newSize = list(t.size())
newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents
return view.index_select(1, activeIdx).view(*newSize)
decStates = updateActive(decStates, self.dec_rnn_size)
context = updateActive(context, self.enc_rnn_size)
att_vec = updateActive(att_vec, self.enc_rnn_size)
padMask = padMask.index_select(1, activeIdx)
# set correct state for beam search
previous_index = torch.stack(real_father_idx).transpose(0, 1).contiguous()
decStates = decStates.view(-1, decStates.size(2)).index_select(0, previous_index.view(-1)).view(
*decStates.size())
att_vec = att_vec.view(-1, att_vec.size(1)).index_select(0, previous_index.view(-1)).view(*att_vec.size())
remainingSents = len(active)
# (4) package everything up
allHyp, allScores, allAttn = [], [], []
n_best = self.opt.n_best
for b in range(batchSize):
scores, ks = beam[b].sortBest()
allScores += [scores[:n_best]]
valid_attn = srcBatch.data[:, b].ne(layer.Constants.PAD).nonzero().squeeze(1)
hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
attn = [a.index_select(1, valid_attn) for a in attn]
allHyp += [hyps]
allAttn += [attn]
return allHyp, allScores, allAttn, None
def translate(self, srcBatch, goldBatch):
# (1) convert words to indexes
dataset = self.buildData(srcBatch, goldBatch)
# (wrap(srcBatch), lengths), (wrap(tgtBatch), ), indices
src, tgt, indices = dataset[0]
# (2) translate
pred, predScore, attn, _ = self.translateBatch(src, tgt)
pred, predScore, attn = list(zip(
*sorted(zip(pred, predScore, attn, indices),
key=lambda x: x[-1])))[:-1]
# (3) convert indexes to words
predBatch = []
for b in range(src[0].size(1)):
predBatch.append(
[self.buildTargetTokens(pred[b][n], srcBatch[b], attn[b][n])
for n in range(self.opt.n_best)]
)
return predBatch, predScore, None
```
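For orientation, here is a minimal, hypothetical driver for the `Translator` class above. The option names (`model`, `batch_size`, `beam_size`, `n_best`, `max_sent_length`, `cuda`, `gpus`, `replace_unk`) mirror the attributes the class reads, but the way they are assembled (an `argparse.Namespace`), the checkpoint path, and the tokenized input are assumptions, not the project's actual CLI.
```python
# Hypothetical usage sketch; option names follow the attributes Translator reads,
# while the checkpoint path and tokenization are placeholders.
import argparse
opt = argparse.Namespace(
    model="model_checkpoint.pt",  # assumed path to a saved checkpoint
    batch_size=16, beam_size=5, n_best=1, max_sent_length=50,
    cuda=False, gpus=[], replace_unk=False,
)
translator = Translator(opt)                      # loads encoder/decoder/generator from the checkpoint
src_batch = [["how", "are", "you", "?"]]          # one pre-tokenized source sentence
pred_batch, pred_scores, _ = translator.translate(src_batch, goldBatch=[])
print(" ".join(pred_batch[0][0]))                 # best hypothesis for the first sentence
```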
#### File: DialogueNLP/layer/TransModel.py
```python
import torch
import torch.nn as nn
'''
Example of a Transformer model
'''
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, n_layers,
n_heads, pf_dim, dropout, device, max_length=100):
super().__init__()
self.device = device
self.tok_embedding = nn.Embedding(input_dim, hidden_dim)
self.pos_embedding = nn.Embedding(max_length, hidden_dim)
self.layers = nn.ModuleList([EncoderLayer(hidden_dim,
n_heads,
pf_dim,
dropout,
device) for _ in range(n_layers)])
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hidden_dim])).to(device)
def forward(self, src, src_mask):
# src = [batch_size, src_len]
        # src_mask = [batch_size, src_len]
batch_size, src_len = src.shape[0], src.shape[1]
# pos = [batch_size, src_len]
pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
# src = [batch_size, src_len, hidden_dim]
src = self.dropout((self.tok_embedding(src) * self.scale) +
self.pos_embedding(pos))
        # src = [batch_size, src_len, hidden_dim]
for layer in self.layers:
src = layer(src, src_mask)
return src
class EncoderLayer(nn.Module):
def __init__(self, hidden_dim, n_heads, pf_dim, dropout, device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hidden_dim)
self.ff_layer_norm = nn.LayerNorm(hidden_dim)
self.self_attention = MultiHeadAttentionLayer(
hidden_dim,
n_heads,
dropout,
device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(
hidden_dim,
pf_dim,
dropout
)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_mask):
# src = [batch_size, src_len, hidden_dim]
# src_mask = [batch_size, src_len]
# _src == src = [batch_size, src_len, hidden_dim]
# MultiHeadAttention
_src, _ = self.self_attention(src, src, src, src_mask)
        # dropout, residual connection and layer norm
src = self.self_attn_layer_norm(src + self.dropout(_src))
# positionwise feedforward
_src = self.positionwise_feedforward(src)
# dropout, residual and layer norm
# src = [batch_size, src_len, hidden_dim]
src = self.ff_layer_norm(src + self.dropout(_src))
return src
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout, device):
super().__init__()
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.device = device
self.dropout = nn.Dropout(dropout)
assert hidden_dim % n_heads == 0
self.head_dim = hidden_dim // n_heads
self.fc_q = nn.Linear(hidden_dim, hidden_dim)
self.fc_k = nn.Linear(hidden_dim, hidden_dim)
self.fc_v = nn.Linear(hidden_dim, hidden_dim)
self.fc_o = nn.Linear(hidden_dim, hidden_dim)
self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
'''
query = [batch_size, q_src_len, hidden_dim]
key = [batch_size, k_src_len, hidden_dim]
value = [batch_size, v_src_len, hidden_dim]
'''
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
'''
Q = [batch_size, n_heads, q_src_len, head_dim]
K = [batch_size, n_heads, k_src_len, head_dim]
V = [batch_size, n_heads, v_src_len, head_dim]
'''
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
# energy = [batch_size, n_heads, q_src_len, k_src_len]
energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e10)
# attention = [batch_size, n_heads, q_src_len(v_src_len), k_src_len]
attention = torch.softmax(energy, dim=-1)
# x = [batch_size, n_heads, q_src_len(v_src_len), head_dim]
x = torch.matmul(self.dropout(attention), V)
        # x = [batch_size, q_src_len(v_src_len), n_heads, head_dim]
x = x.permute(0, 2, 1, 3).contiguous()
# x = [batch_size, src_len, hidden_dim]
x = x.view(batch_size, -1, self.hidden_dim)
# x = [batch_size, src_len, hidden_dim]
x = self.fc_o(x)
return x, attention
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, hidden_dim, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(hidden_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
        # x = [batch_size, src_len, hidden_dim]
        # apply activation and dropout between the two projections (self.dropout was otherwise unused)
        x = self.dropout(torch.relu(self.fc_1(x)))
        # x = [batch_size, src_len, pf_dim]
        x = self.fc_2(x)
        # x = [batch_size, src_len, hidden_dim]
return x
class Decoder(nn.Module):
def __init__(self, output_dim, hidden_dim, n_layers,
n_heads, pf_dim, dropout, device, max_length=100):
super().__init__()
self.device = device
self.tok_embedding = nn.Embedding(output_dim, hidden_dim)
self.pos_embedding = nn.Embedding(max_length, hidden_dim)
self.layers = nn.ModuleList([DecoderLayer(hidden_dim,
n_heads,
pf_dim,
dropout,
device)
for _ in range(n_layers)])
self.fc_out = nn.Linear(hidden_dim, output_dim)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hidden_dim])).to(device)
def forward(self, trg, enc_src, trg_mask, src_mask):
# trg = [batch_size, trg_len]
# enc_src = [batch_size, src_len, hidden_dim]
# trg_mask = [batch_size, trg_len]
# src_mask = [batch_size, src_len]
batch_size = trg.shape[0]
trg_len = trg.shape[1]
        # pos = [batch_size, trg_len]
pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
# trg = [batch_size, trg_len, hidden_dim]
trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos))
# trg = [batch_size, trg_len, hidden_dim]
# attention = [batch_size, n_heads, trg_len, src_len]
for layer in self.layers:
trg, attention = layer(trg, enc_src, trg_mask, src_mask)
# output = [batch_size, trg_len, output_dim]
# output = self.fc_out(trg)
return trg, attention
class DecoderLayer(nn.Module):
def __init__(self, hidden_dim, n_heads, pf_dim,
dropout, device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hidden_dim)
self.enc_attn_layer_norm = nn.LayerNorm(hidden_dim)
self.ff_layer_norm = nn.LayerNorm(hidden_dim)
self.self_attention = MultiHeadAttentionLayer(hidden_dim, n_heads, dropout, device)
self.encoder_attention = MultiHeadAttentionLayer(hidden_dim, n_heads, dropout, device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(
hidden_dim,
pf_dim,
dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, trg, enc_src, trg_mask, src_mask):
# trg = [batch_size, trg_len, hidden_dim]
# enc_src = [batch_size, src_len, hidden_dim]
# trg_mask = [batch_size, trg_len]
# src_mask = [batch_size, src_len]
# self attention
_trg, _ = self.self_attention(trg, trg, trg, trg_mask)
# dropout, residual connection and layer norm
# trg = [batch_size, trg_len, hidden_dim]
trg = self.self_attn_layer_norm(trg + self.dropout(_trg))
# encoder attention
_trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
# dropout, residual connection and layer norm
# trg = [batch size, trg len, hid dim]
trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))
# positionwise feedforward
_trg = self.positionwise_feedforward(trg)
# dropout, residual and layer norm
# trg = [batch size, trg len, hid dim]
# attention = [batch size, n heads, trg len, src len]
trg = self.ff_layer_norm(trg + self.dropout(_trg))
return trg, attention
class SelectAttention(nn.Module):
def __init__(self, emb_dim, length, opt):
super().__init__()
self.emb_dim = emb_dim
self.length = length
self.fc = nn.Linear(self.emb_dim, self.emb_dim)
self.dropout = nn.Dropout(opt.dropout)
def align(self, g_r1, g_r2, g_r1_mask, g_r2_mask):
# g_r1 = [batch_size, 1, r1_len, emb_dim]
# g_r2 = [batch_size, 1, r2_len, emb_dim]
g_r1 = g_r1.unsqueeze(1)
g_r2 = g_r2.unsqueeze(1)
energy = torch.matmul(g_r1, g_r2.permute(0, 1, 3, 2))
energy = energy.masked_fill(g_r2_mask == False, -1e10)
attention = torch.softmax(energy, dim=-1)
weights = torch.sum(attention, dim=-2).squeeze()
_, sort_idx = torch.sort(weights, dim=-1, descending=True)
sort_idx = sort_idx[:, :self.length]
g_r2 = g_r2.squeeze()
g_r2_mask = g_r2_mask.squeeze()
x_mask = torch.gather(g_r2_mask, 1, sort_idx).unsqueeze(1).unsqueeze(1)
sort_idx = sort_idx.unsqueeze(dim=-1).expand(sort_idx.shape[0], sort_idx.shape[1], g_r2.shape[-1])
x = torch.gather(g_r2, 1, sort_idx)
# x = torch.gather(g_r2, dim=2, sort_idx)
# x, x_mask = [], []
# for i in range(sort_idx.shape[0]):
# x.append(g_r2[i,:,sort_idx[i],:])
# x_mask.append(g_r2_mask[i,:,:,sort_idx[i]])
# x = torch.stack(x).squeeze()
# x_mask = torch.stack(x_mask)
return x, x_mask
def forward(self, g_r1, g_r2, g_r1_mask, g_r2_mask):
x, x_mask = self.align(g_r1, g_r2, g_r1_mask, g_r2_mask)
# x2 = self.align(g_r2, g_r1, g_r2_mask, g_r1_mask)
# x = (x1[:,:self.length,:] + x2[:,:self.length,:])/2
# x_mask = g_r1_mask[:,:,:,:self.length] & g_r2_mask[:,:,:,:self.length]
# x = torch.cat([x1[:,:self.length,:], x2[:,:self.length,:]], dim=-2)
# x_mask = torch.cat([g_r1_mask[:,:,:,:self.length], g_r2_mask[:,:,:,:self.length]], dim=-1)
# x = torch.cat([x1, x2], dim=-2)
# x_mask = torch.cat([g_r1_mask, g_r2_mask], dim=-1)
return x, x_mask
class Transformer(nn.Module):
def __init__(self, src_encoder, tgt_encoder, decoder, src_pad_idx, trg_pad_idx, opt):
super().__init__()
self.src_encoder = src_encoder
self.tgt_encoder = tgt_encoder
self.decoder = decoder
self.src_pad_idx = src_pad_idx
self.trg_pad_idx = trg_pad_idx
self.device = opt.device
self.select_attn = SelectAttention(opt.word_vec_size, 10, opt)
def make_src_mask(self, src):
# src = [batch_size, src_len]
src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
        # src_mask = [batch_size, 1, 1, src_len]
return src_mask
def make_trg_mask(self, trg):
# trg = [batch_size, trg_len]
# trg_pad_mask = [batch_size, 1, 1, trg_len]
trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(2)
trg_len = trg.shape[1]
# trg_sub_mask = [trg_len, trg_len]
trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device=self.device)).bool()
# trg_mask = [batch_size, 1, trg_len, trg_len]
trg_mask = trg_pad_mask & trg_sub_mask
return trg_mask
def forward(self, keys, guide1, guide2, tgt):
keys_mask = self.make_src_mask(keys)
keys_emb = self.src_encoder(keys, keys_mask)
guide1_mask = self.make_src_mask(guide1)
guide1_emb = self.tgt_encoder(guide1, guide1_mask)
guide2_mask = self.make_src_mask(guide2)
guide2_emb = self.tgt_encoder(guide2, guide2_mask)
add_emb, add_mask = self.select_attn(guide1_emb, guide2_emb, guide1_mask, guide2_mask)
tgt = tgt[:, :-1]
tgt_mask = self.make_trg_mask(tgt)
enc_emb = torch.cat([keys_emb, add_emb], dim=1)
enc_mask = torch.cat([keys_mask, add_mask], dim=-1)
# enc_emb = keys_emb
# enc_mask = keys_mask
output, attention = self.decoder(tgt, enc_emb, tgt_mask, enc_mask)
return output
```
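To make the data flow in `Transformer.forward` easier to follow, here is a small wiring sketch. All hyperparameters, vocabulary sizes, and the `opt` namespace fields (`device`, `dropout`, `word_vec_size`) are placeholders chosen for illustration; only the constructor signatures come from the code above. Note that `SelectAttention` keeps the 10 highest-weight positions of the second guide (so guide sequences should be at least 10 tokens long), and the decoder returns hidden states because `fc_out` is commented out in `Decoder.forward`.
```python
# Illustrative wiring only; sizes and opt fields are placeholders, not project defaults.
import argparse
import torch
device = torch.device("cpu")
opt = argparse.Namespace(device=device, dropout=0.1, word_vec_size=256)
SRC_VOCAB, TGT_VOCAB, PAD = 8000, 8000, 1
enc_kwargs = dict(hidden_dim=256, n_layers=2, n_heads=8, pf_dim=512, dropout=0.1, device=device)
src_encoder = Encoder(SRC_VOCAB, **enc_kwargs)
tgt_encoder = Encoder(TGT_VOCAB, **enc_kwargs)
decoder = Decoder(TGT_VOCAB, **enc_kwargs)
model = Transformer(src_encoder, tgt_encoder, decoder, src_pad_idx=PAD, trg_pad_idx=PAD, opt=opt)
keys = torch.randint(2, SRC_VOCAB, (4, 12))    # [batch, key_len]
guide1 = torch.randint(2, TGT_VOCAB, (4, 20))  # guidance sequences (>= 10 tokens each)
guide2 = torch.randint(2, TGT_VOCAB, (4, 20))
tgt = torch.randint(2, TGT_VOCAB, (4, 15))     # forward() drops the final target token
out = model(keys, guide1, guide2, tgt)
print(out.shape)                               # [4, 14, 256] -- decoder hidden states
```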
#### File: DialogueNLP/metrics/Diversity.py
```python
import torch
import torch.nn as nn
import layer
def distinct_1(preds, targets):
'''
    Per-sequence fraction of predicted unigrams that do not appear in the target
    (normalized by the padded sequence length).
    preds = [len, batch_size]
    targets = [len, batch_size]
    Returns the sum of these fractions over the batch and the batch size.
'''
lens, nums = preds.shape
dis = []
for j in range(nums):
correct, error = 0, 0
true_idx = torch.where(targets[:, j] != layer.Constants.PAD)[0]
tmp_pred = preds[true_idx, j]
tmp_tgt = targets[true_idx, j]
for word in tmp_pred:
if word in tmp_tgt:
correct += 1
else:
error += 1
dis.append(error / lens)
dis_1 = sum(dis)
return dis_1, nums
def distinct_2(preds, targets):
'''
    Per-sequence fraction of predicted bigrams that do not appear in the target
    (normalized by the padded sequence length).
    preds = [len, batch_size]
    targets = [len, batch_size]
    Returns the sum of these fractions over the batch and the batch size.
'''
def bigrams(lists):
l = [[lists[i], lists[i + 1]] for i in range(len(lists) - 1)]
return l
lens, nums = preds.shape
dis = []
for j in range(nums):
correct, error = 0, 0
true_idx = torch.where(targets[:, j] != layer.Constants.PAD)[0]
tmp_pred = bigrams(preds[true_idx, j])
tmp_tgt = bigrams(targets[true_idx, j])
for word in tmp_pred:
if word in tmp_tgt:
correct += 1
else:
error += 1
dis.append(error / lens)
dis_2 = sum(dis)
return dis_2, nums
``` |
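A quick illustration of the `[len, batch_size]` layout these metrics expect. The PAD id of 0 used below is a guess at `layer.Constants.PAD`, so treat this purely as a shape example.
```python
# Shape illustration only; assumes layer.Constants.PAD == 0, which is a guess.
import torch
preds = torch.tensor([[5, 5],
                      [6, 9],
                      [7, 0]])                    # [len=3, batch=2]
targets = torch.tensor([[5, 9],
                        [6, 9],
                        [0, 0]])                  # zero-padded targets
d1_sum, batch = distinct_1(preds, targets)
d2_sum, _ = distinct_2(preds, targets)
print(d1_sum / batch, d2_sum / batch)             # batch-averaged scores
```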
{
"source": "972d5defe3218bd62b741e6a2f11f5b3/riptable",
"score": 3
} |
#### File: riptable/benchmarks/bench_operators.py
```python
__all__ = [
'bench_op_add',
]
import itertools
import logging
import numpy as np
from numpy.random import default_rng
from itertools import product
from typing import List, Tuple
from .benchmark import timestamper
from .rand_data import rand_dataset
from .runner import create_comparison_dataset, create_trial_dataset, benchmark
from ..rt_categorical import Categorical
from ..rt_dataset import Dataset
from ..rt_numpy import empty
logger = logging.getLogger(__name__)
"""The logger for this module."""
def bench_op_add(**kwargs) -> Dataset:
# Implement a benchmark that uses the __add__ operator on two FastArrays
raise NotImplementedError()
# TODO: Implement benchmarks for other operators; for each one, need to try all applicable cases:
# * ndarray vs. FastArray
# * scalar OP array
# * array OP scalar
# * array OP array
# * for each of the above, test various combinations of dtypes as well e.g. add an int16 array to an int64 array
```
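The TODO above outlines what `bench_op_add` should cover. Below is a hedged sketch of one way to fill it in, reusing the setup/warmup/timing/`create_trial_dataset` pattern that the `bench_primops.py` benchmarks in the next file follow and the same helpers they import (`rand_array`, `create_trial_dataset`, `timestamper`, `empty`, `Dataset.hstack`); the parameter grid is illustrative and only the array-OP-array case is shown.
```python
# Sketch only -- not the project's implementation of bench_op_add.
import itertools
from typing import List
import numpy as np
from numpy.random import default_rng
from .benchmark import timestamper
from .rand_data import rand_array
from .runner import create_trial_dataset
from ..rt_dataset import Dataset
from ..rt_numpy import empty

def bench_op_add(**kwargs) -> Dataset:
    warmup_iters, iters = 1, 21
    rng_seeds = [12345]
    dtypes = [np.int16, np.int64, np.float64]
    data_lengths = [1_000, 100_000, 10_000_000]
    benchmark_data: List[Dataset] = []
    for rng_seed, lhs_dtype, rhs_dtype, data_length in itertools.product(
        rng_seeds, dtypes, dtypes, data_lengths
    ):
        # Re-initialize the RNG each time so the trial is repeatable.
        rng = default_rng(rng_seed)
        lhs = rand_array(rng, data_length, dtype=np.dtype(lhs_dtype))
        rhs = rand_array(rng, data_length, dtype=np.dtype(rhs_dtype))
        timing_data = empty(iters, dtype=np.int64)
        for is_warmup in (True, False):
            loop_count = warmup_iters if is_warmup else iters
            for i in range(loop_count):
                start_time_ns = timestamper()
                _ = lhs + rhs   # array OP array; scalar/ndarray variants would be swept similarly
                call_nanos = timestamper() - start_time_ns
                if not is_warmup:
                    timing_data[i] = call_nanos
        benchmark_data.append(
            create_trial_dataset(
                timing_data,
                {
                    "rng_seed": rng_seed,
                    "lhs_dtype": np.dtype(lhs_dtype),
                    "rhs_dtype": np.dtype(rhs_dtype),
                    "data_length": data_length,
                },
            )
        )
    return Dataset.hstack(benchmark_data, destroy=True)
```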
#### File: riptable/benchmarks/bench_primops.py
```python
__all__ = [
"bench_astype",
"bench_astype_numba",
"bench_astype_numpy",
"bench_bool_index",
"bench_bool_index_numpy",
"bench_compare_ops",
"bench_compare_ops_numpy",
"bench_mbget",
"bench_mbget_numba",
# comparisons
"compare_astype",
"compare_bool_index",
"compare_compare_ops",
"compare_mbget",
]
import itertools
import logging
import operator
from typing import List
import numpy as np
from numpy.random import default_rng
import numba as nb
from .benchmark import _timestamp_funcs
from .rand_data import rand_array, rand_fancyindex
from .runner import create_comparison_dataset, create_trial_dataset
from ..rt_enum import TypeRegister, NumpyCharTypes
from ..rt_dataset import Dataset
from ..rt_numpy import empty
from ..rt_utils import mbget, _mbget_2dims #, mbget_numba
logger = logging.getLogger(__name__)
"""The logger for this module."""
timestamper = _timestamp_funcs["get_nano_time"]
"""The timestamping function to use in benchmarks."""
# TODO: Additional benchmarks which would be useful for riptable development and comparison to other frameworks:
# * mbget vs. numpy fancy indexing (only on valid array indices -- -len(arr) <= x < len(arr))
# * mbget vs. numba-based equivalent to look for compiled code optimizations + thread scaling
# * indexing with a boolean mask (riptable vs. numba)
# * array conversion (i.e. elementwise type conversion) (arr1.astype(np.float32))
# * make sure to include the self-conversion case so that we look for optimizations there (like just calling memcpy)
# * equality and comparisons
# * elementwise array equality (arr1 == arr2)
# * array vs. scalar equality (arr == 123, arr == "foo", arr != '', etc.)
# * elementwise array comparison (arr1 < arr2)
# * array vs. scalar comparison (arr1 < 1.23, arr > 123, etc.)
# * it would also be useful (for demonstration purposes) to demo here how much faster these operations
# are on a string categorical compared to a normal array of strings (like the Categorical's .expand_array).
# * conversion-assignment, e.g. result[:] = arr[:]
def mbget_numba(aValues, aIndex) -> np.ndarray:
"""
Re-implementation of the 'mbget' fancy-indexing function with numba, for comparison with the riptide_cpp implementation.
    Parameters
    ----------
    aValues : np.ndarray
        Source array (1-D or 2-D) of numeric or byte/unicode string dtype.
    aIndex : np.ndarray
        Integer array of indices into ``aValues``; out-of-bounds entries yield the
        invalid/default value for the dtype (0 for string arrays).
    Returns
    -------
    np.ndarray
        Array with one element per entry of ``aIndex``, same dtype as ``aValues``.
"""
# make sure a aValues and aIndex are both numpy arrays
if isinstance(aValues, (list, tuple)):
aValues = TypeRegister.FastArray(aValues)
if isinstance(aIndex, (list, tuple)):
aIndex = TypeRegister.FastArray(aIndex)
if not isinstance(aValues, np.ndarray) or not isinstance(aIndex, np.ndarray):
raise TypeError(f"Values and index must be numpy arrays. Got {type(aValues)} {type(aIndex)}")
elif aValues.dtype.char == 'O':
raise TypeError(f"mbget does not support object types")
elif aIndex.dtype.char not in NumpyCharTypes.AllInteger:
raise TypeError(f"indices provided to mbget must be an integer type not {aIndex.dtype}")
if aValues.ndim == 2:
return _mbget_2dims(aValues, aIndex)
# TODO: probably need special code or parameter to set custom default value for NAN_TIME
if aValues.dtype.char in NumpyCharTypes.AllInteger + NumpyCharTypes.AllFloat:
result = _mbget_numeric(aValues, aIndex)
elif aValues.dtype.char in "SU":
result = _mbget_string(aValues, aIndex)
else:
raise Exception(f"mbget can't operate on an array of this type: {aValues.dtype}")
result = TypeRegister.newclassfrominstance(result, aValues)
return result
def _mbget_numeric(aValues, aIndex) -> np.ndarray:
result = empty(len(aIndex), dtype=aValues.dtype)
# Choose different implementation for signed vs. unsigned dtype.
# See comment in mbget_string for details.
_mbget_numeric_impl = _mbget_numeric_unsigned_impl if aIndex.dtype.kind == 'u' else _mbget_numeric_signed_impl
_mbget_numeric_impl(aValues, aIndex, result, result.inv)
return result
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_numeric_signed_impl(aValues, aIndex, result, default_val):
num_elmnts = len(aValues)
for i in nb.prange(aIndex.shape[0]):
# This has one less branch (in the code) than the riptide_cpp implementation of mbget,
# because numba handles the negative/wraparound indexing for us. So the conditional logic
# to handle the negative indexing is still there; it may or may not be in the generated
# machine code depending on how numba chooses to generate it.
index = aIndex[i]
result[i] = aValues[index] if -num_elmnts <= index < num_elmnts else default_val
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_numeric_unsigned_impl(aValues, aIndex, result, default_val):
num_elmnts = len(aValues)
for i in nb.prange(aIndex.shape[0]):
# This has one less branch (in the code) than the riptide_cpp implementation of mbget,
# because numba handles the negative/wraparound indexing for us. So the conditional logic
# to handle the negative indexing is still there; it may or may not be in the generated
# machine code depending on how numba chooses to generate it.
index = aIndex[i]
result[i] = aValues[index] if index < num_elmnts else default_val
#not using a default value here since we're handling byte strings only (default val. is 0)
def _mbget_string(aValues, aIndex) -> np.ndarray:
result = empty(len(aIndex), dtype=aValues.dtype)
    itemsize = aValues.dtype.itemsize  # bytes per element; the uint8 views below copy whole elements byte-by-byte
# Choose different implementation for signed vs. unsigned dtype.
# This is both for performance reasons and also because if we try to use the signed implementation
# with unsigned integer types, numba ends up doing extra/unnecessary conversions so the performance
# is poor; for that same reason, numba fails with an error on the uint64 type since it tries to cast
# the index value to a float before we use it as an array index (which isn't allowed).
# TODO: This decision could probably be pushed into the numba JIT-specialized generic so we don't need to choose here?
_mbget_string_impl = _mbget_string_unsigned_impl if aIndex.dtype.kind == 'u' else _mbget_string_signed_impl
_mbget_string_impl(aValues.view(np.uint8), aIndex, result.view(np.uint8), itemsize)
return result
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_string_signed_impl(aValues, aIndex, result, itemsize): # byte array
numstrings = aValues.shape[0] // itemsize
for i in nb.prange(aIndex.shape[0]):
index = aIndex[i]
if -numstrings <= index < numstrings:
str_idx = index if index >= 0 else numstrings + aIndex[i]
for j in range(itemsize):
result[itemsize * i + j] = aValues[itemsize * str_idx + j]
else:
for j in range(itemsize):
result[itemsize * i + j] = 0
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_string_unsigned_impl(aValues, aIndex, result, itemsize): # byte array
numstrings = aValues.shape[0] // itemsize
for i in nb.prange(aIndex.shape[0]):
index = aIndex[i]
if index < numstrings:
for j in range(itemsize):
result[itemsize * i + j] = aValues[itemsize * index + j]
else:
for j in range(itemsize):
result[itemsize * i + j] = 0
def astype_numba(arr, dst_dtype):
#only supports numeric-to-numeric type conversions
if arr.dtype.char in "SU" or dst_dtype.char in "SU":
        raise Exception("Only numeric-to-numeric type conversions are supported.")
result = empty(arr.shape[0], dtype=dst_dtype)
_astype_numba(arr, result)
return result
# numba seems to emit poor quality code for this simple loop, and the performance is
# massively worse when parallel=True is specified. (Tested with numba 0.48, 0.50.1)
# Manually splitting the loop so the input data is chunked (see the sketch below) does not improve the performance either.
@nb.njit(cache=True, parallel=False, nogil=True)
def _astype_numba(arr, result):
for i in nb.prange(len(arr)):
# conversion occurs implicitly, and numba only supports conversion
# between arrays of numeric types.
result[i] = arr[i]
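# Illustrative sketch (not from the original module): the "manually chunked" loop the
# comment above refers to; per that note, it did not improve performance either.
@nb.njit(cache=True, parallel=True, nogil=True)
def _astype_numba_chunked(arr, result, chunk_size):
    n_chunks = (arr.shape[0] + chunk_size - 1) // chunk_size
    for c in nb.prange(n_chunks):
        start = c * chunk_size
        stop = min(start + chunk_size, arr.shape[0])
        for i in range(start, stop):
            # conversion happens implicitly on assignment, as in _astype_numba above
            result[i] = arr[i]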
def bench_bool_index(**kwargs) -> Dataset:
warmup_iters = 0
iters = 21
rng_seeds = [12345]
data_dtypes = [
np.int16,
np.int32,
np.float64,
# TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
# run them under a 'detailed' / 'long-running' scenario
# np.dtype('S4'),
# np.dtype('S10'),
# np.dtype('<U8')
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
true_ratios = [
0.0,
0.2,
0.4,
0.6,
0.8,
1.0
]
setup_params = itertools.product(
rng_seeds,
data_dtypes,
data_lengths,
true_ratios
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
data_dtype,
data_length,
true_ratio,
) in setup_params:
# HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
# or otherwise being able to skip certain combinations of parameters (e.g. because
# they're invalid, non-realistic, or otherwise don't make sense).
# if np.iinfo(index_dtype).max < data_length:
# continue
#
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype))
mask = rng.random(data_length) < true_ratio
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
_ = data_array[mask]
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"data_dtype": np.dtype(data_dtype),
"data_length": data_length,
"true_ratio": true_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_bool_index_numpy(**kwargs) -> Dataset:
warmup_iters = 0
iters = 21
rng_seeds = [12345]
data_dtypes = [
np.int16,
np.int32,
np.float64,
# TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
# run them under a 'detailed' / 'long-running' scenario
# np.dtype('S4'),
# np.dtype('S10'),
# np.dtype('<U8')
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
true_ratios = [
0.0,
0.2,
0.4,
0.6,
0.8,
1.0
]
setup_params = itertools.product(
rng_seeds,
data_dtypes,
data_lengths,
true_ratios
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
data_dtype,
data_length,
true_ratio,
) in setup_params:
# HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
# or otherwise being able to skip certain combinations of parameters (e.g. because
# they're invalid, non-realistic, or otherwise don't make sense).
# if np.iinfo(index_dtype).max < data_length:
# continue
#
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype))
if hasattr(data_array, "_np"):
data_array = data_array._np
mask = rng.random(data_length) < true_ratio
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
_ = data_array[mask]
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"data_dtype": np.dtype(data_dtype),
"data_length": data_length,
"true_ratio": true_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_mbget(**kwargs) -> Dataset:
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 0
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
data_dtypes = [
np.int16,
np.int32,
np.float64,
# TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
# run them under a 'detailed' / 'long-running' scenario
np.dtype('S11'),
]
index_dtypes = [
np.int8,
np.uint8,
np.int16,
np.uint16,
np.int32,
np.uint32,
np.int64,
np.uint64,
# TODO: Add float32 / float64 once rand_fancyindex() supports them
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
index_lengths = [10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
setup_params = itertools.product(
rng_seeds,
data_dtypes,
index_dtypes,
data_lengths,
index_lengths,
invalid_ratios,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
data_dtype,
index_dtype,
data_length,
index_length,
invalid_ratio,
) in setup_params:
# HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
# or otherwise being able to skip certain combinations of parameters (e.g. because
# they're invalid, non-realistic, or otherwise don't make sense).
if np.iinfo(index_dtype).max < data_length:
continue
#
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype), invalid_ratio=invalid_ratio)
fancyindex = rand_fancyindex(
rng,
index_length,
dtype=np.dtype(index_dtype),
source_arr_len=data_length,
invalid_ratio=invalid_ratio,
)
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
mbget(data_array, fancyindex)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"data_dtype": np.dtype(data_dtype),
"index_dtype": np.dtype(index_dtype),
"data_length": data_length,
"index_length": index_length,
"invalid_ratio": invalid_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_mbget_numba(**kwargs) -> Dataset:
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 1
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
data_dtypes = [
np.int16,
np.int32,
np.float64,
# TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
# run them under a 'detailed' / 'long-running' scenario
np.dtype('S11'),
]
index_dtypes = [
np.int8,
np.uint8,
np.int16,
np.uint16,
np.int32,
np.uint32,
np.int64,
np.uint64,
# TODO: Add float32 / float64 once rand_fancyindex() supports them
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
index_lengths = [10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
setup_params = itertools.product(
rng_seeds,
data_dtypes,
index_dtypes,
data_lengths,
index_lengths,
invalid_ratios,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
data_dtype,
index_dtype,
data_length,
index_length,
invalid_ratio,
) in setup_params:
# HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
# or otherwise being able to skip certain combinations of parameters (e.g. because
# they're invalid, non-realistic, or otherwise don't make sense).
if np.iinfo(index_dtype).max < data_length:
continue
#
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype), invalid_ratio=invalid_ratio)
fancyindex = rand_fancyindex(
rng,
index_length,
dtype=np.dtype(index_dtype),
source_arr_len=data_length,
invalid_ratio=invalid_ratio,
)
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
mbget_numba(data_array, fancyindex)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"data_dtype": np.dtype(data_dtype),
"index_dtype": np.dtype(index_dtype),
"data_length": data_length,
"index_length": index_length,
"invalid_ratio": invalid_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_astype(**kwargs):
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 0
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
src_dtypes = [
np.int16,
np.int32,
np.float64,
# np.dtype('S11'),
]
dst_dtypes = [
np.int16,
np.int32,
np.float64,
# np.dtype('S11'),
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
setup_params = itertools.product(
rng_seeds,
src_dtypes,
dst_dtypes,
data_lengths,
invalid_ratios,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
src_dtype,
dst_dtype,
data_length,
invalid_ratio,
) in setup_params:
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
data_array.astype(dtype=dst_dtype)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"src_dtype": np.dtype(src_dtype),
"dst_dtype": np.dtype(dst_dtype),
"data_length": data_length,
"invalid_ratio": invalid_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_astype_numpy(**kwargs):
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 0
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
src_dtypes = [
np.int16,
np.int32,
np.float64,
# np.dtype('S11'),
]
dst_dtypes = [
np.int16,
np.int32,
np.float64,
# np.dtype('S11'),
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
setup_params = itertools.product(
rng_seeds,
src_dtypes,
dst_dtypes,
data_lengths,
invalid_ratios,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
src_dtype,
dst_dtype,
data_length,
invalid_ratio,
) in setup_params:
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
if hasattr(data_array, '_np'):
data_array = data_array._np
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
data_array.astype(dtype=dst_dtype)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"src_dtype": np.dtype(src_dtype),
"dst_dtype": np.dtype(dst_dtype),
"data_length": data_length,
"invalid_ratio": invalid_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_astype_numba(**kwargs):
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 1
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
src_dtypes = [
np.int16,
np.int32,
np.float64,
]
dst_dtypes = [
np.int16,
np.int32,
np.float64,
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
setup_params = itertools.product(
rng_seeds,
src_dtypes,
dst_dtypes,
data_lengths,
invalid_ratios,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
src_dtype,
dst_dtype,
data_length,
invalid_ratio,
) in setup_params:
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
### The actual function invocation ###
astype_numba(data_array, np.dtype(dst_dtype))
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"src_dtype": np.dtype(src_dtype),
"dst_dtype": np.dtype(dst_dtype),
"data_length": data_length,
"invalid_ratio": invalid_ratio
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_compare_ops(**kwargs):
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 0
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
arr1_dtypes = [
np.int16,
np.int32,
np.float64,
]
arr2_dtypes = [
np.int16,
np.int32,
np.float64,
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
invalid_ratios = [
0.0,
0.01,
0.1,
# TODO: Enable these additional values for the 'detailed' scenario
# 0.5,
# 0.9,
]
ops = [
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.ge,
operator.gt
]
setup_params = itertools.product(
rng_seeds,
arr1_dtypes,
arr2_dtypes,
data_lengths,
invalid_ratios,
ops,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
arr1_dtype,
arr2_dtype,
data_length,
invalid_ratio,
op,
) in setup_params:
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
arr1 = rand_array(rng, data_length, dtype=np.dtype(arr1_dtype), invalid_ratio=invalid_ratio)
arr2 = rand_array(rng, data_length, dtype=np.dtype(arr2_dtype), invalid_ratio=invalid_ratio)
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
                    ### The actual function invocation ###
op(arr1, arr2)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"arr1_dtype": np.dtype(arr1_dtype),
"arr2_dtype": np.dtype(arr2_dtype),
"operation": op.__name__,
"data_length": data_length,
"invalid_ratio": invalid_ratio,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def bench_compare_ops_numpy(**kwargs):
# TODO: Add additional dimensions:
# * number of threads
# * recycler on/off
# * different key multiplicity distributions (in the rand_fancyindex function)
# * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
# Fixed parameters which apply to all of the trials in this benchmark.
warmup_iters = 0
    iters = 21 # The duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
# Parameters we'll sweep over for the benchmark.
rng_seeds = [12345]
arr1_dtypes = [
np.int16,
np.int32,
np.float64,
]
arr2_dtypes = [
np.int16,
np.int32,
np.float64,
]
data_lengths = [
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
# TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
# and also take longer than typical trials
]
ops = [
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.ge,
operator.gt
]
setup_params = itertools.product(
rng_seeds,
arr1_dtypes,
arr2_dtypes,
data_lengths,
ops,
)
# Datasets containing timing data and parameters from the trials in this benchmark.
benchmark_data: List[Dataset] = []
for (
rng_seed,
arr1_dtype,
arr2_dtype,
data_length,
op,
) in setup_params:
# Setup phase. The data here is used for both the warmup and the real, timed function invocations.
#
# Make sure to re-initialize the RNG each time so we get a repeatable result.
rng = default_rng(rng_seed)
arr1 = rand_array(rng, data_length, dtype=np.dtype(arr1_dtype))._np
arr2 = rand_array(rng, data_length, dtype=np.dtype(arr2_dtype))._np
# Sweep over other parameters that aren't required by the setup phase.
other_params = [None]
for _ in other_params:
# Allocate an array to hold the raw timing data.
# TODO: Change to use TimeSpan?
timing_data = empty(iters, dtype=np.int64)
for is_warmup in (True, False):
loop_count = warmup_iters if is_warmup else iters
for i in range(loop_count):
start_time_ns = timestamper()
                    ### The actual function invocation ###
op(arr1, arr2)
### Store the timing results (if this was a real invocation).
call_nanos = timestamper() - start_time_ns
if not is_warmup:
timing_data[i] = call_nanos
# Create a mini Dataset with the timing results for this run.
# Capture the timing results along with the other options used for the function invocations.
trial_data = create_trial_dataset(
timing_data,
{
# Setup parameters
"rng_seed": rng_seed,
"arr1_dtype": np.dtype(arr1_dtype),
"arr2_dtype": np.dtype(arr2_dtype),
"operation": op.__name__,
"data_length": data_length,
# Other parameters
# (None)
},
)
benchmark_data.append(trial_data)
# hstack all of the individual Datasets together into one large Dataset and return it.
return Dataset.hstack(benchmark_data, destroy=True)
def compare_mbget():
return create_comparison_dataset(
{
"mbget": bench_mbget(),
"mbget_numba": bench_mbget_numba(),
}
)
def compare_astype():
return create_comparison_dataset(
{
"astype": bench_astype(),
"astype_numpy": bench_astype_numpy(),
"astype_numba": bench_astype_numba(),
}
)
def compare_bool_index():
return create_comparison_dataset(
{
"bool_index": bench_bool_index(),
"bool_index_numpy": bench_bool_index_numpy()
}
)
def compare_compare_ops():
return create_comparison_dataset(
{
"compare_ops": bench_compare_ops(),
"compare_ops_numpy": bench_compare_ops_numpy(),
}
)
```
#### File: riptable/riptable/conftest.py
```python
import pytest
import numpy
import pandas
import riptable as rt
def get_doctest_dataset_data():
return {
'ds_simple_1': rt.Dataset({'A': [0, 1, 6, 7], 'B': [1.2, 3.1, 9.6, 21]}),
'ds_simple_2': rt.Dataset({'X': [0, 1, 6, 9], 'C': [2.4, 6.2, 19.2, 53]}),
'ds_complex_1': rt.Dataset(
{'A': [0, 6, 9, 11], 'B': ['Q', 'R', 'S', 'T'], 'C': [2.4, 6.2, 19.2, 25.9]}
),
'ds_complex_2': rt.Dataset(
{
'A': [0, 1, 6, 10],
'B': ['Q', 'R', 'R', 'T'],
'E': [1.5, 3.75, 11.2, 13.1],
}
),
}
@pytest.fixture(autouse=True)
def docstring_imports(doctest_namespace):
doctest_namespace['np'] = doctest_namespace['numpy'] = numpy
doctest_namespace['pd'] = doctest_namespace['pandas'] = pandas
doctest_namespace['rt'] = doctest_namespace['riptable'] = rt
@pytest.fixture(autouse=True)
def docstring_merge_datasets(doctest_namespace):
doctest_namespace.update(get_doctest_dataset_data())
```
#### File: riptable/hypothesis_tests/test_categorical_property.py
```python
from collections import Counter
from typing import List, Tuple
import hypothesis
import pytest
import numpy as np
from hypothesis import given, HealthCheck
from hypothesis.strategies import (
composite,
shared,
integers,
lists,
booleans,
one_of,
)
from hypothesis.extra.numpy import (
arrays,
integer_dtypes,
unsigned_integer_dtypes,
datetime64_dtypes,
timedelta64_dtypes,
byte_string_dtypes,
unicode_string_dtypes,
)
from hypothesis.strategies import data
from .strategies.categorical_strategy import CategoricalStrategy
from .strategies.helper_strategies import one_darray_shape_strategy
from riptable import Categorical, hstack, FastArray
from riptable.rt_enum import CategoryMode
from riptable.Utils.teamcity_helper import is_running_in_teamcity
_MAX_SIZE = 1_000
def _get_category_to_count(categoricals) -> Counter:
if not isinstance(categoricals, list):
categorical = categoricals
        # wrap the single Categorical in a new list manually; calling list() on a Categorical would return its underlying representation instead
categoricals = list()
categoricals.append(categorical)
category_to_count = Counter()
for categorical in categoricals:
categories = categorical.categories()
multiplicities = categorical.grouping.ncountgroup
for category, multiplicity in zip(categories, multiplicities):
category_to_count[category] += multiplicity
return category_to_count
def _check_categorical(categorical: Categorical) -> Tuple[bool, str]:
valid: bool = True
errors: List[str] = list()
if not isinstance(categorical, Categorical):
valid = False
errors.append(
f"Categorical {categorical} should be of type {type(Categorical)}"
)
return valid, "\n".join(errors)
@composite
def one_of_categorical_values(draw):
cat_values = integers(
min_value=1, max_value=np.iinfo(np.int64).max
) # add - bytes(), characters(),
return draw(one_of(cat_values))
@pytest.mark.skipif(is_running_in_teamcity(), reason="Categorical generator needs to be rewritten for better performance before re-enabling this test to run in TeamCity builds.")
# 2020-09-09T21:02:22.3088485Z ================================== FAILURES ===================================
# 2020-09-09T21:02:22.3088746Z ________ test_categorical_ctor[CategoryMode.StringArray-integer_dtype] ________
# 2020-09-09T21:02:22.3088996Z
# 2020-09-09T21:02:22.3089415Z value_strategy = arrays(dtype=integer_dtypes(endianness='=', sizes=(64,)), shape=one_darray_shape_strategy(), elements=integers(min_value=1, max_value=9223372036854775807))
# 2020-09-09T21:02:22.3089925Z category_mode = <CategoryMode.StringArray: 1>
# 2020-09-09T21:02:22.3090026Z
# 2020-09-09T21:02:22.3090166Z > ???
# 2020-09-09T21:02:22.3090416Z E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.13 seconds (0 invalid ones and 4 exceeded maximum size). Try decreasing size of the data you're generating (with e.g.max_size or max_leaves parameters).
# 2020-09-09T21:02:22.3091373Z E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
@given(data())
@pytest.mark.parametrize(
"value_strategy",
[
# Categorical values must be nonempty
pytest.param(
lists(one_of_categorical_values(), min_size=5, max_size=10),
id="list",
),
pytest.param(
lists(
one_of_categorical_values(), min_size=1, unique=True, max_size=10
),
id="unique_list",
),
pytest.param(
arrays(
shape=one_darray_shape_strategy(max_shape_size=10),
dtype=integer_dtypes(endianness="=", sizes=(64,)),
elements=integers(min_value=1, max_value=np.iinfo(np.int64).max),
),
id="integer_dtype",
),
pytest.param(
arrays(
shape=one_darray_shape_strategy(),
dtype=integer_dtypes(endianness="=", sizes=(64,)),
elements=integers(min_value=1, max_value=np.iinfo(np.int64).max),
fill=integers(min_value=0, max_value=np.iinfo(np.int64).max),
unique=True
),
id="integer_dtype_unique",
marks=[
pytest.mark.skip,
pytest.mark.xfail(reason='Now throws a hypothesis.errors.InvalidArgument: Cannot fill unique array with non-NaN value 1'),
]
),
],
)
@pytest.mark.parametrize(
"category_mode", [CategoryMode.StringArray, CategoryMode.Dictionary]
)
def test_categorical_ctor(value_strategy, category_mode, data):
# cat is drawn from CategoricalStrategy
ordered: bool = data.draw(booleans())
cat: Categorical = data.draw(
CategoricalStrategy(
value_strategy, category_mode=category_mode, ordered=ordered
)
)
assert _check_categorical(cat)
# Validate properties on constructing a Categorical from a Categorical's values and categories.
values, categories = cat.expand_array, cat._categories
# For Dictionary Categoricals, 'categories' should be the original Categorical's category_map.
if category_mode == CategoryMode.Dictionary:
categories = cat.category_mapping
cat2 = Categorical(values, categories=categories, ordered=ordered)
assert _check_categorical(cat2)
# Validate properties on constructing a Categorical given a Categorical.
cat3 = Categorical(cat2)
assert _check_categorical(cat3)
# Validate properties on constructing a Categorical using _from_categorical which is a fast path
# that skips internal routine checks, sorting, or making values unique, but should be identical to
# the original Categorical.
from_categorical = cat._categories_wrap
cat4 = Categorical(
values,
categories=categories,
_from_categorical=from_categorical,
ordered=ordered,
)
assert _check_categorical(cat4)
# TODO: add equality checks for the Categoricals above since they should all be equivalent.
# TODO remove hypothesis suppress_health_check after investigating FailedHealthCheck for test_categorical_property.test_hstack[CategoryMode_StringArray-unsigned_integer_dtype]
# E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.06 seconds (0 invalid ones and 5 exceeded maximum size). Try decreasing size of the data you're generating (with e.g.max_size or max_leaves parameters).
# As is, the unsigned_integer_dtype case uses min and max values for data generation.
@pytest.mark.skip(reason="Categorical generator needs to be rewritten for better performance before re-enabling this test to run in TeamCity builds.")
@hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
@given(data())
@pytest.mark.parametrize(
"datatype, elements",
[
pytest.param(
integer_dtypes(endianness="=", sizes=(64,)),
integers(min_value=1, max_value=np.iinfo(np.int64).max),
id="integer_dtype",
),
pytest.param(
unsigned_integer_dtypes(endianness="=", sizes=(64,)),
integers(min_value=1, max_value=np.iinfo(np.int64).max),
id="unsigned_integer_dtype",
),
pytest.param(byte_string_dtypes(endianness="="), None, id="byte_string_dtype"),
pytest.param(
datetime64_dtypes(endianness="="),
None,
id="datetime64_dtype",
marks=[
pytest.mark.xfail(reason="RIP-375 - Categorical unsupported dtypes"),
pytest.mark.skip,
],
),
pytest.param(
timedelta64_dtypes(endianness="="),
None,
id="timedelta64_dtype",
marks=[
pytest.mark.xfail(reason="RIP-375 - Categorical unsupported dtypes"),
pytest.mark.skip,
],
),
pytest.param(
unicode_string_dtypes(endianness="="),
None,
id="unicode_string_dtype",
marks=[
pytest.mark.xfail(reason="RIP-375 - Categorical unsupported dtypes"),
pytest.mark.skip,
],
),
],
)
@pytest.mark.parametrize("category_mode", [CategoryMode.StringArray])
def test_hstack(datatype, elements, category_mode, data):
shape = one_darray_shape_strategy()
dtype = shared(datatype)
msg = f"Using dtype {dtype}\nUsing elements {elements}\n"
# Increasing the maximum number of runs by a 10x magnitude will result in FailedHealthCheck errors with slow data generation.
    max_runs = data.draw(integers(min_value=1, max_value=5))
    categoricals: List[Categorical] = list()
    for i in range(max_runs):
value_strategy = arrays(dtype, shape, elements=elements)
with_categories: bool = data.draw(booleans())
categoricals.append(
data.draw(
CategoricalStrategy(
value_strategy,
with_categories=with_categories,
category_mode=category_mode,
)
)
)
# Test #1: Length of hstacked categoricals should be the sum of the aggregate categoricals.
output = hstack(tuple(categoricals))
assert isinstance(output, Categorical)
assert len(output) == sum(map(len, categoricals)), (
f"Length of hstacked categoricals should be the sum of the aggregate categoricals\n"
+ msg
+ f"actual:\n{output}\nexpected:\n{categoricals}"
)
# Test #2: The hstacked categories should be equivalent to the set of aggregate categories.
expected_counts = _get_category_to_count(categoricals)
actual_counts = _get_category_to_count(output)
assert not set(actual_counts.elements()).symmetric_difference(
set(expected_counts.elements())
), (
f"The hstacked categories should be equivalent to the set of aggregate categories\n"
+ msg
+ f"actual {set(actual_counts.elements())}\nexpected {set(expected_counts.elements())}"
)
# Test #3: The hstacked multiplicity of categories should be equivalent to the multiplicity of aggregate categories.
# Test (2) is a subset of this equality check, but remains for clarity reasons when investigating failures.
assert expected_counts == actual_counts, (
f"The hstacked multiplicity of categories should be equivalent to the multiplicity of aggregate categories\n"
+ msg
+ f"actual {actual_counts}\nexpected {expected_counts}"
)
@pytest.mark.xfail(reason="RIP-375 - Categorical unsupported dtypes")
@pytest.mark.skipif(
is_running_in_teamcity(), reason="Please remove alongside xfail removal."
)
@pytest.mark.parametrize(
"data",
[
# ValueError: BuildArrayInfo array has bad dtype of 21
FastArray(["1970"], dtype="datetime64[Y]"),
# ValueError: BuildArrayInfo array has bad dtype of 22
FastArray([0], dtype="timedelta64[Y]"),
],
)
def test_falsifying_categorical_ctor(data):
Categorical(data)
@pytest.mark.skipif(True, reason="RIP-452: Multikey Categorical isin is consistent with its single key isin alternative")
def test_multikey_categorical_isin():
# See Python/core/riptable/tests/test_categorical.py test_multikey_categorical_isin as an example
pass
```
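The `one_of_categorical_values` helper above follows Hypothesis's draw-based pattern, which normally relies on the `hypothesis.strategies.composite` decorator so the helper can be called as a strategy (as it is inside `lists(...)`). Below is a minimal, self-contained sketch of that pattern; the strategy and test names are illustrative and not part of riptable.

```python
# Minimal sketch of a draw-based Hypothesis strategy; names are illustrative.
import numpy as np
from hypothesis import given
from hypothesis.strategies import composite, integers, lists, one_of

@composite
def categorical_value(draw):
    # Draw a single positive int64 value; further scalar strategies
    # (e.g. bytes(), characters()) could be added to the one_of below.
    return draw(one_of(integers(min_value=1, max_value=np.iinfo(np.int64).max)))

@given(lists(categorical_value(), min_size=1, max_size=10))
def test_values_are_positive(values):
    assert all(v >= 1 for v in values)
```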
#### File: riptable/hypothesis_tests/test_rt_numpy_property.py
```python
from math import nan
from typing import List
import numpy as np
import riptable as rt
from riptable import FastArray, FA
import pytest
import unittest
from numpy.testing import (
assert_array_equal,
assert_array_almost_equal,
assert_equal,
assert_allclose,
assert_almost_equal,
)
import hypothesis
from hypothesis import assume, event, example, given, HealthCheck
from hypothesis.extra.numpy import (
arrays,
boolean_dtypes,
floating_dtypes,
integer_dtypes,
unsigned_integer_dtypes,
)
from hypothesis.strategies import one_of
# riptable custom Hypothesis strategies
from .strategies.helper_strategies import (
generate_list_ndarrays,
generate_lists,
generate_ndarrays,
ndarray_shape_strategy,
one_darray_shape_strategy,
)
from riptable.Utils.teamcity_helper import is_running_in_teamcity
class TestCat2Keys:
@pytest.mark.skipif(
is_running_in_teamcity(), reason="Hypothesis generation taking too long."
)
@given(keys=generate_list_ndarrays())
def test_cat2keys_nested_array(self, keys):
key1, key2 = keys
multi_cat = rt.cat2keys(key1, key2)
print(f"key1 {repr(key1)}\nkey2: {repr(key2)}")
@pytest.mark.skipif(
is_running_in_teamcity(), reason="Hypothesis generation taking too long."
)
@given(keys=one_of(generate_lists(), generate_ndarrays()))
def test_cat2keys(self, keys):
key1, key2 = keys
multi_cat = rt.cat2keys(key1, key2)
assert len(key1) == len(key2) # add test to check different length lists
# these are the expected entries in the multi key categorical dictionary
n = len(key1)
expected_key1 = set(rt.FastArray([k for _ in range(n) for k in key1]))
expected_key2 = set(rt.FastArray([k for k in key2 for _ in range(n)]))
key_itr = iter(multi_cat.category_dict)
actual_key1 = set(multi_cat.category_dict[next(key_itr)])
actual_key2 = set(multi_cat.category_dict[next(key_itr)])
not_nan = lambda x: not np.isnan(x)
assert not set(
filter(not_nan, expected_key1.symmetric_difference(actual_key1))
), f"\nexpected {expected_key1}\nactual {actual_key1}"
assert not set(
filter(not_nan, expected_key2.symmetric_difference(actual_key2))
), f"\nexpected {expected_key2}\nactual {actual_key2}"
# need to handle tuple ordering and string dtype discrepancies
# Taking the entries one by one of expected_key1 and expected_key2 should produce the
# cartesian product of key1 and key2.
# expected_product = {(k1, k2) for k1, k2 in itertools.product(key1, key2)}
# actual_product = {(k1, k2) for k1, k2 in zip(actual_key1, actual_key2)}
# not_nan = lambda tup: not np.isnan(tup[0]) or not np.isnan(tup[1])
# assert not set(filter(not_nan, expected_product.symmetric_difference(actual_product))), f"expected {expected_product}\nactual {actual_product}\nmulti_cat {self.print_cat(multi_cat)}\n{self.print_keys(keys)}"
```
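As a rough, non-normative illustration of the cartesian-product behaviour that `TestCat2Keys` exercises, a small `cat2keys` call might look like the sketch below. It assumes riptable is importable; the exact key names inside `category_dict` depend on the inputs, so the loop simply mirrors the iteration pattern used in the test.

```python
# Illustrative sketch only; assumes riptable is installed.
import riptable as rt

key1 = rt.FastArray([1, 2, 3])
key2 = rt.FastArray([10, 20, 30])

mk = rt.cat2keys(key1, key2)  # multikey Categorical over the key1 x key2 product

# Each key array in category_dict is expected to enumerate the cartesian product,
# i.e. len(key1) * len(key2) entries per key column.
for name in mk.category_dict:
    col = mk.category_dict[name]
    print(name, len(col), col)
```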
#### File: riptable/riptable/rt_datetime.py
```python
__all__ = ['DateTimeBase', 'DateTimeNano', 'TimeSpan', 'Date', 'DateSpan', 'DateTimeUTC',
'DateTimeNanoScalar', 'TimeSpanScalar', 'DateScalar', 'DateSpanScalar',
'parse_epoch', 'timestring_to_nano', 'datestring_to_nano', 'datetimestring_to_nano',
'strptime_to_nano']
import numpy as np
from datetime import datetime as dt
from datetime import date, timezone
from dateutil import tz
import time
import warnings
# import starfish as sf
import riptide_cpp as rc
from typing import Union, Tuple, List, Optional
from .rt_fastarray import FastArray
from .rt_enum import TypeRegister, DisplayArrayTypes, DisplayLength, DisplayJustification, DisplayTextDecoration, \
TypeId, TimeFormat, NumpyCharTypes, INVALID_DICT, DayOfWeek, SDSFlag, MATH_OPERATION
from .rt_numpy import mask_ori, mask_andi, mask_xori, searchsorted, arange, putmask, isnan, empty, sum, zeros, full, \
hstack
from .rt_hstack import hstack_any
from .rt_timers import *
from .Utils.rt_display_properties import ItemFormat
from .Utils.rt_metadata import MetaData, meta_from_version, META_VERSION
from .rt_categorical import Categorical
NANOS_PER_MICROSECOND = 1_000
NANOS_PER_MILLISECOND = 1_000_000
NANOS_PER_SECOND = 1_000_000_000
NANOS_PER_MINUTE = NANOS_PER_SECOND * 60
NANOS_PER_HOUR = NANOS_PER_MINUTE * 60
NANOS_PER_DAY = NANOS_PER_HOUR * 24
NANOS_PER_YEAR = NANOS_PER_DAY * 365
NANOS_PER_LEAPYEAR = NANOS_PER_DAY * 366
NANOS_AT_2000 = (NANOS_PER_YEAR * 30) + (NANOS_PER_DAY * 7)
SECONDS_PER_DAY = 60 * 60 * 24
DAYS_PER_YEAR = 365
DAYS_PER_LEAPYEAR = 366
DAYS_AT_2000 = (DAYS_PER_YEAR * 30) + 7
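# 7 leap days fall between 1970 and 2000 (1972, 1976, 1980, 1984, 1988, 1992, 1996),
# hence the "+ 7" above. The table below holds days since the epoch at midnight on
# Jan 1 for the years 1970 - 2040.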
UTC_1970_DAY_SPLITS = FastArray([
0, # 1970
DAYS_PER_YEAR,
2 * DAYS_PER_YEAR,
(3 * DAYS_PER_YEAR) + (1),
(4 * DAYS_PER_YEAR) + (1),
(5 * DAYS_PER_YEAR) + (1),
(6 * DAYS_PER_YEAR) + (1),
(7 * DAYS_PER_YEAR) + (2),
(8 * DAYS_PER_YEAR) + (2),
(9 * DAYS_PER_YEAR) + (2),
(10 * DAYS_PER_YEAR) + (2), # 1980
(11 * DAYS_PER_YEAR) + (3),
(12 * DAYS_PER_YEAR) + (3),
(13 * DAYS_PER_YEAR) + (3),
(14 * DAYS_PER_YEAR) + (3),
(15 * DAYS_PER_YEAR) + (4),
(16 * DAYS_PER_YEAR) + (4),
(17 * DAYS_PER_YEAR) + (4),
(18 * DAYS_PER_YEAR) + (4),
(19 * DAYS_PER_YEAR) + (5),
(20 * DAYS_PER_YEAR) + (5), # 1990
(21 * DAYS_PER_YEAR) + (5),
(22 * DAYS_PER_YEAR) + (5),
(23 * DAYS_PER_YEAR) + (6),
(24 * DAYS_PER_YEAR) + (6),
(25 * DAYS_PER_YEAR) + (6),
(26 * DAYS_PER_YEAR) + (6),
(27 * DAYS_PER_YEAR) + (7),
(28 * DAYS_PER_YEAR) + (7),
(29 * DAYS_PER_YEAR) + (7),
DAYS_AT_2000, # 2000
DAYS_AT_2000 + DAYS_PER_LEAPYEAR,
DAYS_AT_2000 + (2 * DAYS_PER_YEAR) + (1),
DAYS_AT_2000 + (3 * DAYS_PER_YEAR) + (1),
DAYS_AT_2000 + (4 * DAYS_PER_YEAR) + (1),
DAYS_AT_2000 + (5 * DAYS_PER_YEAR) + (2),
DAYS_AT_2000 + (6 * DAYS_PER_YEAR) + (2),
DAYS_AT_2000 + (7 * DAYS_PER_YEAR) + (2),
DAYS_AT_2000 + (8 * DAYS_PER_YEAR) + (2),
DAYS_AT_2000 + (9 * DAYS_PER_YEAR) + (3),
DAYS_AT_2000 + (10 * DAYS_PER_YEAR) + (3), # 2010
DAYS_AT_2000 + (11 * DAYS_PER_YEAR) + (3),
DAYS_AT_2000 + (12 * DAYS_PER_YEAR) + (3),
DAYS_AT_2000 + (13 * DAYS_PER_YEAR) + (4),
DAYS_AT_2000 + (14 * DAYS_PER_YEAR) + (4),
DAYS_AT_2000 + (15 * DAYS_PER_YEAR) + (4),
DAYS_AT_2000 + (16 * DAYS_PER_YEAR) + (4),
DAYS_AT_2000 + (17 * DAYS_PER_YEAR) + (5),
DAYS_AT_2000 + (18 * DAYS_PER_YEAR) + (5),
DAYS_AT_2000 + (19 * DAYS_PER_YEAR) + (5),
DAYS_AT_2000 + (20 * DAYS_PER_YEAR) + (5), # 2020
DAYS_AT_2000 + (21 * DAYS_PER_YEAR) + (6),
DAYS_AT_2000 + (22 * DAYS_PER_YEAR) + (6),
DAYS_AT_2000 + (23 * DAYS_PER_YEAR) + (6),
DAYS_AT_2000 + (24 * DAYS_PER_YEAR) + (6),
DAYS_AT_2000 + (25 * DAYS_PER_YEAR) + (7),
DAYS_AT_2000 + (26 * DAYS_PER_YEAR) + (7),
DAYS_AT_2000 + (27 * DAYS_PER_YEAR) + (7),
DAYS_AT_2000 + (28 * DAYS_PER_YEAR) + (7),
DAYS_AT_2000 + (29 * DAYS_PER_YEAR) + (8),
DAYS_AT_2000 + (30 * DAYS_PER_YEAR) + (8), # 2030
DAYS_AT_2000 + (31 * DAYS_PER_YEAR) + (8),
DAYS_AT_2000 + (32 * DAYS_PER_YEAR) + (8),
DAYS_AT_2000 + (33 * DAYS_PER_YEAR) + (9),
DAYS_AT_2000 + (34 * DAYS_PER_YEAR) + (9),
DAYS_AT_2000 + (35 * DAYS_PER_YEAR) + (9),
DAYS_AT_2000 + (36 * DAYS_PER_YEAR) + (9),
DAYS_AT_2000 + (37 * DAYS_PER_YEAR) + (10),
DAYS_AT_2000 + (38 * DAYS_PER_YEAR) + (10),
DAYS_AT_2000 + (39 * DAYS_PER_YEAR) + (10),
DAYS_AT_2000 + (40 * DAYS_PER_YEAR) + (10), # 2040
])
# UTC @ midnight, years 1970 - 2040
UTC_1970_SPLITS = FastArray([
0, # 1970
NANOS_PER_YEAR,
2 * NANOS_PER_YEAR,
(3 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
(4 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
(5 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
(6 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
(7 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
(8 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
(9 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
(10 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2), # 1980
(11 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
(12 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
(13 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
(14 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
(15 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
(16 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
(17 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
(18 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
(19 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
(20 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5), # 1990
(21 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
(22 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
(23 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
(24 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
(25 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
(26 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
(27 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
(28 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
(29 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
NANOS_AT_2000, # 2000
NANOS_AT_2000 + NANOS_PER_LEAPYEAR,
NANOS_AT_2000 + (2 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
NANOS_AT_2000 + (3 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
NANOS_AT_2000 + (4 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 1),
NANOS_AT_2000 + (5 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
NANOS_AT_2000 + (6 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
NANOS_AT_2000 + (7 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
NANOS_AT_2000 + (8 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 2),
NANOS_AT_2000 + (9 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
NANOS_AT_2000 + (10 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3), # 2010
NANOS_AT_2000 + (11 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
NANOS_AT_2000 + (12 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 3),
NANOS_AT_2000 + (13 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
NANOS_AT_2000 + (14 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
NANOS_AT_2000 + (15 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
NANOS_AT_2000 + (16 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 4),
NANOS_AT_2000 + (17 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
NANOS_AT_2000 + (18 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
NANOS_AT_2000 + (19 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5),
NANOS_AT_2000 + (20 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 5), # 2020
NANOS_AT_2000 + (21 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
NANOS_AT_2000 + (22 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
NANOS_AT_2000 + (23 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
NANOS_AT_2000 + (24 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 6),
NANOS_AT_2000 + (25 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
NANOS_AT_2000 + (26 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
NANOS_AT_2000 + (27 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
NANOS_AT_2000 + (28 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 7),
NANOS_AT_2000 + (29 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 8),
NANOS_AT_2000 + (30 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 8), # 2030
NANOS_AT_2000 + (31 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 8),
NANOS_AT_2000 + (32 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 8),
NANOS_AT_2000 + (33 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 9),
NANOS_AT_2000 + (34 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 9),
NANOS_AT_2000 + (35 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 9),
NANOS_AT_2000 + (36 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 9),
NANOS_AT_2000 + (37 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 10),
NANOS_AT_2000 + (38 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 10),
NANOS_AT_2000 + (39 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 10),
NANOS_AT_2000 + (40 * NANOS_PER_YEAR) + (NANOS_PER_DAY * 10), # 2040
])
MATLAB_EPOCH_DATENUM = 719529
EPOCH_DAY_OF_WEEK = DayOfWeek.Thursday
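# 1970-01-01 fell on a Thursday, hence the epoch day-of-week constant above.
# Cumulative day counts preceding the 1st of each month (non-leap and leap years),
# used to turn a day-of-year offset into a month and day-of-month.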
YDAY_SPLITS = FastArray([0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334])
YDAY_SPLITS_LEAP = FastArray([0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335])
MONTH_STR_ARRAY = FastArray(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
# need to hard code the nano cutoffs because FastArray can't do math yet
UTC_YDAY_SPLITS = FastArray([
(NANOS_PER_DAY * 0),
(NANOS_PER_DAY * 31),
(NANOS_PER_DAY * 59),
(NANOS_PER_DAY * 90),
(NANOS_PER_DAY * 120),
(NANOS_PER_DAY * 151),
(NANOS_PER_DAY * 181),
(NANOS_PER_DAY * 212),
(NANOS_PER_DAY * 243),
(NANOS_PER_DAY * 273),
(NANOS_PER_DAY * 304),
(NANOS_PER_DAY * 334)
])
UTC_YDAY_SPLITS_LEAP = FastArray([
(NANOS_PER_DAY * 0),
(NANOS_PER_DAY * 31),
(NANOS_PER_DAY * 60),
(NANOS_PER_DAY * 91),
(NANOS_PER_DAY * 121),
(NANOS_PER_DAY * 152),
(NANOS_PER_DAY * 182),
(NANOS_PER_DAY * 213),
(NANOS_PER_DAY * 244),
(NANOS_PER_DAY * 274),
(NANOS_PER_DAY * 305),
(NANOS_PER_DAY * 335)
])
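# Nanosecond equivalents of YDAY_SPLITS / YDAY_SPLITS_LEAP: offsets from the start
# of the year to midnight on the 1st of each month.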
TIME_FORMATS = {
1: "%Y%m%d", # ordinal date
2: "%#H:%M %p", # ms from midnight
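    # NOTE: the '#' flag (suppress zero padding) is Windows-specific; glibc strftime uses '-'.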
3: "%Y%m%d %H:%M:%S",
4: "%H:%M:%S",
5: "%H:%M"
}
# ------------------------------------------------------------------------------------
def strptime_to_nano(dtstrings, format, from_tz=None, to_tz='NYC'):
'''
Converts datetime string to DateTimeNano object with user-specified format.
Parameters
----------
dtstrings : array of timestrings
format : timestring format
Currently supports the following escape codes:
Date:
-----
%y Year without century as zero-padded decimal number.
%Y Year with century as decimal number.
%m Month as a decimal number (with or without zero-padding).
%B Full month name: ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
%b Abbreviated month name: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
%d Day of the month as a decimal number (with or without zero-padding).
Time:
-----
%H Hour (24-hour clock) as a decimal number (with or without zero-padding).
            (Note: if a %p formatter is present, this will be interpreted as a 12-hour clock hour)
%I Hour (12-hour clock) as a decimal number (with or without zero-padding).
(Note: unlike %H, must be 1-12)
%p Locale’s equivalent of either AM or PM.
%M Minute as a decimal number (with or without zero-padding).
%S Second as a decimal number (with or without zero-padding).
from_tz : a string for the timezone of origin: 'NYC', 'GMT', 'DUBLIN', etc.
to_tz : a string for the timezone that the time will be displayed in
Other Notes:
------------
Works best with timestrings that include a date:
- If no year is present in the string, an invalid time will be returned for all values.
- If no form of year/month/day is present, values will yield a time in 1970.
Consider using timestring_to_nano(), which also will accept one datestring for all times.
If the timestring ends in a '.', the following numbers will be parsed as a second fraction. This happens
automatically, no escape character is required in the format string.
If no time escape characters are present, will return midnight at all date values.
If formatted correctly, consider using datestring_to_nano()
Examples
--------
Date, with/without padding:
>>> dt = FastArray(['02/01/1992', '2/1/1992'])
>>> fmt = '%m/%d/%Y'
>>> strptime_to_nano(dt, fmt, from_tz='NYC')
DateTimeNano([19920201 00:00:00.000000000, 19920201 00:00:00.000000000])
Date + 24-hour clock:
>>> dt = FastArray(['02/01/1992 7:48:30', '2/1/1992 19:48:30'])
>>> fmt = '%m/%d/%Y %H:%M:%S'
>>> strptime_to_nano(dt, fmt, from_tz='NYC')
DateTimeNano([19920201 07:48:30.000000000, 19920201 19:48:30.000000000])
Date + 12-hour clock + am/pm:
>>> dt = FastArray(['02/01/1992 7:48:30 AM', '2/1/1992 7:48:30 PM'])
>>> fmt = '%m/%d/%Y %I:%M:%S %p'
>>> strptime_to_nano(dt, fmt, from_tz='NYC')
DateTimeNano([19920201 07:48:30.000000000, 19920201 19:48:30.000000000])
Date + time + second fraction:
>>> dt = FastArray(['02/01/1992 7:48:30.123456789', '2/1/1992 15:48:30.000000006'])
>>> fmt = '%m/%d/%Y %H:%M:%S'
>>> strptime_to_nano(dt, fmt, from_tz='NYC')
DateTimeNano([19920201 07:48:30.123456789, 19920201 15:48:30.000000006])
'''
if isinstance(format, str):
format = format.encode()
nano_times = rc.StrptimeToNanos(dtstrings, format)
return DateTimeNano(nano_times, from_tz=from_tz, to_tz=to_tz)
# ------------------------------------------------------------------------------------
def _possibly_convert_cat(arr):
'''
    When a categorical is passed into DateTime functions, we extract the unique categories
and then re-expand at the end
Returns
-------
    samearray, None: if not a categorical
uniques, cat: if a categorical
'''
if isinstance(arr, TypeRegister.Categorical):
return arr.category_array, arr
return arr, None
# ------------------------------------------------------------------------------------
def datetimestring_to_nano(dtstring, from_tz=None, to_tz='NYC'):
'''
Converts datetime string to DateTimeNano object.
    By default, the timestrings are assumed to be in Eastern Time. If they are already in UTC, pass from_tz='GMT'.
Parameters
----------
dtstring : array of timestrings in format YYYY-MM-DD HH:MM:SS, YYYYMMDD HH:MM:SS.ffffff, etc. (bytestrings/unicode supported)
from_tz : a string for the timezone of origin: 'NYC', 'GMT', 'DUBLIN', etc.
to_tz : a string for the timezone that the time will be displayed in
returns DateTimeNano
See Also: timestring_to_nano(), datestring_to_nano()
Examples
--------
>>> dts = FA(['2012-12-12 12:34:56.001002', '20130303 1:14:15', '2008-07-06 15:14:13'])
>>> datetimestring_to_nano(dts, from_tz='NYC')
DateTimeNano([20121212 12:34:56.001002000, 20130303 01:14:15.000000000, 20080706 15:14:13.000000000])
'''
nano_times = rc.DateTimeStringToNanos(dtstring)
return DateTimeNano(nano_times, from_tz=from_tz, to_tz=to_tz)
# ------------------------------------------------------------------------------------
def datestring_to_nano(datestring, time=None, from_tz=None, to_tz='NYC'):
'''
Converts date string to DateTimeNano object (default midnight).
    By default, the timestrings are assumed to be in Eastern Time. If they are already in UTC, pass from_tz='GMT'.
Parameters
----------
datestring : array of datestrings in format YYYY-MM-DD or YYYYMMDD (bytestrings/unicode supported)
time : a single string or array of strings in the format HH:MM:SS.ffffff (bytestrings/unicode supported)
from_tz : a string for the timezone of origin: 'NYC', 'GMT', 'DUBLIN', etc.
to_tz : a string for the timezone that the time will be displayed in
    returns DateTimeNano
See Also: timestring_to_nano(), datetimestring_to_nano()
Examples
--------
Date only:
>>> dates = FA(['2018-01-01', '2018-01-02', '2018-01-03'])
>>> datestring_to_nano(dates, from_tz='NYC')
DateTimeNano([20180101 00:00:00.000000000, 20180102 00:00:00.000000000, 20180103 00:00:00.000000000])
With time:
>>> dates = FA(['2018-01-01', '2018-01-02', '2018-01-03'])
>>> datestring_to_nano(dates, time='9:30:00', from_tz='NYC')
DateTimeNano([20180101 09:30:00.000000000, 20180102 09:30:00.000000000, 20180103 09:30:00.000000000])
'''
nano_dates = rc.DateStringToNanos(datestring)
if time is None:
result = nano_dates
else:
if isinstance(time, (str, bytes)):
time = TypeRegister.FastArray([time])
time = rc.TimeStringToNanos(time)
result = nano_dates + time
result = DateTimeNano(result, from_tz=from_tz, to_tz=to_tz)
return result
# ------------------------------------------------------------------------------------
def timestring_to_nano(timestring, date=None, from_tz=None, to_tz='NYC'):
'''
Converts timestring to TimeSpan or DateTimeNano object.
    By default, the timestrings are assumed to be in Eastern Time. If they are already in UTC, pass from_tz='GMT'.
If a date is specified, a DateTimeNano object will be returned.
If a date is not specified, a TimeSpan will be returned.
Parameters:
-----------
timestring : array of timestrings in format HH:MM:SS, H:MM:SS, HH:MM:SS.ffffff (bytestrings/unicode supported)
date : a single string or array of date strings in format YYYY-MM-DD (bytestrings/unicode supported)
from_tz : a string for the timezone of origin: 'NYC', 'GMT', 'DUBLIN', etc.
to_tz : a string for the timezone that the time will be displayed in
returns TimeSpan or DateTimeNano
See Also: datestring_to_nano(), datetimestring_to_nano()
Examples
---------
Return TimeSpan:
>>> ts = FA(['1:23:45', '12:34:56.000100', ' 14:00:00'])
>>> timestring_to_nano(ts, from_tz='NYC')
TimeSpan([01:23:45.000000000, 12:34:56.000100000, 14:00:00.000000000])
With single date string:
>>> ts = FA(['1:23:45', '12:34:56', '23:22:21'])
>>> timestring_to_nano(ts, date='2018-02-01', from_tz='NYC')
DateTimeNano([20180201 01:23:45.000000000, 20180201 12:34:56.000000000, 20180201 23:22:21.000000000])
Multiple date strings:
>>> ts = FA(['1:23:45', '12:34:56', '23:22:21'])
>>> dts = FA(['2018-02-01', '2018-02-07', '2018-05-12'])
>>> timestring_to_nano(ts, date=dts, from_tz='NYC')
DateTimeNano([20180201 01:23:45.000000000, 20180207 12:34:56.000000000, 20180512 23:22:21.000000000])
'''
nano_times = rc.TimeStringToNanos(timestring)
if date is None:
result = TimeSpan(nano_times)
else:
if isinstance(date, (str, bytes)):
date = TypeRegister.FastArray([date])
date = rc.DateStringToNanos(date)
result = date + nano_times
result = DateTimeNano(result, from_tz=from_tz, to_tz=to_tz)
return result
# ===========================================================================================
def parse_epoch(etime, to_tz='NYC'):
"""Days since epoch and milliseconds since midnight from nanosecond timestamps.
Parameters
----------
etime : array-like
UTC nanoseconds.
to_tz : str, default 'NYC'
TimeZone short string - see riptable.rt_timezone.
        This routine previously did not take a timezone, so it defaults to the previous setting.
Used in the phonyx data loader.
Returns
-------
days : array (int32)
Days since epoch.
millis : array (float64)
Milliseconds since midnight.
"""
dtn = DateTimeNano(etime, from_tz='UTC', to_tz=to_tz)
return dtn.days_since_epoch, dtn.millis_since_midnight()
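# Illustrative usage (assuming `nanos` is an integer FastArray of UTC nanosecond timestamps):
#   days, millis = parse_epoch(nanos, to_tz='NYC')
#   # `days` is an int32 array of days since the epoch; `millis` is float64 ms since midnight.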
# ------------------------------------------------------------
def _apply_inv_mask(arr1, arr2, fillval=None, arr1_inv_mask=None, arr2_inv_mask=None):
"""Preserve NaN date and time values in the final result of date/time class operations.
Called by time fraction properties and math operations.
"""
if isinstance(arr1, np.ndarray):
if len(arr1) == 1:
# broadcast array of 1 path
if arr1[0] <= 0:
return TypeRegister.FastArray([INVALID_DICT[arr2.dtype.num]])
return arr2
else:
if arr1_inv_mask is None:
arr1_inv_mask = arr1.isnan()
if fillval is None:
# use the sentinel or nan for the return array type, e.g. year() returns int32
fillval = INVALID_DICT[arr2.dtype.num]
putmask(arr2, arr1_inv_mask, fillval)
# apply the invalid mask from an operation with another array
if arr2_inv_mask is not None:
# return invalid fill, fixes broadcasting if math operations
# was with a scalar or single item array
if np.isscalar(arr2_inv_mask):
if arr2_inv_mask:
arr2[:] = fillval
elif len(arr2_inv_mask) == 1:
if arr2_inv_mask[0]:
arr2[:] = fillval
else:
putmask(arr2, arr2_inv_mask, fillval)
return arr2
else:
# scalar path
if arr1 <= 0:
return INVALID_DICT[arr2.dtype.num]
return arr2
# ========================================================
class DateTimeBase(FastArray):
"""Base class for DateTimeNano and TimeSpan.
Both of these subclasses have times with nanosecond precision.
"""
DEFAULT_FORMATTER = time.strftime
PRECISION = 9
NAN_TIME = 0
# ------------------------------------------------------------
def __new__(cls, values):
instance = np.asarray(values).view(cls)
instance._display_length = DisplayLength.Long
return instance
# ------------------------------------------------------------
@property
def _fa(self):
return self.view(FastArray)
# ------------------------------------------------------------
@property
def display_length(self):
if not hasattr(self, '_display_length'):
self._display_length = DisplayLength.Long
return self._display_length
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def display_item(self, utcnano):
raise NotImplementedError(f"DateTimeBase subclasses need to override this method.")
# ------------------------------------------------------------
def _meta_dict(self, name=None):
raise NotImplementedError(f"DateTimeBase subclasses need to override this method.")
# ------------------------------------------------------------
def _as_meta_data(self, name=None):
# ** TODO: Date, DateSpan, DateTimeNano, TimeSpan all have very similar
# versions of this routine - collapse into one
if name is None:
name = self.get_name()
meta = MetaData(self._meta_dict(name))
return {meta['name']: self._fa}, [SDSFlag.OriginalContainer + SDSFlag.Stackable], meta.string
# ------------------------------------------------------------
def _build_sds_meta_data(self, name, **kwargs):
"""Build meta data for DateTimeNano
"""
meta = MetaData(self._meta_dict(name=name))
# for now there's only one array in this FastArray subclass
cols = []
tups = []
return meta, cols, tups
# ------------------------------------------------------------
def _build_string(self):
def qwrap(timestring):
return "".join(["'", timestring, "'"])
_slicesize = int(np.floor(DateTimeBase.MAX_DISPLAY_LEN / 2))
_asize = len(self)
# DFUNC = self.display_item
fmt, DFUNC = self.display_query_properties()
# print with break
if _asize > DateTimeBase.MAX_DISPLAY_LEN:
left_idx = self.view(FastArray)[:_slicesize]
right_idx = self.view(FastArray)[-_slicesize:]
left_strings = [qwrap(DFUNC(i, fmt)) for i in left_idx]
break_string = ["..."]
right_strings = [qwrap(DFUNC(i, fmt)) for i in right_idx]
all_strings = left_strings + break_string + right_strings
# print full
else:
all_strings = [qwrap(DFUNC(i, fmt)) for i in self]
result = ", ".join(all_strings)
return result
# ------------------------------------------------------------
@staticmethod
def _add_nano_ext(utcnano, timestr):
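        # Append the sub-second fraction of `utcnano` to an already formatted time string,
        # e.g. a nanosecond remainder of 500000123 becomes '.500000123'.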
precision = DateTimeBase.PRECISION
if precision > 0:
if precision > 9:
precision = 9
power = 10 ** precision
nanos = int(utcnano % power)
nanostr = str(nanos).zfill(precision)
timestr = timestr + "." + nanostr
return timestr
# ------------------------------------------------------------
def __str__(self):
return self._build_string()
# ------------------------------------------------------------
def __repr__(self):
return self.get_classname() + "([" + self._build_string() + "])"
# ------------------------------------------------------------
def __getitem__(self, fld):
result = self._fa.__getitem__(fld)
if isinstance(result, FastArray):
# possible fix for strides bug
# if result.strides[0] != result.itemsize:
# result = result.copy()
result = self.newclassfrominstance(result, self)
if np.isscalar(result):
return self.get_scalar(result)
return result
# -------------------------------------------------------------
def _math_error_string(self, value, operator, reverse=False):
if reverse:
a = value
b = self
else:
a = self
b = value
return f"unsupported operand type(s) for {operator}: {type(a).__name__} {type(b).__name__}"
# ------------------------------------------------------------
def _funnel_mathops(self, funcname, value):
'''
Wrapper for all math operations on Date and DateSpan.
Both subclasses need to take over:
_check_mathops_nano()
_check_mathops()
maybe... still testing
_build_mathops_result()
Easier to catch forbidden operations here.
'''
# if funcname in self.forbidden_mathops:
# raise TypeError(f'Cannot perform {funcname} on {self.__class__.__name__} object.')
inv_mask = self.isnan()
other_inv_mask = None
return_type = None
caller = self._fa
# check if operand has nano precision, set invalid, return type accordingly
value, other_inv_mask, return_type, caller = self._check_mathops_base(funcname, value, other_inv_mask, return_type, caller)
# perform main math operation on fast array
func = getattr(caller, funcname)
result = func(value)
# set return type, preserve invalids for non-nano operands
if return_type is None:
return_type, other_inv_mask = self._check_mathops(funcname, value)
# if return type is still None, returning invalid fill
if return_type is None:
return other_inv_mask
# apply invalid mask(s) and wrap result in final return type
result = self._build_mathops_result(value, result, inv_mask, other_inv_mask, return_type)
return result
# ------------------------------------------------------------
def copy(self, order='K'):
instance = self._fa.copy(order=order)
return self.newclassfrominstance(instance, self)
# ========================================================
class TimeStampBase():
"""Parent class for DateTimeNano and Date.
"""
def __init__(self):
pass
# ------------------------------------------------------------
def _year(self, arr, fix_dst=False):
"""
Parameters
----------
arr : array
Underlying FastArray or result of previous timezone fixup.
fix_dst : bool, default False
If True, adjust array's stored times to match display. (DateTimeNano only)
Returns
-------
int32 FastArray of the year. For example 2003 is the integer 2003.
"""
if fix_dst:
arr = self._timezone.fix_dst(arr)
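        # searchsorted(..., side='right') yields a 1-based index into the per-year split table
        # (entry 0 corresponds to 1970), so adding 1969 maps the index to a calendar year.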
result = self._year_splits.searchsorted(arr, side='right').astype(np.int32, copy=False) + 1969
return result
# ------------------------------------------------------------
def _month(self, arr=None, fix_dst=False):
'''
Internal year to avoid performing the daylight savings fixup multiple times.
'''
if arr is None:
if fix_dst:
arr = self._timezone.fix_dst(self._fa)
fix_dst = False
else:
arr = self._fa
year = self._year(arr, fix_dst=fix_dst)
startyear = arr - self._year_splits[year - 1970]
maskleap = (year % 4) == 0
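        # (year % 4) == 0 is sufficient here: the supported 1970-2040 range contains no
        # non-leap century year (2000 is divisible by 400 and therefore a leap year).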
# get the months for non-leaps
smonth = self._yearday_splits.searchsorted(startyear, side='right')
# get the months for leap and fix any leapyears with maskleap
putmask(smonth, maskleap, self._yearday_splits_leap.searchsorted(startyear, side='right'))
return smonth.astype(np.int32, copy=False).view(FastArray)
# ------------------------------------------------------------
def _preserve_invalid_comparison(self, caller, other, funcname):
"""Date and DateTimeNano have multiple values for nan (0 and integer sentinel).
Both of their compare checks need to preserve nans in the result the same way.
"""
func = getattr(caller, funcname)
result = func(other)
if funcname == '__ne__':
result += self.isnan()
else:
result *= self.isnotnan()
return result
# ========================================================
class DateBase(FastArray):
"""Parent class for Date and Datespan.
Both of these subclasses have times with day precision.
"""
# NAN_DATE = INVALID_DICT[np.dtype(np.int32).num]
NAN_DATE = 0
# ------------------------------------------------------------
def __new__(cls, arr, **kwargs):
return arr.view(cls)
# ------------------------------------------------------------
    def __init__(self, arr, **kwargs):
pass
# ------------------------------------------------------------
@property
def _fa(self):
return self.view(FastArray)
# ------------------------------------------------------------
def __str__(self):
return self._build_string()
# ------------------------------------------------------------
def __repr__(self):
return self.get_classname() + "([" + self._build_string() + "])"
# ------------------------------------------------------------
def strftime(self, format, dtype='O'):
'''
        Converts the underlying date values to an array of object strings or a scalar string.
This routine has not been sped up yet.
Other Parameters
----------------
dtype: defaults to 'O', can change to 'S' or 'U'
Examples
--------
>>> rt.Date(rt.utcnow(4)).strftime('%D')
array(['11/04/19', '11/04/19', '11/04/19', '11/04/19'], dtype=object)
See Also
---------
http://strftime.org for format strings
datetime.datetime.strftime
'''
if isinstance(self, np.ndarray):
            return np.asarray([dt.utcfromtimestamp(timestamp).strftime(format) for timestamp in self._fa * SECONDS_PER_DAY], dtype=dtype)
else:
return dt.strftime(dt.utcfromtimestamp(self * SECONDS_PER_DAY), format)
# ------------------------------------------------------------
@property
def display_length(self):
if not hasattr(self, '_display_length'):
self._display_length = DisplayLength.Long
return self._display_length
# # TODO uncomment when starfish is implemented and imported
# def _sf_display_query_properties(self):
# itemformat = sf.ItemFormat({'length':self.display_length,
# 'align':sf.DisplayAlign.Right})
# return itemformat, self.display_convert_func
# ------------------------------------------------------------
def display_query_properties(self):
# if TypeRegister.DisplayOptions.STARFISH:
# return self._sf_display_query_properties()
'''
Each instance knows how to format its time strings. The formatter is specified in TIME_FORMATS
The length property of item_format stores the index into TIME_FORMATS for the display_convert_func
'''
item_format = ItemFormat(
length=self.display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None,
)
convert_func = self.display_convert_func
return item_format, convert_func
# ------------------------------------------------------------
def _build_string(self):
_slicesize = int(np.floor(DateTimeBase.MAX_DISPLAY_LEN / 2))
_asize = len(self)
fmt, DFUNC = self.display_query_properties()
# print with break
if _asize > DateTimeBase.MAX_DISPLAY_LEN:
left_idx = self.view(FastArray)[:_slicesize]
right_idx = self.view(FastArray)[-_slicesize:]
left_strings = [f"'{DFUNC(i, fmt)}'" for i in left_idx]
break_string = ["..."]
right_strings = [f"'{DFUNC(i, fmt)}'" for i in right_idx]
all_strings = left_strings + break_string + right_strings
# print full
else:
all_strings = [f"'{DFUNC(i, fmt)}'" for i in self]
result = ", ".join(all_strings)
return result
def __getitem__(self, fld):
'''
Restore the Date/DateSpan class after the indexing operation.
'''
result = self._fa[fld]
if isinstance(result, np.ndarray):
# possible fix for strides bug
# if result.strides[0] != result.itemsize:
# result = result.copy()
return self.newclassfrominstance(result, self)
if np.isscalar(result):
return self.get_scalar(result)
return result
# ------------------------------------------------------------
def _funnel_mathops(self, funcname, value):
'''
Wrapper for all math operations on Date and DateSpan.
Both subclasses need to take over:
_check_mathops_nano()
_check_mathops()
maybe... still testing
_build_mathops_result()
Easier to catch forbidden operations here.
'''
if funcname in self.forbidden_mathops:
raise TypeError(f'Cannot perform {funcname} on {self.__class__.__name__} object.')
inv_mask = self.isnan()
other_inv_mask = None
return_type = None
caller = self._fa
# check if operand has nano precision, set invalid, return type accordingly
value, other_inv_mask, return_type, caller = self._check_mathops_nano(funcname, value, other_inv_mask, return_type, caller)
# perform main math operation on fast array
func = getattr(caller, funcname)
result = func(value)
# set return type, preserve invalids for non-nano operands
if return_type is None:
return_type, other_inv_mask = self._check_mathops(funcname, value)
# if return type is still None, returning invalid fill
if return_type is None:
return other_inv_mask
# apply invalid mask(s) and wrap result in final return type
result = self._build_mathops_result(value, result, inv_mask, other_inv_mask, return_type)
return result
# ------------------------------------------------------------
def _build_mathops_result(self, value, result, inv_mask, other_inv_mask, return_type):
# restore invalid for Date and other operand if necessary
# print('**DateBase._build_mathops_result')
# print('value',value)
# print('result',result)
# print('inv_mask',inv_mask)
# print('other_inv_mask',other_inv_mask)
# print('return type',return_type)
result = _apply_inv_mask(self, result, fillval=self.NAN_DATE, arr1_inv_mask=inv_mask, arr2_inv_mask=other_inv_mask)
if not isinstance(result, return_type):
if return_type == DateTimeNano:
try:
# base on original to_tz
# use a try, because this may be hit by TimeSpan operand (no timezone)
to_tz = value._timezone._to_tz
except:
to_tz = 'GMT'
result = DateTimeNano(result, from_tz='GMT', to_tz=to_tz)
else:
result = return_type(result)
return result
# -------------------------------------------------------------
def min(self, **kwargs):
'''
Earliest date / datespan in array.
Returns
-------
obj:`Date`
An array with length 1.
Note
----
This returns an array, not a scalar. However, broadcasting rules will apply to operations with it.
'''
return self.__class__([self._fa.min()])
# -------------------------------------------------------------
def max(self, **kwargs):
'''
Latest date / datespan in array.
Returns
-------
obj:`Date`
An array with length 1.
Note
----
This returns an array, not a scalar. However, broadcasting rules will apply to operations with it.
'''
return self.__class__([self._fa.max()])
def _meta_dict(self, name=None):
classname = self.__class__.__name__
if name is None:
name = classname
metadict = {
'name': name,
'typeid': getattr(TypeId, classname),
'classname': classname,
'ncols': 0,
'version': self.MetaVersion,
'author': 'python',
'instance_vars': {
'_display_length': self.display_length,
},
'_base_is_stackable': SDSFlag.Stackable
}
return metadict
# ------------------------------------------------------------
def _as_meta_data(self, name=None):
if name is None:
name = self.get_name()
meta = MetaData(self._meta_dict(name=name))
return {meta['name']: self._fa}, [SDSFlag.OriginalContainer + SDSFlag.Stackable], meta.string
# ------------------------------------------------------------
def _build_sds_meta_data(self, name):
meta = MetaData(self._meta_dict(name=name))
cols = []
tups = []
return meta, cols, tups
# ------------------------------------------------------------
@classmethod
def _from_meta_data(cls, arrdict, arrflags, meta):
meta = MetaData(meta)
instance = cls([*arrdict.values()][0])
# combine loaded meta variables with class defaults
vars = meta['instance_vars']
for k, v in cls.MetaDefault.items():
vars.setdefault(k, v)
for k, v in vars.items():
setattr(instance, k, v)
return instance
# ------------------------------------------------------------
def copy(self, order='K'):
instance = self._fa.copy(order=order)
return self.newclassfrominstance(instance, self)
# ------------------------------------------------------------
@classmethod
def newclassfrominstance(cls, instance, origin):
result = instance.view(cls)
result._display_length = origin.display_length
return result
# ========================================================
class Date(DateBase, TimeStampBase):
'''
    Date arrays have an underlying int32 array. The array values are the number of days since January 1st, 1970.
Can be initialized from integer date values, strings, or matlab ordinal dates.
Parameters:
-----------
arr : array, categorical, list, or scalar
from_matlab : indicates that values are from matlab datenum
format : if initialized with string, specify a format string for strptime to parse date information
otherwise, will assume format is YYYYMMDD
Examples
---------
From strings:
>>> datestrings = tile(np.array(['2018-02-01', '2018-03-01', '2018-04-01']), 3)
>>> Date(datestrings)
Date([2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01])
From riptable.Categorical (sometimes Matlab data comes in this way):
>>> c = Categorical(datestrings)
>>> c
Categorical([2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01]) Length: 9
FastArray([1, 2, 3, 1, 2, 3, 1, 2, 3], dtype=int8) Base Index: 1
FastArray(['2018-02-01', '2018-03-01', '2018-04-01'], dtype='<U10') Unique count: 3
>>> d = Date(c)
>>> d
Date([2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01, 2018-02-01, 2018-03-01, 2018-04-01])
From Matlab datenum:
>>> d = FA([737061.0, 737062.0, 737063.0, 737064.0, 737065.0])
    >>> Date(d, from_matlab=True)
Date([2018-01-01, 2018-01-02, 2018-01-03, 2018-01-04, 2018-01-05])
From riptable DateTimeNano:
>>> dtn = DateTimeNano.random(5)
>>> dtn
DateTimeNano([20150318 13:28:01.853344227, 20150814 17:34:43.991344669, 19761204 04:30:52.680683459, 20120524 06:44:13.482424912, 19830803 17:12:54.771824294])
>>> Date(dtn)
Date([2015-03-18, 2015-08-14, 1976-12-04, 2012-05-24, 1983-08-03])
'''
# for .SDS file format
MetaVersion = 1
MetaDefault = {
# vars for container loader
'name': 'Date',
'typeid': TypeId.Date,
'version': 0, # if no version, assume before versions implemented
'instance_vars': {
'_display_length': DisplayLength.Long
}
}
forbidden_mathops = ('__mul__', '__imul__')
def __new__(cls, arr, from_matlab=False, format=None):
instance = None
if isinstance(arr, list) or np.isscalar(arr):
arr = FastArray(arr)
if isinstance(arr, np.ndarray):
# if this the same class, do nothing
if not isinstance(arr, Date):
# sometimes matlab dates are categoricals
if isinstance(arr, TypeRegister.Categorical):
try:
cats = arr.category_array
# flip to correct integer before re-expanding
if cats.dtype.char in ('U', 'S'):
cats = cls._convert_datestring(cats).astype(np.int32, copy=False)
arr = TypeRegister.Categorical(arr._fa, cats)
arr = arr.expand_array
except:
raise TypeError(f'Could not re-expand categorical to array in mode {arr.category_mode.name}')
# fix datetimenano so the days match display (account for previous daylight savings fixup)
elif isinstance(arr, TypeRegister.DateTimeNano):
                    # there is a bug here -- a timezone fixup does not appear to be necessary
# arr = arr._timezone.fix_dst(arr._fa, arr._timezone._dst_cutoffs)
arr = arr._fa // NANOS_PER_DAY
# flip strings to days from 1970
if arr.dtype.char in ('U', 'S'):
arr = cls._convert_datestring(arr, format=format)
# flip matlab ordinal dates to days from 1970
if from_matlab:
arr = cls._convert_matlab_days(arr)
elif arr.dtype.char in NumpyCharTypes.AllInteger + NumpyCharTypes.AllFloat:
arr = arr.astype(np.int32, copy=False)
else:
raise TypeError(f'Could not initialize Date object with array of type {arr.dtype}.')
else:
raise TypeError(f'Date objects must be initialized with numeric or string arrays, lists or scalars. Got {type(arr)}')
instance = arr.view(cls)
instance._display_length = DisplayLength.Long
return instance
# ------------------------------------------------------------
def __init__(self, arr, from_matlab=False, format=None):
pass
# ------------------------------------------------------------
def get_scalar(self, scalarval):
return DateScalar(scalarval, _from=self)
# -------------------------------------------------------
def diff(self, periods=1):
'''
Returns
-------
DateSpan
'''
result = self._fa.diff(periods=periods)
return DateSpan(result)
# ------------------------------------------------------------
@classmethod
def _convert_datestring(cls, arr, format=None):
'''
For construction from array of strings or categorical.
'''
if format is None:
arr = datestring_to_nano(arr, from_tz='UTC')._fa // NANOS_PER_DAY
# default assumes YYYYMMDD
else:
arr = strptime_to_nano(arr, format, from_tz='UTC')._fa // NANOS_PER_DAY
return arr
# ------------------------------------------------------------
@classmethod
def _convert_matlab_days(cls, arr):
        '''
        TODO: move this to a more generic superclass - almost exactly the same as DateTimeNano._convert_matlab_days
        Parameters:
        -----------
        arr : array of matlab datenums (1 is 1-Jan-0000)
        Converts matlab datenums to an int32 array of days since the epoch (1970-01-01).
        '''
inv_mask = isnan(arr)
# matlab dates come in as float
arr = FastArray(arr, dtype=np.int32)
arr = arr - MATLAB_EPOCH_DATENUM
putmask(arr, inv_mask, cls.NAN_DATE)
return arr
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
@staticmethod
def display_convert_func(date_num, itemformat: ItemFormat):
# TODO: apply ItemFormat options that were passed in
return Date.format_date_num(date_num, itemformat)
# ------------------------------------------------------------
@staticmethod
def format_date_num(date_num, itemformat):
if date_num == DateBase.NAN_DATE or date_num == INVALID_DICT[np.dtype(np.int32).num]:
return 'Inv'
format_str = Date._parse_item_format(itemformat)
localzone = tz.gettz('GMT')
try:
timestr = dt.fromtimestamp((date_num * SECONDS_PER_DAY), timezone.utc)
timestr = timestr.astimezone(localzone)
timestr = timestr.strftime(format_str)
except:
raise ValueError(f'Date number {date_num} is not a valid value for Date() object.')
return timestr
# ------------------------------------------------------------
@staticmethod
def _parse_item_format(itemformat):
return '%Y-%m-%d'
# ------------------------------------------------------------
def fill_invalid(self, shape=None, dtype=None, inplace=True):
arr = self._fill_invalid_internal(shape=shape, dtype=self.dtype, fill_val=self.NAN_DATE, inplace=inplace)
if arr is None:
return
return Date(arr)
# ------------------------------------------------------------
def isnan(self):
"""Both NaN date (0) and integer sentinel value are considered NaN.
"""
return self._fa.isnanorzero()
# ------------------------------------------------------------
def isnotnan(self):
"""Both NaN date (0) and integer sentinel value are considered NaN.
"""
return ~self.isnan()
# ------------------------------------------------------------
@property
def yyyymmdd(self):
return DateTimeNano(self._fa * NANOS_PER_DAY, from_tz='GMT', to_tz='GMT').yyyymmdd
# ------------------------------------------------------------
@property
def _year_splits(self):
"""Midnght on Jan. 1st from 1970 - 1940 in utc nanoseconds."""
return UTC_1970_DAY_SPLITS
# ------------------------------------------------------------
@property
def _yearday_splits(self):
"""Midnight on the 1st of the month in dayssince the beginning of the year."""
return YDAY_SPLITS
# ------------------------------------------------------------
@property
def _yearday_splits_leap(self):
"""Midnight on the 1st of the month in days since the beginning of the year during a leap year."""
return YDAY_SPLITS_LEAP
# ------------------------------------------------------------
@property
def year(self):
'''
Returns integer array of year value
Currently limited to 1970 - 2040, add to UTC_1970_DAY_SPLITS table to expand range.
>>> d = Date(['2016-02-01', '2017-02-01', '2018-02-01'])
>>> d.year
FastArray([2016, 2017, 2018])
'''
year = self._year(self._fa, fix_dst=False)
return _apply_inv_mask(self, year)
# ------------------------------------------------------------
@property
def month(self, arr=None):
'''
        Returns one-based integer array of month values: 1=Jan, 2=Feb, etc.
>>> d = Date(['2000-02-29', '2018-12-25'])
>>> d.month
FastArray([ 2, 12])
'''
return _apply_inv_mask(self, self._month())
# ------------------------------------------------------------
@property
def monthyear(self, arr=None):
'''
Returns a string with three letter month + 4 digit year
Examples
--------
>>> d = Date(['2000-02-29', '2018-12-25'])
>>> d.monthyear
FastArray([ 'Feb2000','Dec2018'])
'''
month = self.month
yearstr = self.year.astype('S')
return MONTH_STR_ARRAY[month - 1] + yearstr
# ------------------------------------------------------------
@property
def is_leapyear(self):
'''
Returns boolean array, True when date was during a leap year.
>>> d = Date(['2000-01-01', '2001-01-01'])
>>> d.is_leapyear
FastArray([ True, False])
'''
year = self._year(self._fa, fix_dst=False)
arr = self._fa - self._year_splits[year - 1970]
maskleap = year % 4 == 0
return maskleap
# ------------------------------------------------------------
@property
def day_of_year(self):
'''
Returns one-based integer array where January 1st = 1
>>> d = Date(['2019-01-01', '2019-02-14'])
>>> d.day_of_year
FastArray([ 1, 45])
'''
year = self._year(self._fa, fix_dst=False)
arr = self._fa - self._year_splits[year - 1970]
arr += 1
return _apply_inv_mask(self, arr)
# ------------------------------------------------------------
@property
def day_of_month(self):
year = self._year(self._fa, fix_dst=False)
# subtract the days from start of year so all times are in MM-DD, etc.
startyear = self._fa - self._year_splits[year - 1970]
# treat the whole array like a non-leapyear
startmonth_idx = self._yearday_splits.searchsorted(startyear, side='right') - 1
monthtime = startyear - self._yearday_splits[startmonth_idx]
# fix up the leapyears with a different yearday split table
leapmask = (year % 4) == 0
startmonth_idx_leap = self._yearday_splits_leap.searchsorted(startyear[leapmask], side='right') - 1
monthtime[leapmask] = startyear[leapmask] - self._yearday_splits_leap[startmonth_idx_leap]
# unlike month, weekday, hour, etc. day of month starts at 1
monthday = monthtime + 1
return _apply_inv_mask(self, monthday)
# ------------------------------------------------------------
@property
def day_of_week(self):
'''
Returns array of integers from Monday (0) -> Sunday (6)
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.day_of_week
FastArray([0, 1, 2, 3, 4, 5, 6])
'''
arr = (self._fa + EPOCH_DAY_OF_WEEK) % 7
return _apply_inv_mask(self, arr)
# ------------------------------------------------------------
@property
def is_weekend(self):
'''
Returns boolean array, True when day of week is Saturday or Sunday
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.is_weekend
FastArray([False, False, False, False, False, True, True])
'''
return _apply_inv_mask(self, self.day_of_week > 4)
# ------------------------------------------------------------
@property
def is_weekday(self):
'''
Returns boolean array, True when day of week is Monday-Friday
>>> d = Date(['2019-02-11', '2019-02-12', '2019-02-13', '2019-02-14', '2019-02-15', '2019-02-16', '2019-02-17'])
>>> d.is_weekday
FastArray([ True, True, True, True, True, False, False])
'''
return _apply_inv_mask(self, self.day_of_week < 5)
# ------------------------------------------------------------
@property
def seconds_since_epoch(self):
'''
Many existing python datetime routines expect seconds since epoch.
This call is to eliminate "magic numbers" like 3600 from code.
'''
return _apply_inv_mask(self, self._fa * SECONDS_PER_DAY)
# ------------------------------------------------------------
@classmethod
def hstack(cls, dates):
'''
hstacks Date objects and returns a new Date object.
Will be called by riptable.hstack() if the first item in the sequence is a Date object.
Parameters:
-----------
dates : list or tuple of Date objects
>>> d1 = Date('2015-02-01')
>>> d2 = Date(['2016-02-01', '2017-02-01', '2018-02-01'])
>>> hstack([d1, d2])
Date([2015-02-01, 2016-02-01, 2017-02-01, 2018-02-01])
'''
# pass the subclass to the parent class routine
return hstack_any(dates, cls, Date)
# ------------------------------------------------------------
@classmethod
def range(cls, start, end=None, days=None, step=1, format=None, closed=None):
"""
Returns a Date object of dates from start date to end date.
Parameters
----------
start : str or int
Start date in int format YYYYMMDD, or string in ``format``.
end : str or int, optional
        End date in int format YYYYMMDD, or string in ``format``.
If not specified, days is required.
days : int, optional (required if ``end`` is None)
Number of days to generate.
step : int, optional, default 1
Spacing between date values.
format : str, optional
Format to convert start/end values if they are string
closed : `left`, `right`, or None (default)
If `left`, omit the end date.
If `right`, omit the start date.
If None, include both.
Only applies when constructing from start, end date with step of 1.
Examples
--------
>>> Date.range('2019-02-01', '2019-02-07')
Date([2019-02-01, 2019-02-02, 2019-02-03, 2019-02-04, 2019-02-05, 2019-02-06, 2019-02-07])
>>> Date.range('2019-02-01', '2019-02-07', step=2)
Date([2019-02-01, 2019-02-03, 2019-02-05])
>>> Date.range('2019-02-01', '2019-02-07', closed='right')
Date([2019-02-02, 2019-02-03, 2019-02-04, 2019-02-05, 2019-02-06, 2019-02-07])
Returns
-------
`Date`
Range of dates in given interval spaced by `step`.
"""
if isinstance(start, (int, np.integer)):
start = str(start)
# convert separately for more accurate error
if isinstance(start, (str, bytes)):
start = cls(start, format=format)._fa[0]
else:
raise TypeError(f'Start date must be string or integer. Got {type(start)}')
if end is None:
if days is None:
raise ValueError(f'Must set either ``end`` or ``days`` keyword.')
# compensate for step
end = start + (days * step)
end = cls(end)._fa[0]
else:
if isinstance(end, (int, np.integer)):
end = str(end)
if not isinstance(end, (str, bytes)):
                raise TypeError(f'End date must be string or integer. Got {type(end)}')
end = cls(end, format=format)._fa[0]
if days is None and step == 1:
# include one or both ends
if closed is None:
end += 1
elif closed == 'right':
end += 1
start += 1
elif closed == 'left':
pass
else:
raise ValueError(f'Closed has to be either "left", "right" or None. Got {closed}')
arr = arange(start, end, step, dtype=np.int32)
return cls(arr)
# ------------------------------------------------------------
def _date_compare_check(self, funcname, other):
'''
Funnel for all comparison operations.
Helps Date interact with DateTimeNano, TimeSpan.
'''
caller = self._fa
if isinstance(other, (DateSpan, TimeSpan, DateSpanScalar, TimeSpanScalar)):
raise TypeError(f'Cannot perform {funcname} comparison operation between {type(self)} and {type(other)}.')
elif isinstance(other, DateTimeNano):
caller = self._fa * NANOS_PER_DAY
to_tz = other._timezone._to_tz
# fix the timezone to match the display of the DateTimeNano
caller = DateTimeNano(self._fa * NANOS_PER_DAY, from_tz=to_tz, to_tz=to_tz)
# looks odd now, but keeps explicit branches in case any forbidden types appear
elif isinstance(other, Date):
other = other._fa
elif isinstance(other, (str, bytes)):
other = Date(other)
# Categorical will fall through to constructor too
elif isinstance(other, np.ndarray):
other = Date(other)
# let everything else fall through for FastArray to catch
# restore invalids
return self._preserve_invalid_comparison(caller, other, funcname)
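# Hedged sketch of the comparison funnel above (illustrative only; reprs approximate):
#   >>> d = Date(['2019-01-01', '2019-02-01'])
#   >>> d > '2019-01-15'          # strings are routed through the Date constructor first
#   FastArray([False,  True])
# Comparing against a DateSpan or TimeSpan raises a TypeError, as coded above.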
# -------------------COMPARISONS------------------------------
# ------------------------------------------------------------
def __ne__(self, other):
return self._date_compare_check('__ne__', other)
def __eq__(self, other):
return self._date_compare_check('__eq__', other)
def __ge__(self, other):
return self._date_compare_check('__ge__', other)
def __gt__(self, other):
return self._date_compare_check('__gt__', other)
def __le__(self, other):
return self._date_compare_check('__le__', other)
def __lt__(self, other):
return self._date_compare_check('__lt__', other)
# ------------------------------------------------------------
def __add__(self, value):
'''
Addition rules:
------------------
Date + Date = TypeError
Date + DateTimeNano = TypeError
Date + DateSpan = Date
Date + TimeSpan = DateTimeNano
All other operands will be treated as DateSpan and return Date.
'''
return self._funnel_mathops('__add__', value)
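# Hedged sketch of the addition rules listed above (illustrative only; reprs approximate):
#   >>> d = Date(['2019-02-01', '2019-02-02'])
#   >>> d + 1                  # plain integers are treated as a DateSpan of days -> Date
#   Date([2019-02-02, 2019-02-03])
#   >>> d + DateSpan([7])      # -> Date, one week later
#   Date([2019-02-08, 2019-02-09])
#   >>> d + d                  # -> TypeError, Date + Date is forbidden
# Adding a TimeSpan instead returns a DateTimeNano, per the docstring above.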
def __iadd__(self, value):
return self._funnel_mathops('__iadd__', value)
def __radd__(self, value):
return self._funnel_mathops('__add__', value)
# ------------------------------------------------------------
def __sub__(self, value):
'''
Subtraction rules:
------------------
Date - Date = DateSpan
Date - DateSpan = Date
Date - DateTimeNano = TimeSpan
Date - TimeSpan = DateTimeNano
All other operands will be treated as DateSpan and return Date.
'''
if isinstance(value, Date):
func = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
# need routine for int32 - int32 => int32 (operands have 0 as invalid, result has sentinel as invalid)
# right now, using the double return, gets recasted in the constructor
op = MATH_OPERATION.SUBDATETIMES
functup = (self, value)
result = func(functup, op, 0)
return DateSpan(result)
elif isinstance(value, DateTimeNano):
caller = DateTimeNano(self._fa * NANOS_PER_DAY, from_tz=value._timezone._from_tz)
return caller - value
else:
return self._funnel_mathops('__sub__', value)
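# Hedged sketch of the subtraction rules above (illustrative only; DateSpan reprs follow
# DateSpan.format_date_span defined later in this module):
#   >>> d1 = Date(['2019-02-08'])
#   >>> d2 = Date(['2019-02-01'])
#   >>> d1 - d2                # Date - Date -> DateSpan
#   DateSpan([7 days])
#   >>> d1 - DateSpan([7])     # Date - DateSpan -> Date
#   Date([2019-02-01])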
def __isub__(self, value):
return self._funnel_mathops('__isub__', value)
def __rsub__(self, value):
if isinstance(value, (Date, DateTimeNano)):
return value.__sub__(self)
else:
raise NotImplementedError
def __mul__(self, other): raise NotImplementedError
def __matmul__(self, other): raise NotImplementedError
# need to check properties to see if division is happening
#def __truediv__(self, other): raise NotImplementedError
#def __floordiv__(self, other): raise NotImplementedError
#def __mod__(self, other): raise NotImplementedError
#def __divmod__(self, other): raise NotImplementedError
def __pow__(self, other, modulo=None): raise NotImplementedError
def __lshift__(self, other): raise NotImplementedError
def __rshift__(self, other): raise NotImplementedError
def __and__(self, other): raise NotImplementedError
def __xor__(self, other): raise NotImplementedError
def __or__(self, other): raise NotImplementedError
def __rmul__(self, other): raise NotImplementedError
def __rmatmul__(self, other): raise NotImplementedError
def __rtruediv__(self, other): raise NotImplementedError
def __rfloordiv__(self, other): raise NotImplementedError
def __rmod__(self, other): raise NotImplementedError
def __rdivmod__(self, other): raise NotImplementedError
def __rpow__(self, other): raise NotImplementedError
def __rlshift__(self, other): raise NotImplementedError
def __rrshift__(self, other): raise NotImplementedError
def __rand__(self, other): raise NotImplementedError
def __rxor__(self, other): raise NotImplementedError
def __ror__(self, other): raise NotImplementedError
def __imul__(self, other): raise NotImplementedError
def __imatmul__(self, other): raise NotImplementedError
def __itruediv__(self, other): raise NotImplementedError
def __ifloordiv__(self, other): raise NotImplementedError
def __imod__(self, other): raise NotImplementedError
def __ipow__(self, other, modulo=None): raise NotImplementedError
def __ilshift__(self, other): raise NotImplementedError
def __irshift__(self, other): raise NotImplementedError
def __iand__(self, other): raise NotImplementedError
def __ixor__(self, other): raise NotImplementedError
def __ior__(self, other): raise NotImplementedError
def __neg__(self): raise NotImplementedError
def __pos__(self): raise NotImplementedError
def __abs__(self): raise NotImplementedError
def __invert__(self): raise NotImplementedError
def __complex__(self): raise NotImplementedError
def __int__(self): raise NotImplementedError
def __float__(self): raise NotImplementedError
def __round__(self, ndigits=0): raise NotImplementedError
def __trunc__(self): raise NotImplementedError
def __floor__(self): raise NotImplementedError
def __ceil__(self): raise NotImplementedError
# ------------------------------------------------------------
def _check_mathops(self, funcname, value):
'''
This gets called after a math operation has been performed on the Date's FastArray.
Return type may differ based on operation. Preserves invalids from original input.
Parameters:
-----------
funcname : name of ufunc
value : original operand in math operation
returns return_type, other_inv_mask
'''
# for now, make Date the default return type
return_type = Date
other_inv_mask = None
if isinstance(value, Date):
if funcname in ('__add__', '__iadd__', '__isub__'):
raise TypeError(f'Cannot perform {funcname} operation between Date and Date')
return_type = DateSpan
other_inv_mask = value.isnan()
# invalid gets early exit
elif isinstance(value, (int, float, np.number)):
# return same length Date full of NAN_DATE
if isnan(value):
# other_inv_mask will hold the final return
return_type = None
other_inv_mask = Date(self.copy_invalid())
elif isinstance(value, np.ndarray):
other_inv_mask = isnan(value)
return return_type, other_inv_mask
# ------------------------------------------------------------
def _check_mathops_nano(self, funcname, value, other_inv_mask, return_type, caller):
'''
Operations with TimeSpan and DateTimeNano will flip to nano precision, or raise an error.
Parameters:
-----------
funcname : name of ufunc
value : original operand in math operation
other_inv_mask : None, might be set in this routine
return_type : None, might be set to TimeSpan or DateTimeNano
caller : FastArray view of Date object.
'''
if isinstance(value, TimeSpan):
return_type = DateTimeNano
other_inv_mask = value.isnan()
caller = self._fa * NANOS_PER_DAY
value = value._fa.astype(np.int64)
elif isinstance(value, DateTimeNano):
if funcname in ('__add__', '__iadd__', '__isub__'):
raise TypeError(f'Cannot perform {funcname} operation between Date and DateTimeNano')
return_type = TimeSpan
other_inv_mask = value.isnan()
caller = self._fa * NANOS_PER_DAY
value = value._fa
return value, other_inv_mask, return_type, caller
# ------------------------------------------------------------
@classmethod
def _load_from_sds_meta_data(cls, name, arr, cols, meta):
'''
Restore Date class after loading from .sds file.
'''
# **** remove after implementing new metadata routine
if not isinstance(meta, MetaData):
meta = MetaData(meta)
arr = cls(arr)
# combine loaded meta variables with class defaults
vars = meta['instance_vars']
for k, v in cls.MetaDefault.items():
vars.setdefault(k, v)
for k, v in vars.items():
setattr(arr, k, v)
return arr
## ------------------------------------------------------------
@property
def start_of_month(self):
"""
Returns
-------
rt.Date array of the first day of self's month
"""
return self - self.day_of_month + 1
@property
def start_of_week(self):
"""
Returns
-------
rt.Date array of the previous Monday
"""
return self - self.day_of_week
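# Worked example of the two properties above (illustrative, not from the source):
# 2019-02-14 has day_of_month == 14, so start_of_month is date - 14 + 1 -> 2019-02-01;
# it falls on a Thursday (day_of_week == 3), so start_of_week is date - 3 -> 2019-02-11, a Monday.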
# ========================================================
class DateSpan(DateBase):
'''
DateSpan arrays have an underlying int32 array. The array values are in number of days.
These are created as the result of certain math operations on Date objects.
Parameters:
-----------
arr : numeric array, list, or scalar
unit : can set units to 'd' (day) or 'w' (week)
'''
# for .SDS file format
MetaVersion = 1
MetaDefault = {
# vars for container loader
'name': 'Date',
'typeid': TypeId.DateSpan,
'version': 0, # if no version, assume before versions implemented
'instance_vars': {
'_display_length': DisplayLength.Long
}
}
NAN_DATE = INVALID_DICT[7] # int32 sentinel
forbidden_mathops = ()
def __new__(cls, arr, unit=None):
instance = None
if isinstance(arr, list) or np.isscalar(arr):
arr = FastArray(arr, dtype=np.int32)
if isinstance(arr, np.ndarray):
if arr.dtype.char in NumpyCharTypes.AllInteger + NumpyCharTypes.AllFloat:
# is this unit really necessary?
if unit in ('W', 'w'):
arr = arr * 7
arr = arr.astype(np.int32, copy=False)
else:
raise TypeError(f'Could not initialize DateSpan object with array of type {arr.dtype}.')
else:
raise TypeError(f'DateSpan objects must be initialized with numeric arrays, lists or scalars. Got {type(arr)}')
instance = arr.view(cls)
instance._display_length = DisplayLength.Long
return instance
# ------------------------------------------------------------
def __init__(self, arr, unit=None):
pass
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def get_scalar(self, scalarval):
return DateSpanScalar(scalarval, _from=self)
# ------------------------------------------------------------
@staticmethod
def display_convert_func(date_num, itemformat: ItemFormat):
'''
Called by main rt_display() routine to format items in array correctly in Dataset display.
Also called by DateSpan's __str__() and __repr__().
'''
return DateSpan.format_date_span(date_num, itemformat)
# ------------------------------------------------------------
@staticmethod
def format_date_span(date_span, itemformat):
'''
Turn a single value in the DateSpan array into a string for display.
'''
if date_span == DateSpan.NAN_DATE:
return 'Inv'
if itemformat.length == DisplayLength.Short:
unit_str = 'd'
else:
if date_span == 1:
unit_str = ' day'
else:
unit_str = ' days'
# remove extra scalar wrapper
if isinstance(date_span, np.int32):
date_span = np.int32(date_span)
return str(date_span) + unit_str
# ------------------------------------------------------------
@property
def format_short(self): self._display_length = DisplayLength.Short
@property
def format_long(self): self._display_length = DisplayLength.Long
# ------------------------------------------------------------
@classmethod
def _load_from_sds_meta_data(cls, name, arr, cols, meta):
'''
Restore Date class after loading from .sds file.
'''
if not isinstance(meta, MetaData):
meta = MetaData(meta)
arr = cls(arr)
# combine loaded meta variables with class defaults
vars = meta['instance_vars']
for k, v in cls.MetaDefault.items():
vars.setdefault(k, v)
for k, v in vars.items():
setattr(arr, k, v)
return arr
# ------------------------------------------------------------
def fill_invalid(self, shape=None, dtype=None, inplace=True):
arr = self._fill_invalid_internal(shape=shape, dtype=self.dtype, inplace=inplace)
if arr is None:
return
return DateSpan(arr)
# ------------------------------------------------------------
@classmethod
def hstack(cls, dates):
'''
hstacks DateSpan objects and returns a new DateSpan object.
Will be called by riptable.hstack() if the first item in the sequence is a DateSpan object.
Parameters:
-----------
dates : list or tuple of DateSpan objects
>>> d1 = DateSpan([1])
>>> d2 = DateSpan([2, 3, 4])
>>> hstack([d1, d2])
DateSpan([1 day, 2 days, 3 days, 4 days])
'''
# pass the subclass to the parent class routine
return hstack_any(dates, cls, DateSpan)
# ------------------------------------------------------------
def _check_mathops_nano(self, funcname, value, other_inv_mask, return_type, caller):
'''
Operations with TimeSpan and DateTimeNano will flip to nano precision, or raise an error.
Parameters:
-----------
funcname : name of ufunc
value : original operand in math operation
other_inv_mask : None, might be set in this routine
return_type : None, might be set to TimeSpan or DateTimeNano
caller : FastArray view of Date object.
'''
if isinstance(value, TimeSpan):
return_type = TimeSpan
other_inv_mask = value.isnan()
caller = self._fa * NANOS_PER_DAY
elif isinstance(value, DateTimeNano):
if funcname in ('__sub__', '__isub__'):
raise TypeError(f'Cannot perform {funcname} operation between DateSpan and DateTimeNano')
return_type = DateTimeNano
other_inv_mask = value.isnan()
caller = self._fa * NANOS_PER_DAY
value = value._fa
return value, other_inv_mask, return_type, caller
# ------------------------------------------------------------
def _check_mathops(self, funcname, value):
'''
This gets called after a math operation has been performed on the Date's FastArray.
Return type may differ based on operation. Preserves invalids from original input.
Parameters:
-----------
funcname : name of ufunc
value : original operand in math operation
returns return_type, other_inv_mask
'''
# for now, make Date the default return type
return_type = DateSpan
other_inv_mask = None
if isinstance(value, Date):
if funcname in ('__sub__', '__isub__'):
raise TypeError(f'Cannot perform {funcname} operation between DateSpan and Date')
return_type = Date
other_inv_mask = value.isnan()
# invalid gets early exit
elif isinstance(value, (int, float, np.number)):
# return same length Date full of NAN_DATE
if isnan(value) or value == self.NAN_DATE:
# other_inv_mask will hold the final return
return_type = None
other_inv_mask = DateSpan(self.copy_invalid())
elif isinstance(value, np.ndarray):
other_inv_mask = isnan(value)
return return_type, other_inv_mask
# ------------------------------------------------------------
def __add__(self, value):
return self._funnel_mathops('__add__', value)
def __iadd__(self, value):
return self._funnel_mathops('__iadd__', value)
def __sub__(self, value):
return self._funnel_mathops('__sub__', value)
def __isub__(self, value):
return self._funnel_mathops('__isub__', value)
# ------------------------------------------------------------
def _datespan_compare_check(self, funcname, other):
'''
Funnel for all comparison operations.
Helps Date interact with DateTimeNano, TimeSpan.
'''
caller = self._fa
if isinstance(other, (Date, DateTimeNano, TypeRegister.Categorical)):
# Date allows categorical comparisons, DateSpan does not
raise TypeError(f'Cannot perform {funcname} comparison operation between {type(self)} and {type(other)}.')
elif isinstance(other, TimeSpan):
caller = self._fa * NANOS_PER_DAY
# looks odd now, but keeps explicit branches in case any forbidden types appear
elif isinstance(other, DateSpan):
other = other._fa
# Categorical will fall through to constructor too
elif isinstance(other, np.ndarray):
other = Date(other)
# let everything else fall through for FastArray to catch
func = getattr(caller, funcname)
return func(other)
# -------------------COMPARISONS------------------------------
# ------------------------------------------------------------
def __ne__(self, other):
return self._datespan_compare_check('__ne__', other)
def __eq__(self, other):
return self._datespan_compare_check('__eq__', other)
def __ge__(self, other):
return self._datespan_compare_check('__ge__', other)
def __gt__(self, other):
return self._datespan_compare_check('__gt__', other)
def __le__(self, other):
return self._datespan_compare_check('__le__', other)
def __lt__(self, other):
return self._datespan_compare_check('__lt__', other)
# ------------------------------------------------------------
def DateTimeUTC(arr, to_tz='NYC', from_matlab=False, format=None, start_date=None, gmt=None):
"""Forces DateTimeNano ``from_tz`` keyword to 'UTC'.
For more see DateTimeNano.
"""
return DateTimeNano(arr, from_tz='UTC', to_tz=to_tz, from_matlab=from_matlab, format=format, start_date=start_date, gmt=gmt)
# ========================================================
class DateTimeCommon:
'''
Common functions shared between the array based class and the scalar
This class must be combined with another class because of its dependency on _timezone
'''
# -CLOCK HH:MM------------------------------------------------
@property
def format_clock(self):
'''Set time to be displayed as HH:MM:SS'''
self._display_length = DisplayLength.Short
@property
def format_short(self):
'''Set time to be displayed as HH:MM:SS'''
self._display_length = DisplayLength.Short
# -YYYYMMDD----------------------------------------------------
@property
def format_medium(self):
'''Set time to be displayed as YYYYMMDD'''
self._display_length = DisplayLength.Medium
@property
def format_ymd(self):
'''Set time to be displayed as YYYYMMDD'''
self._display_length = DisplayLength.Medium
@property
def format_day(self):
'''Set time to be displayed as YYYYMMDD'''
self._display_length = DisplayLength.Medium
# -YYYYMMDD HH:MM:SS.nanosecond ---------------------------------
@property
def format_long(self):
'''Set time to be displayed as YYYYMMDD HH:MM:SS.fffffffff'''
self._display_length = DisplayLength.Long
@property
def format_full(self):
'''Set time to be displayed as YYYYMMDD HH:MM:SS.fffffffff'''
self._display_length = DisplayLength.Long
@property
def format_sig(self):
'''Set time to be displayed as YYYYMMDD HH:MM:SS.fffffffff'''
self._display_length = DisplayLength.Long
# ------------------------------------------------------------
@property
def days_since_epoch(self):
'''
Number of days since epoch.
Examples
--------
>>> dtn = DateTimeNano(['1970-01-11'], from_tz='NYC')
>>> dtn.days_since_epoch
FastArray([10], dtype=int64)
Returns
-------
int64 array
'''
arr = self._timezone.fix_dst(self)
return arr // NANOS_PER_DAY
# ------------------------------------------------------------
@property
def seconds_since_epoch(self):
'''
Number of seconds since epoch.
Examples
--------
>>> dtn = DateTimeNano(['1970-01-02'], from_tz='NYC')
>>> dtn.seconds_since_epoch
FastArray([86400], dtype=int64)
Returns
-------
int64 array
'''
arr = self._timezone.fix_dst(self)
return arr // NANOS_PER_SECOND
# ------------------------------------------------------------
def nanos_since_midnight(self):
'''
Nanoseconds since midnight of the current day.
Examples
--------
>>> dtn = DateTimeNano(['2018-01-01 00:00:00.000123456'],from_tz='NYC')
>>> dtn.nanos_since_midnight()
FastArray([123456], dtype=int64)
Returns
-------
int64 array
See Also
--------
DateTimeNano.time_since_midnight
'''
arr = self._timezone.fix_dst(self)
arr = arr % NANOS_PER_DAY
return _apply_inv_mask(self, arr)
# ------------------------------------------------------------
def millis_since_midnight(self):
'''
Milliseconds since midnight of the current day.
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 00:00:01.002003004'], from_tz='NYC')
>>> dtn.millis_since_midnight()
FastArray([1002.003004])
Returns
-------
float64 array
Note
----
Unlike similar methods, this returns floating point, similar to common columns
in Matlab datasets.
'''
arr = self._timezone.fix_dst(self)
arr = arr % NANOS_PER_DAY
arr = arr / NANOS_PER_MILLISECOND
return _apply_inv_mask(self, arr)
# ------------------------------------------------------------
def date(self):
'''
Copies the object and removes hours, minutes, seconds, and second fractions.
All resulting times will be at midnight.
Examples
--------
>>> dtn = DateTimeNano(['2019-01-04 12:34', '2019-06-06 14:00'], from_tz='NYC')
>>> dtn.date()
DateTimeNano([20190104 00:00:00.000000000, 20190606 00:00:00.000000000])
Returns
-------
obj:`DateTimeNano`
'''
if self._timezone._dst_reverse is not None:
arr = self._timezone.fix_dst(self._fa)
arr = arr - (arr % NANOS_PER_DAY)
else:
arr = self._fa
arr = arr - (arr % NANOS_PER_DAY)
# from_tz needs to match to_tz (similar to from_matlab_days, except can't force 'GMT' because of DST fixup)
# return DateTimeNano(arr, from_tz=self._timezone._to_tz, to_tz='UTC')
result = DateTimeNano(arr, from_tz=self._timezone._to_tz, to_tz=self._timezone._to_tz)
if isinstance(self, DateTimeNanoScalar):
return result[0]
return result
# ------------------------------------------------------------
@property
def yyyymmdd(self):
'''
Returns integers in the format YYYYMMDD.
Accounts for daylight savings time, leap years.
Examples
--------
>>> dtn = DateTimeNano(['2018-01-09', '2000-02-29', '2000-03-01', '2019-12-31'], from_tz='NYC')
>>> dtn.yyyymmdd
FastArray([20180109, 20000229, 20000301, 20191231])
Returns
-------
int32 array
Note
----
this routine is very similar to day_of_month - the two could probably share internal routines to avoid repeating code
'''
arr = self._fa
arr = self._timezone.fix_dst(arr)
year = self._year(arr, fix_dst=False)
# initialize result
final = year * 10_000
# subtract the nanos from start of year so all times are in MM-DD HH:MM:SS, etc.
startyear = arr - self._year_splits[year - 1970]
# treat the whole array like a non-leapyear
monthnum = self._yearday_splits.searchsorted(startyear, side='right')
startmonth_idx = monthnum - 1
monthtime = startyear - self._yearday_splits[startmonth_idx]
# fix up the leapyears with a different yearday split table
leapmask = (year % 4) == 0
monthnum_leap = self._yearday_splits_leap.searchsorted(startyear[leapmask], side='right')
startmonth_idx_leap = monthnum_leap - 1
monthnum[leapmask] = monthnum_leap
monthtime[leapmask] = startyear[leapmask] - self._yearday_splits_leap[startmonth_idx_leap]
# future optimization, takeover place, or send __setitem__ indexer to our version of it
# np.place(monthnum, leapmask, monthnum_leap)
# np.place(monthtime, leapmask, startyear[leapmask] - UTC_YDAY_SPLITS_LEAP[startmonth_idx_leap])
# add month and day values to final
final += monthnum.astype(np.int32) * 100
final += (monthtime // NANOS_PER_DAY) + 1
return final
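# Minimal standalone sketch of the searchsorted month lookup used above (illustrative only;
# the real tables are UTC_YDAY_SPLITS / UTC_YDAY_SPLITS_LEAP and are expressed in nanoseconds):
#   import numpy as np
#   month_starts = np.cumsum([0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30])  # non-leap day offsets
#   day_of_year0 = np.array([0, 31, 59, 364])                  # zero-based day within the year
#   month_starts.searchsorted(day_of_year0, side='right')      # -> array([ 1,  2,  3, 12])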
# ------------------------------------------------------------
@property
def _year_splits(self):
"""Midnght on Jan. 1st from 1970 - 1940 in utc nanoseconds."""
return UTC_1970_SPLITS
# ------------------------------------------------------------
@property
def _yearday_splits(self):
"""Midnight on the 1st of the month in nanoseconds since the beginning of the year."""
return UTC_YDAY_SPLITS
# ------------------------------------------------------------
@property
def _yearday_splits_leap(self):
"""Midnight on the 1st of the month in nanoseconds since the beginning of the year during a leap year."""
return UTC_YDAY_SPLITS_LEAP
# ------------------------------------------------------------
def year(self):
'''
The year value for each entry in the array
Examples
---------
>>> dtn = DateTimeNano(['1984-02-01', '1992-02-01', '2018-02-01'], from_tz='NYC')
>>> dtn.year()
FastArray([1984, 1992, 2018])
Returns
-------
int32 array
'''
year = self._year(self._fa, fix_dst=True)
return _apply_inv_mask(self, year)
# ------------------------------------------------------------
def month(self):
'''
The month value for each entry in the array.
1=Jan, 2=Feb, etc. (leap-year aware)
Examples
--------
>>> dtn = DateTimeNano(['2000-02-29', '2018-12-25'], from_tz='NYC')
>>> dtn.month()
FastArray([ 2, 12])
Returns
-------
int32 array
'''
return _apply_inv_mask(self, self._month(fix_dst=True))
# ------------------------------------------------------------
def monthyear(self, arr=None):
'''
Returns a string with 3 letter month + 4 digit year
Examples
--------
>>> d = DateTimeNano(['2000-02-29', '2018-12-25'], from_tz='NYC')
>>> d.monthyear()
FastArray([ 'Feb2000','Dec2018'])
'''
month = self.month()
yearstr = self.year().astype('S')
return MONTH_STR_ARRAY[month - 1] + yearstr
# ------------------------------------------------------------
@property
def day_of_year(self):
'''
The day of year value for each entry in the array.
Day values are from 1 to 365 (or 366 if leap year)
Examples
--------
>>> dtn = DateTimeNano(['2019-01-01', '2019-02-01', '2019-12-31 23:59', '2000-12-31 23:59'], from_tz='NYC')
>>> dtn.day_of_year
FastArray([ 1, 32, 365, 366], dtype=int64)
Returns
-------
int32 array
'''
result = self.nanos_since_start_of_year()
if isinstance(result, np.ndarray):
np.floor_divide(result, NANOS_PER_DAY, out=result)
else:
result = result // NANOS_PER_DAY
result += 1
return result
# ------------------------------------------------------------
@property
def day_of_month(self):
'''
The day of month value for each entry in the array.
Day values are from 1 to 31
Adjusts for daylight savings time, leap year
Examples
--------
>>> dtn = DateTimeNano(['2018-01-09', '2000-02-29', '2000-03-01', '2019-12-31'], from_tz='NYC')
>>> dtn.day_of_month
FastArray([ 9, 29, 1, 31], dtype=int64)
Returns
-------
int32 array
'''
arr = self._fa
year = self._year(arr, fix_dst=True)
# subtract the nanos from start of year so all times are in MM-DD HH:MM:SS, etc.
startyear = arr - self._year_splits[year - 1970]
# treat the whole array like a non-leapyear
startmonth_idx = self._yearday_splits.searchsorted(startyear, side='right') - 1
monthtime = startyear - self._yearday_splits[startmonth_idx]
# fix up the leapyears with a different yearday split table
leapmask = (year % 4) == 0
startmonth_idx_leap = self._yearday_splits_leap.searchsorted(startyear[leapmask], side='right') - 1
monthtime[leapmask] = startyear[leapmask] - self._yearday_splits_leap[startmonth_idx_leap]
# unlike month, weekday, hour, etc. day of month starts at 1
if isinstance(monthtime, np.ndarray):
np.floor_divide(monthtime, NANOS_PER_DAY, out=monthtime)
else:
monthtime = monthtime // NANOS_PER_DAY
monthtime += 1
return monthtime
# ------------------------------------------------------------
@property
def day_of_week(self):
'''
Day of week value for each entry in the array.
Monday (0) -> Sunday (6)
January 1st 1970 was a Thursday! (3)
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 19:48:00', '1995-05-12 05:12:00'], from_tz='NYC')
>>> dtn.day_of_week
FastArray([5, 4])
Returns
-------
int32 array
'''
arr = self.days_since_epoch
arr += EPOCH_DAY_OF_WEEK
if isinstance(arr, np.ndarray):
# inplace operation
np.mod(arr, 7, out=arr)
else:
arr = arr % 7
return arr
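# Worked example of the formula above (illustrative): 1970-01-01 was a Thursday, so
# days_since_epoch == 0 gives (0 + EPOCH_DAY_OF_WEEK) % 7 == 3, and 1970-01-05, four days
# later, gives (4 + 3) % 7 == 0, i.e. Monday.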
# ------------------------------------------------------------
@property
def start_of_week(self):
'''
Return the Monday for the week the TimeStamp is in
Returns a Date or DateScalar
'''
arr = self.days_since_epoch
arr += EPOCH_DAY_OF_WEEK
adjust = arr % 7
arr -= adjust
arr -= EPOCH_DAY_OF_WEEK
result = Date(arr)
if not isinstance(arr, np.ndarray):
return result[0]
return result
# ------------------------------------------------------------
@property
def is_dst(self):
'''
Boolean array, True if a time value was in daylight savings time for the displayed timezone.
If the timezone is GMT, returns False for all items, including invalid times.
Examples
--------
>>> dtn = DateTimeNano(['2018-11-03 12:34', '2018-11-04 12:34'], from_tz='NYC')
>>> dtn.is_dst
FastArray([ True, False])
>>> dtn = DateTimeNano(['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='DUBLIN')
>>> dtn.is_dst
FastArray([False, True])
>>> dtn = DateTimeNano(['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='GMT', to_tz='GMT')
>>> dtn.is_dst
FastArray([False, False])
Returns
-------
bool array
'''
return self._timezone._is_dst(self._fa)
# ------------------------------------------------------------
@property
def tz_offset(self):
'''
Array of hour offset from GMT. Accounts for daylight savings time in timezone set by to_tz.
If the timezone is GMT, returns all 0.
Examples
--------
>>> dtn = DateTimeNano(['2018-11-03 12:34', '2018-11-04 12:34'], from_tz='NYC')
>>> dtn.tz_offset
FastArray([-4, -5])
>>> dtn = DateTimeNano(['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='DUBLIN', to_tz='DUBLIN')
>>> dtn.tz_offset
FastArray([0, 1])
>>> dtn = DateTimeNano(['2019-03-30 12:34', '2019-03-31 12:34'], from_tz='GMT', to_tz='GMT')
>>> dtn.tz_offset
FastArray([0, 0])
Returns
-------
int32 array
'''
return self._timezone._tz_offset(self._fa)
# -----------------------------------------------------
def putmask(self, arr1, filter, arr2):
'''
scalar or array putmask
'''
if isinstance(arr1, np.ndarray):
return putmask(arr1, filter, arr2)
else:
if filter:
return arr2
else:
return arr1
# ------------------------------------------------------------
@property
def is_weekday(self):
'''
Returns a boolean array of whether or not each time occurred on a weekday.
Examples
--------
The example dates below fall on a Monday, a Thursday, and a Saturday:
>>> dtn = DateTimeNano(['2019-01-07', '2019-01-10', '2019-01-12'],from_tz='NYC')
>>> dtn.is_weekday
FastArray([ True, True, False])
Returns
-------
bool array
'''
inv_mask = self.isnan()
isweekday = self.day_of_week < 5
self.putmask(isweekday, inv_mask, False)
return isweekday
# ------------------------------------------------------------
@property
def is_weekend(self):
'''
Returns a boolean array of whether or not each time occurred on a weekend.
Examples
--------
The example dates below fall on a Monday, a Thursday, and a Saturday:
>>> dtn = DateTimeNano(['2019-01-07', '2019-01-10', '2019-01-12'],from_tz='NYC')
>>> dtn.is_weekend
FastArray([False, False, True])
Returns
-------
bool array
'''
inv_mask = self.isnan()
isweekend = self.day_of_week > 4
self.putmask(isweekend, inv_mask, False)
return isweekend
# ------------------------------------------------------------
@property
def day(self):
'''
Fractional day time relative to 24 hours.
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:00.000000'], from_tz='NYC')
>>> dtn.day
FastArray([0.825])
Returns
-------
float64 array
Notes
-----
this differs from the hour, minute, and second properties: the value is a fraction of
its own unit (a full day) rather than of the next larger unit.
'''
inv_mask = self.isnan()
arr = self._timezone.fix_dst(self._fa)
arr = arr % NANOS_PER_DAY
arr = arr / NANOS_PER_DAY
self.putmask(arr, inv_mask, np.nan)
return arr
# ------------------------------------------------------------
@property
def hour(self):
'''
Hours relative to the current day (with partial hour decimal).
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:00.000000'], from_tz='NYC')
>>> dtn.hour
FastArray([19.8])
Returns
-------
float64 array
See Also
--------
DateTimeNano.hour_span
'''
return self._hour()
# -----------------------------------------------------
@property
def hour_span(self):
'''
Hours relative to the current day (with partial hour decimal) as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:00.000000'], from_tz='NYC')
>>> dtn.hour_span
TimeSpan([19:48:00.000000000])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.hour
'''
return self._hour(span=True)
def _hour(self, span=False):
inv_mask = self.isnan()
arr = self._timezone.fix_dst(self._fa)
arr = arr % NANOS_PER_DAY
if span:
result = TypeRegister.TimeSpan(arr)
else:
result = arr / NANOS_PER_HOUR
self.putmask(result, inv_mask, np.nan)
return result
# ------------------------------------------------------------
def _time_fraction(self, modulo, divisor, span=False):
'''
Internal routine for minute, second, millisecond, microsecond, nanosecond (+span) properties.
None of these need to account for timezone.
'''
inv_mask = self.isnan()
arr = self._fa % modulo
if span:
if isinstance(self, DateTimeNano):
result = TypeRegister.TimeSpan(arr)
else:
result = TypeRegister.TimeSpanScalar(arr)
else:
result = arr / divisor
self.putmask(result, inv_mask, np.nan)
return result
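# Illustrative mapping of the (modulo, divisor) pairs passed in by the properties below:
#   minute      -> (NANOS_PER_HOUR,        NANOS_PER_MINUTE)
#   second      -> (NANOS_PER_MINUTE,      NANOS_PER_SECOND)
#   millisecond -> (NANOS_PER_SECOND,      NANOS_PER_MILLISECOND)
#   microsecond -> (NANOS_PER_MILLISECOND, NANOS_PER_MICROSECOND)
#   nanosecond  -> (NANOS_PER_MICROSECOND, 1)
# e.g. for 19:48:30.1, ``_fa % NANOS_PER_MINUTE`` leaves 30.1 seconds worth of nanoseconds,
# and dividing by NANOS_PER_SECOND yields 30.1.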
# ------------------------------------------------------------
@property
def minute(self):
'''
Minutes relative to the current hour (with partial minute decimal).
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:30.000000'], from_tz='NYC')
>>> dtn.minute
FastArray([48.5])
Returns
-------
float64 array
See Also
--------
DateTimeNano.minute_span
'''
return self._time_fraction(NANOS_PER_HOUR, NANOS_PER_MINUTE)
@property
def minute_span(self):
'''
Minutes relative to the current hour (with partial minute decimal) as a TimeSpan object
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:30.000000'], from_tz='NYC')
>>> dtn.minute_span
TimeSpan([00:48:30.000000000])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.minute
'''
return self._time_fraction(NANOS_PER_HOUR, NANOS_PER_MINUTE, span=True)
# ------------------------------------------------------------
@property
def second(self):
'''
Seconds relative to the current minute (with partial second decimal).
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:30.100000'], from_tz='NYC')
>>> dtn.second
FastArray([30.1])
Returns
-------
float64 array
See Also
--------
DateTimeNano.second_span
'''
return self._time_fraction(NANOS_PER_MINUTE, NANOS_PER_SECOND)
@property
def second_span(self):
'''
Seconds relative to the current minute (with partial second decimal) as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['2000-02-01 19:48:30.100000'], from_tz='NYC')
>>> dtn.second_span
TimeSpan([00:00:30.100000000])
'''
return self._time_fraction(NANOS_PER_MINUTE, NANOS_PER_SECOND, span=True)
# ------------------------------------------------------------
@property
def millisecond(self):
'''
Milliseconds relative to the current second (with partial millisecond decimal).
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.123000000'], from_tz='NYC')
>>> dtn.millisecond
FastArray([123.])
Returns
-------
float64 array
See Also
--------
DateTimeNano.millisecond_span
'''
return self._time_fraction(NANOS_PER_SECOND, NANOS_PER_MILLISECOND)
@property
def millisecond_span(self):
'''
Milliseconds relative to the current second (with partial millisecond decimal) as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.123000000'], from_tz='NYC')
>>> dtn.millisecond_span
TimeSpan([00:00:00.123000000])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.millisecond
'''
return self._time_fraction(NANOS_PER_SECOND, NANOS_PER_MILLISECOND, span=True)
# ------------------------------------------------------------
@property
def microsecond(self):
'''
Microseconds relative to the current millisecond (with partial microsecond decimal)
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.000123000'], from_tz='NYC')
>>> dtn.microsecond
FastArray([123.])
Returns
-------
float64 array
See Also
--------
DateTimeNano.microsecond_span
'''
return self._time_fraction(NANOS_PER_MILLISECOND, NANOS_PER_MICROSECOND)
@property
def microsecond_span(self):
'''
Microseconds relative to the current millisecond (with partial microsecond decimal) as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.000123000'], from_tz='NYC')
>>> dtn.microsecond_span
TimeSpan([00:00:00.000123000])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.microsecond
'''
return self._time_fraction(NANOS_PER_MILLISECOND, NANOS_PER_MICROSECOND, span=True)
# ------------------------------------------------------------
@property
def nanosecond(self):
'''
Nanoseconds relative to the current microsecond.
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.000000123'], from_tz='NYC')
>>> dtn.nanosecond
FastArray([123.])
Returns
-------
float64 array
See Also
--------
DateTimeNano.nanosecond_span
'''
return self._time_fraction(NANOS_PER_MICROSECOND, 1)
@property
def nanosecond_span(self):
'''
Nanoseconds relative to the current microsecond as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['1992-02-01 12:00:01.000000123'], from_tz='NYC')
>>> dtn.nanosecond_span
TimeSpan([00:00:00.000000123])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.nanosecond
'''
return self._time_fraction(NANOS_PER_MICROSECOND, 1, span=True)
# ------------------------------------------------------------
def nanos_since_start_of_year(self):
'''
Nanoseconds since Jan. 1st at midnight of the current year.
Examples
--------
>>> dtn = DateTimeNano(['2018-01-01 00:00:00.000123456'],from_tz='NYC')
>>> dtn.nanos_since_start_of_year()
FastArray([123456], dtype=int64)
Returns
-------
int64 array
See Also
--------
DateTimeNano.time_since_start_of_year
'''
arr = self._timezone.fix_dst(self._fa)
year = self._year(arr, fix_dst=False)
arr = arr - self._year_splits[year - 1970]
return arr
# ------------------------------------------------------------
def time_since_start_of_year(self):
'''
Nanoseconds since Jan. 1st at midnight of the current year as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['2018-01-01 16:00:00.000123456'],from_tz='NYC')
>>> dtn.time_since_start_of_year()
TimeSpan([16:00:00.000123456])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.nanos_since_start_of_year
Note
----
Nanosecond precision will be lost after ~52 days
'''
result = TimeSpan(self.nanos_since_start_of_year())
if isinstance(self, DateTimeNano):
return result
return result[0]
# ------------------------------------------------------------
def time_since_midnight(self):
'''
Elapsed time since midnight as a TimeSpan object.
Examples
--------
>>> dtn = DateTimeNano(['2000-02-29 00:00:00.000000100','2000-02-29 00:00:00.000123456'], from_tz='NYC')
>>> dtn.time_since_midnight()
TimeSpan([00:00:00.000000100, 00:00:00.000123456])
Returns
-------
obj:`TimeSpan`
See Also
--------
DateTimeNano.nanos_since_midnight, DateTimeNano.millis_since_midnight
'''
return self.hour_span
# ------------------------------------------------------------
# for DateTimeNano and DateTimeNanoScalar
def _build_mathops_result(self, other, funcname, call_super, other_inv_mask, inplace, op, return_type):
'''
Operates on fastarray or takes invalid fast track for DateTimeNano math operations like add/sub
'''
input1 = self
if not isinstance(self, np.ndarray):
input1 = DateTimeNano(self)
func = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
if call_super:
if inplace:
# inplace operations need to save invalids beforehand
input1_mask = input1.isnan()
else:
input1_mask = None
# also need to apply invalid from operand
if other_inv_mask is None:
other_inv_mask = isnan(other)
func = getattr(input1._fa, funcname)
result = func(other)
result = _apply_inv_mask(input1, result, fillval=DateTimeBase.NAN_TIME, arr1_inv_mask=input1_mask, arr2_inv_mask=other_inv_mask)
else:
if inplace:
functup = (input1, other, input1)
else:
functup = (input1, other)
result = func(functup, op, 0)
if result is None:
raise RuntimeError(f'Could not perform {funcname} operation with DateTimeNano and {type(other)} {other}')
if return_type == DateTimeNano:
result = DateTimeNano(result, from_tz='GMT', to_tz=input1._timezone._to_tz)
else:
result = return_type(result)
# check if both were scalars, then return a scalar
if not isinstance(self, np.ndarray) and not isinstance(other, np.ndarray):
return result[0]
return result
def strftime(self, format, dtype='O'):
'''
Converts DateTimeNano to an array of object strings or a scalar string.
This routine has not been sped up yet.
Other Parameters
----------------
dtype: defaults to 'O', can change to 'S' or 'U'
Examples
--------
>>> rt.utcnow(4).strftime('%c')
array(['Thu Oct 31 14:55:14 2019', 'Thu Oct 31 14:55:14 2019',
'Thu Oct 31 14:55:14 2019', 'Thu Oct 31 14:55:14 2019'], dtype='<U24')
>>> rt.utcnow(4).strftime('%X.%f')
array(['15:03:04.697686', '15:03:04.697687', '15:03:04.697687',
'15:03:04.697687'], dtype='<U15')
See Also
---------
http://strftime.org for format strings
datetime.datetime.strftime
'''
in_seconds = self / NANOS_PER_SECOND
to_tz = self._timezone._to_tz
if to_tz in ['GMT', 'UTC']:
if isinstance(in_seconds, np.ndarray):
return np.asarray([dt.utcfromtimestamp(timestamp).strftime(format) for timestamp in in_seconds], dtype=dtype)
else:
return dt.strftime(dt.utcfromtimestamp(in_seconds), format)
else:
# Choose timezone from to_tz
localzone = tz.gettz(self._timezone._timezone_str)
if isinstance(in_seconds, np.ndarray):
return np.asarray([dt.fromtimestamp(timestamp, localzone).strftime(format) for timestamp in in_seconds], dtype=dtype)
else:
return dt.strftime(dt.fromtimestamp(in_seconds, localzone), format)
# ========================================================
class DateTimeNano(DateTimeBase, TimeStampBase, DateTimeCommon):
'''
DateTimeNano arrays have an underlying int64 array. The array is in UTC nanosecond time that defaults to display
correctly in eastern/NYC time, accounting for daylight savings time.
Parameters
----------
arr : nanoseconds in integer, timestrings, numpy datetime64 array
from_tz : if initialized from strings, user is required to specify the timezone of origin (otherwise default is UTC)
Currently supported:
'NYC' : US/Eastern (accounts for daylight savings)
'DUBLIN' : Dublin (accounts for daylight savings)
'GMT' : Greenwich Mean Time
'UTC' : (not a timezone, but accepted as an alias for GMT)
to_tz : controls how the data will be displayed. if not set, will match the from_tz keyword
from_matlab : indicates that the input was matlab ordinal days (may also have fraction of day, so specify from_tz)
format : specify a format for timestrings - will be ignored unless strings are in arr
See strptime_to_nano() docstring for format info
start_date : specify a string start date for times in format YYYYMMDD. all values in the provided array
will be interpreted as nanoseconds, timespan, or clock strings in HH:MM
(gmt **deprecated)
Examples
--------
From DateTimeNano timestamps already in GMT:
>>> dtn = DateTimeNano([1514828730123456000], from_tz='GMT')
>>> dtn
DateTimeNano([20180101 12:45:30.123456000])
From DateTimeStrings in NYC time:
>>> dtn = DateTimeNano(['2018-01-01 12:45:30.123456000'], from_tz='NYC')
>>> dtn
DateTimeNano([20180101 12:45:30.123456000])
From numpy datetime64 array (different resolution, note numpy drops precision):
>>> dtn = np.array(['2018-11-02 09:30:00.002201', '2018-11-02 09:30:00.004212', '2018-11-02 09:30:00.005351'], dtype='datetime64[ms]')
>>> dtn = DateTimeNano(dtn, from_tz='NYC')
>>> dtn
DateTimeNano([20181102 09:30:00.002000000, 20181102 09:30:00.004000000, 20181102 09:30:00.005000000])
String parsing differences:
---------------------------
- riptable DateTimeNano string parsing is more forgiving than Numpy datetime64 arrays
- In some cases an object is returned when numpy returns an error.
- In other cases, a different result is returned.
- The lower limit for DateTimeNano string parsing is unix epoch time.
- You can always guarantee the same results by using the full ISO-8601 datetime format (YYYY-MM-DDTHH:mm:ss.fffffffff)
Without zero padding:
>>> DateTimeNano(['2018-1-1 1'], from_tz='NYC')
DateTimeNano([20180101 01:00:00.000000000])
>>> np.array(['2018-1-1 1'], dtype='datetime64[ns]')
ValueError: Error parsing datetime string "2018-1-1 1" at position 5
Extra characters:
>>> DateTimeNano(['2018-10-11 10:11:00.123 '], from_tz='NYC')
DateTimeNano([20181011 10:11:00.123000000])
>>> np.array(['2018-10-11 10:11:00.123 '], dtype='datetime64[ns]')
DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future
array(['2018-10-11T10:11:00.123000000'], dtype='datetime64[ns]')
Without separators:
>>> DateTimeNano(['20181231'], from_tz='NYC')
DateTimeNano([20181231 00:00:00.000000000])
>>> np.array(['20181231'], dtype='datetime64[ns]')
array(['1840-08-31T19:51:12.568664064'], dtype='datetime64[ns]')
ISO-8601 format:
>>> DateTimeNano(['2018-12-31T12:34:56.789123456'],from_tz='NYC')
DateTimeNano([20181231 12:34:56.789123456])
>>> np.array(['2018-12-31T12:34:56.789123456'], dtype='datetime64[ns]')
array(['2018-12-31T12:34:56.789123456'], dtype='datetime64[ns]')
strptime like formatting
>>> a=DateTimeNano(['12/31/19', '6/30/19'], format='%m/%d/%y', from_tz='NYC')
>>> a=DateTimeNano(['12/31/2019', '6/30/2019'], format='%m/%d/%Y', from_tz='NYC')
DateTimeNano([20191231 00:00:00.000000000, 20190630 00:00:00.000000000])
From Matlab:
>>> a=DateTimeNano([737426], from_matlab=True, from_tz='NYC')
>>> a
DateTimeNano([20190101 00:00:00.000000000])
>>> a.format_day; a
DateTimeNano([20190101])
From utcnow:
>> from datetime import datetime as dt
>> dt.utcnow()
datetime.datetime(2019, 2, 7, 20, 12, 44, 116810)
>> DateTimeNano([GetNanoTime()], from_tz="GMT")
DateTimeNano([20190207 15:12:44.116810400])
Math operations:
----------------
The following math operations can be performed and will yield the following object types
DateTimeNano - DateTimeNano = TimeSpan
DateTimeNano - TimeSpan = DateTimeNano
DateTimeNano + TimeSpan = DateTimeNano
TimeSpan - TimeSpan = TimeSpan
TimeSpan + TimeSpan = TimeSpan
Other notes:
------------
- The constructor does not attempt to preserve NaN times from python datetime objects.
'''
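# Hedged sketch of the math rules listed in the docstring above (illustrative only; reprs are
# approximate and assume the default TimeSpan unit is nanoseconds):
#   >>> dtn = DateTimeNano(['2019-01-01 10:00', '2019-01-01 11:00'], from_tz='NYC')
#   >>> dtn - dtn[0]                       # DateTimeNano - DateTimeNano -> TimeSpan
#   TimeSpan([00:00:00.000000000, 01:00:00.000000000])
#   >>> dtn + TimeSpan([NANOS_PER_HOUR])   # DateTimeNano + TimeSpan -> DateTimeNano, one hour later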
MetaVersion = 0
MetaDefault = {
'name': 'DateTimeNano',
'typeid': TypeId.DateTimeNano,
'ncols': 0,
'version': 0,
'instance_vars': {
'_display_length': DisplayLength.Long,
'_to_tz': 'NYC'
}
}
# TODO: add more intervals here and to DateTimeNano quarters
# need to interact with the business calendar class
# maybe merge these with TimeSpan unit conversion dict?
FrequencyStrings = {
'H': 'h',
'T': 'm',
'MIN': 'm',
'S': 's',
'L': 'ms',
'MS': 'ms',
'U': 'us',
'US': 'us',
'N': 'ns',
'NS': 'ns',
}
_INVALID_FREQ_ERROR = "Invalid frequency: {}"
# ------------------------------------------------------------
def __new__(cls, arr, from_tz=None, to_tz=None, from_matlab=False, format=None, start_date=None, gmt=None):
'''
Array of nanoseconds since Unix Epoch (held in int64)
All DateTimeNano objects hold nanoseconds in GMT time.
Parameters:
-----------
arr : nanoseconds in integer, timestrings, numpy datetime64 array
from_tz : if initialized from strings, user is required to specify the timezone of origin (otherwise default is UTC)
Currently supported:
'NYC' : US/Eastern (accounts for daylight savings)
'DUBLIN' : Dublin (accounts for daylight savings)
'GMT' : Greenwich Mean Time
to_tz : controls how the data will be displayed. if not set, will match the from_tz keyword
from_matlab : indicates that the input was matlab ordinal days (may also have fraction of day, so specify from_tz)
format : specify a format for timestrings - will be ignored unless strings are in arr
See strptime_to_nano() docstring for format info
start_date : specify a single date for datetime nano. all times in provided array will be interpreted
as nanoseconds.
(gmt **deprecated)
Notes:
------
- If the integer data in a DateTimeNano object is extracted, it is in GMT time. To initialize another
DateTimeNano with the same underlying array, need to set from_tz='GMT' or 'UTC'
- the gmt keyword is no longer used, need to add a deprecation warning at some point
- DateTimeNano has no knowledge of timezones. All timezone operations are handled by the TimeZone class
Examples
--------
>>> dtn = DateTimeNano(['20180201 12:34'], from_tz='NYC')
>>> dtn
DateTimeNano([20180201 12:34:00.000000000])
>>> dtn = DateTimeNano(['2/1/1992', '5/12/1995'], from_tz='NYC', format='%m/%d/%Y')
>>> dtn
DateTimeNano([19920201 00:00:00.000000000, 19950512 00:00:00.000000000])
'''
# changing defaults / requirements based on constructor
# non-string constructors don't require from_tz keyword to be set
# need to store original keyword values to check in the funnel (saving all in case we add more)
_orig_from_tz = from_tz
if from_tz is None:
from_tz = 'UTC'
_from_matlab = from_matlab
_format = format
_start_date = start_date
# check for categorical of string or dates
arr, cat = _possibly_convert_cat(arr)
if isinstance(arr, TypeRegister.Date):
if to_tz is None:
to_tz = 'UTC'
# will automatically flip to int64, send through as nanosecond integer array
arr = arr._fa * NANOS_PER_DAY
else:
if to_tz is None:
to_tz = 'NYC'
# create a timezone object to handle daylight savings, any necessary conversion, etc.
_timezone = TypeRegister.TimeZone(from_tz=from_tz, to_tz=to_tz)
if from_matlab:
instance = cls._convert_matlab_days(arr, _timezone)
else:
if start_date is not None:
if not isinstance(arr, np.ndarray):
arr = FastArray(arr)
# if array was strings, interpret as timespan
# numeric arrays will also be interpreted as timespan
if arr.dtype.char in 'US':
arr = TimeSpan(arr)
# interpret as start date in nanoseconds
if isinstance(start_date, (str, bytes)):
start_date = FastArray(start_date)
start_date = rc.DateStringToNanos(start_date)[0]
elif isinstance(start_date, Date):
if len(start_date) == len(arr):
# user has passed in multiple start dates
start_date = start_date._fa * NANOS_PER_DAY
else:
start_date = start_date[0] * NANOS_PER_DAY
else:
raise TypeError(f'Start date must be string in format YYYYMMDD or Date object. Got type {type(start_date)}')
instance = None
if isinstance(arr, list) or np.isscalar(arr):
arr = FastArray(arr)
if isinstance(arr, np.ndarray):
if arr.dtype.char == 'O':
# possibly python datetime object
if isinstance(arr[0], dt):
# warn if it will take more than 1 second
if len(arr) > 750_000:
warnings.warn(f"Python is converting {len(arr)} datetime objects. Performance may suffer.")
arr = np.array([t.isoformat() for t in arr], dtype='datetime64[ns]')
# string
if arr.dtype.char in 'US':
if _orig_from_tz is None:
raise ValueError(TypeRegister.TimeZone.tz_error_msg)
# if format specified, use our strptime
if format is not None:
instance = strptime_to_nano(arr, format, from_tz=from_tz, to_tz=to_tz)
else:
# otherwise assume ISO-8601 format
instance = datetimestring_to_nano(arr, from_tz=from_tz, to_tz=to_tz)
# check for categorical of string
if cat is not None:
# re-expand since it came in as a categorical
instance = cat.expand_any(instance)
return instance
# flip numpy datetime64 array
elif arr.dtype.char == 'M':
instance = arr.astype('datetime64[ns]', copy=False).view(np.int64)
# don't allow timespan arrays without start date
elif isinstance(arr, TimeSpan) and start_date is None:
raise TypeError(f'Cannot create DateTimeNano from TimeSpan array unless start_date is provided.')
elif arr.dtype.char in NumpyCharTypes.AllInteger + NumpyCharTypes.AllFloat:
pass
else:
raise TypeError(f"Cannot create DateTimeNano object from {arr.dtype}")
# only flip to int64 if necessary
# TODO: for uint64 do we want a .view() so we don't have to convert?
instance = arr.astype(np.int64, copy=False)
if start_date is not None:
instance = instance + start_date
# match stored utc nano to desired display
instance = _timezone.to_utc(instance)
else:
raise TypeError(f"Cannot initialize DateTimeNano with type {type(arr)}, must be list or array.")
# check for categorical of string
if cat is not None:
# re-expand since it came in as a categorical
instance = cat.expand_any(instance)
instance = instance.view(cls)
instance._display_length = DisplayLength.Long
instance._timezone = _timezone
return instance
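# Hedged usage sketch of the ``start_date`` path above (illustrative only; exact repr may differ):
#   >>> DateTimeNano(['09:30', '15:45'], start_date='20190201', from_tz='NYC')
#   DateTimeNano([20190201 09:30:00.000000000, 20190201 15:45:00.000000000])
# The clock strings are first converted to TimeSpan, then offset by the start date in nanoseconds.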
# ------------------------------------------------------------
def __init__(self, arr, from_matlab=False, from_tz=None, to_tz=None, format=None, start_date=None, gmt=None):
pass
# ------------------------------------------------------------
def get_classname(self):
'''
Return object's class name for array repr.
Returns
-------
obj:`str`
Object's class name.
'''
return __class__.__name__
# ------------------------------------------------------------
def get_scalar(self, scalarval):
return DateTimeNanoScalar(scalarval, _from=self)
# ------------------------------------------------------------
@classmethod
def _convert_matlab_days(cls, arr, timezone):
'''
Parameters:
-----------
arr : array of matlab datenums (1 is 1-Jan-0000)
timezone : TimeZone object from DateTimeNano constructor
Converts matlab datenums to an array of int64 containing utc nanoseconds.
'''
inv_mask = isnan(arr)
# matlab dates come in as float
# first, flip to float64 so no precision is lost
arr = arr.astype(np.float64, copy=False)
arr = arr - MATLAB_EPOCH_DATENUM
# might be a better way to do this with fewer array copies
arr *= NANOS_PER_DAY
arr = arr.astype(np.int64)
arr = timezone.to_utc(arr, inv_mask=inv_mask)
putmask(arr, inv_mask, cls.NAN_TIME)
return arr
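# Worked example of the conversion above (illustrative; assumes MATLAB_EPOCH_DATENUM == 719529,
# the MATLAB datenum of the Unix epoch, 1970-01-01):
#   737426 - 719529 == 17897 days since the Unix epoch
#   17897 * NANOS_PER_DAY -> utc nanoseconds for 2019-01-01 00:00, matching the class docstring example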
# ------------------------------------------------------------
def set_timezone(self, tz):
'''
Changes the timezone that the times are displayed in.
A different lookup array will be used for daylight savings fixups.
Does not modify the underlying array.
Parameters
----------
tz : str
Abbreviated name of desired timezone. See rt.TimeZone.valid_timezones
Examples
--------
Normal:
>>> dtn = DateTimeNano(['2019-01-07 10:36'], from_tz='NYC', to_tz='NYC')
>>> dtn
DateTimeNano([20190107 10:36:00.000000000])
>>> dtn.set_timezone('DUBLIN')
>>> dtn
DateTimeNano([20190107 15:36:00.000000000])
NYC is in daylight savings time, Dublin is not:
>>> dtn = DateTimeNano(['2019-03-15 10:36'], from_tz='NYC', to_tz='NYC')
>>> dtn
DateTimeNano([20190315 10:36:00.000000000])
>>> dtn.set_timezone('DUBLIN')
>>> dtn
DateTimeNano([20190315 14:36:00.000000000])
'''
self._timezone._set_timezone(tz)
# ------------------------------------------------------------
def astimezone(self, tz):
'''
Returns a new DateTimeNano object in a different displayed timezone.
The new object holds a reference to the same underlying array.
Parameters
----------
tz : str
Abbreviated name of desired timezone. See rt.TimeZone.valid_timezones
Returns
-------
obj:`DateTimeNano`
Notes
-----
Unlike Python's datetime.datetime.astimezone(), accepts strings, not timezone objects.
'''
return DateTimeNano(self._fa, from_tz='GMT', to_tz=tz)
# ------------------------------------------------------------
def to_iso(self):
'''
Generates a FastArray of ISO-8601 timestamp bytestrings.
The string will match the time +/- timezone offset displayed in the output of the DateTimeNano object.
Examples
--------
>>> dtn = DateTimeNano(['2019-01-22 12:34'],from_tz='NYC')
>>> dtn
DateTimeNano([20190122 12:34:00.000000000])
>>> dtn.to_iso()
FastArray([b'2019-01-22T12:34:00.000000000'], dtype='|S48')
>>> dtn = DateTimeNano(['2019-01-22'],from_tz='GMT',to_tz='NYC')
>>> dtn
DateTimeNano([20190121 19:00:00.000000000])
>>> dtn.to_iso()
FastArray([b'2019-01-21T19:00:00.000000000'], dtype='|S48')
Returns
-------
obj:`FastArray`
'''
inv_mask = self.isnan()
arr = self._timezone.fix_dst(self._fa)
arr = arr.astype('datetime64[ns]')
putmask(arr, inv_mask, np.datetime64('nat'))
return arr.astype('S')
@property
def display_length(self):
if not hasattr(self, '_display_length'):
self._display_length = DisplayLength.Long
return self._display_length
# TODO uncomment when starfish is implemented and imported
# def _sf_display_query_properties(self):
# itemformat = sf.ItemFormat({'length':self.display_length,
# 'align':sf.DisplayAlign.Right,
# 'timezone_str':self._timezone._timezone_str})
# return itemformat, self.display_convert_func
# ------------------------------------------------------------
def display_query_properties(self):
# if TypeRegister.DisplayOptions.STARFISH:
# return self._sf_display_query_properties()
'''
Call back for display functions to get the formatting function and style for timestrings.
Each instance knows how to format its time strings. The formatter is specified in TIME_FORMATS
The length property of item_format stores the index into TIME_FORMATS for the display_convert_func
Returns
-------
obj:`ItemFormat`
See riptable.Utils.rt_display_properties
function
Callback function for formatting the timestring
'''
item_format = ItemFormat(
length=self.display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None,
timezone_str=self._timezone._timezone_str
)
convert_func = self.display_convert_func
return item_format, convert_func
# ------------------------------------------------------------
@staticmethod
def display_convert_func(utcnano, itemformat: ItemFormat):
'''
Convert a utc nanosecond timestamp to a string for display.
Parameters
----------
utcnano : int
Timestamp in nanoseconds, a single value from a DateTimeNano array
itemformat : obj:`ItemFormat`
Style object retrieved from display callback.
Returns
-------
str
Timestamp as string.
See Also
--------
DateTimeNano.display_query_properties
riptable.Utils.rt_display_properties
'''
# TODO: apply ItemFormat options that were passed in
return DateTimeNano.format_nano_time(utcnano, itemformat)
# ------------------------------------------------------------
def display_item(self, utcnano):
'''
Convert a utc nanosecond timestamp to a string for array repr.
Parameters
----------
utcnano : int
Timestamp in nanoseconds, a single value from a DateTimeNano array
Returns
-------
str
Timestamp as string.
'''
itemformat, _ = self.display_query_properties()
return self.format_nano_time(utcnano, itemformat)
# -----------------------------------------------------------
@classmethod
def _parse_item_format(cls, itemformat):
'''
Translate a value in the DisplayLength enum into a time format string
'''
if itemformat.length == DisplayLength.Short:
format_str = TIME_FORMATS[4]
display_nano = False
elif itemformat.length == DisplayLength.Medium:
format_str = TIME_FORMATS[1]
display_nano = False
elif itemformat.length == DisplayLength.Long:
format_str = TIME_FORMATS[3]
display_nano = True
else:
raise ValueError(f"Don't know how to interpret display length: {itemformat.length.name}")
return format_str, display_nano
# -----------------------------------------------------------
@staticmethod
def format_nano_time(utcnano, itemformat):
'''
Convert a utc nanosecond timestamp to a string for display.
Parameters
----------
utcnano : int
Timestamp in nanoseconds, a single value from a DateTimeNano array
itemformat : obj:`ItemFormat`
Style object retrieved from display callback.
Returns
-------
str
Timestamp as string.
Notes
-----
Uses Python's datetime module for final string conversion.
'''
# TODO: cache the time format string returned
format_str, display_nano = DateTimeNano._parse_item_format(itemformat)
if utcnano == INVALID_DICT[np.dtype(np.int64).num] or utcnano == 0:
return 'Inv'
# view UTC time as local time
# tz is dateutil.tz
# dt is datetime.datetime
# timezone is datetime.timezone
localzone = tz.gettz(itemformat.timezone_str)
try:
timestr = dt.fromtimestamp((utcnano // NANOS_PER_SECOND), timezone.utc)
timestr = timestr.astimezone(localzone)
timestr = timestr.strftime(format_str)
except:
raise ValueError(f"DateTime: the utc nano value {utcnano!r} for {timezone.utc!r} is not valid.")
# possible add ms,us,ns precision to seconds
# each instance should attempt to set its own precision based on how it was constructed
if display_nano:
timestr = DateTimeBase._add_nano_ext(utcnano, timestr)
return timestr
# ------------------------------------------------------------
@classmethod
def _from_meta_data(cls, arrdict, arrflags, meta):
meta = MetaData(meta)
# combine saved attributes with defaults based on version number
vars = meta['instance_vars']
for k, v in cls.MetaDefault['instance_vars'].items():
vars.setdefault(k, v)
for k, v in cls.MetaDefault.items():
meta.setdefault(k, v)
# preparing for future versions in case reconstruction changes
version = meta['version']
if version != cls.MetaVersion:
# current version is 0, will not get hit
if version == 0:
pass
else:
raise ValueError(
f"DateTimeNano cannot load. Version {version!r} not supported. Current version installed is {cls.MetaVersion!r}. Update riptable.")
# datetime nano integers are always in GMT
instance = [*arrdict.values()][0]
instance = cls(instance, from_tz='GMT', to_tz=vars['_to_tz'])
# after constructor is called, restore all instance variables
# only need to set this one, to_tz, timezone_str are handled by TimeZone class
instance._display_length = vars['_display_length']
return instance
def _meta_dict(self, name=None):
"""Meta dictionary for _build_sds_meta_data, _as_meta_data
"""
classname = self.__class__.__name__
if name is None:
name = classname
metadict = {
'name': name,
'typeid': getattr(TypeId, classname),
'classname': classname,
'ncols': 0,
'version': self.MetaVersion,
'author': 'python',
'instance_vars': {
'_display_length': self.display_length,
'_to_tz': self._timezone._to_tz
},
'_base_is_stackable': SDSFlag.Stackable
}
return metadict
# ------------------------------------------------------------
@classmethod
def _load_from_sds_meta_data(cls, name, arr, cols, meta, tups: Optional[list] = None):
'''
Note
----
This will be changed to a private method with a different name as it only pertains
to the SDS file format.
Load DateTimeNano from an SDS file as the correct class.
Restore formatting if different than default.
Parameters
----------
name : item's name in the calling container, or the classname 'DateTimeNano' by default
arr : underlying integer FastArray in UTC nanoseconds
cols : empty list (not used for this class)
meta : meta data generated by the _build_sds_meta_data() routine
tups : empty list (not used for this class)
Returns the reconstructed DateTimeNano object.
'''
if tups is None:
tups = list()
if not isinstance(meta, MetaData):
meta = MetaData(meta)
# combine saved attributes with defaults based on version number
vars = meta['instance_vars']
for k, v in cls.MetaDefault['instance_vars'].items():
vars.setdefault(k, v)
for k, v in cls.MetaDefault.items():
meta.setdefault(k, v)
# preparing for future versions in case reconstruction changes
version = meta['version']
if version != cls.MetaVersion:
# current version is 0, will not get hit
if version == 0:
pass
else:
raise ValueError(
f"DateTimeNano cannot load. Version {version!r} not supported. Current version installed is {cls.MetaVersion!r}. Update riptable.")
# datetime nano integers are always in GMT
instance = DateTimeNano(arr, from_tz='GMT', to_tz=vars['_to_tz'])
# after constructor is called, restore all instance variables
# only need to set this one, to_tz, timezone_str are handled by TimeZone class
instance._display_length = vars['_display_length']
return instance
# ------------------------------------------------------------
@classmethod
def newclassfrominstance(cls, instance, origin):
'''
Restore timezone/length info.
'''
result = instance.view(cls)
result._timezone = origin._timezone.copy()
result._display_length = origin._display_length
return result
# ------------------------------------------------------------
def info(self):
'''
Returns
-------
str
Verbose array repr with timezone information.
'''
print(self.__repr__(verbose=True))
# ------------------------------------------------------------
def __repr__(self, verbose=False):
repr_strings = []
tz_string = f", to_tz='{self._timezone._to_tz}'"
repr_strings.append(self.get_classname() + "([" + self._build_string() + "]" + tz_string + ")")
if verbose is False:
return "\n".join(repr_strings)
repr_strings.append(f"Displaying in timezone: {self._timezone._timezone_str}")
repr_strings.append(f"Origin: {self._timezone._from_tz}")
repr_strings.append(f"Offset: {self._timezone._offset} hours")
return "\n".join(repr_strings)
# ------------------------------------------------------------
@classmethod
def hstack(cls, dtlist):
'''
Performs an hstack on a list of DateTimeNano objects.
All items in list must have their display set to the same timezone.
Parameters
----------
dtlist : obj:`list` of obj:`DateTimeNano`
DateTimeNano objects to be stacked.
Examples
--------
>>> dtn1 = DateTimeNano(['2019-01-01', '2019-01-02'], from_tz='NYC')
>>> dtn2 = DateTimeNano(['2019-01-03', '2019-01-04'], from_tz='NYC')
>>> DateTimeNano.hstack([dtn1, dtn2])
DateTimeNano([20190101 00:00:00.000000000, 20190102 00:00:00.000000000, 20190103 00:00:00.000000000, 20190104 00:00:00.000000000])
Returns
-------
obj:`DateTimeNano`
'''
return hstack_any(dtlist, cls, DateTimeNano)
# ------------------------------------------------------------
def shift(self, periods=1):
'''
Modeled on pandas.shift.
Values in the array will be shifted to the right if periods is positive, to the left if negative.
Spaces at either end will be filled with invalid.
If abs(periods) >= the length of the array, the result will be full of invalid.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
obj:`DateTimeNano`
'''
temp = FastArray.shift(self, periods=periods)
return self.newclassfrominstance(temp, self)
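# Illustrative usage sketch (added; not from the original source). shift() moves values
# and fills the vacated slots with the invalid sentinel, which displays as 'Inv'.
# >>> dtn = DateTimeNano(['2019-01-01', '2019-01-02', '2019-01-03'], from_tz='NYC')
# >>> dtn.shift(1)     # first slot becomes invalid, remaining values move right
# >>> dtn.shift(-1)    # last slot becomes invalid, remaining values move left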
# -------------------------------------------------------------
def cut_time(self, buckets: Union[int, 'TimeSpan', List], start_time: Tuple = None,
end_time: Tuple = None, add_pre_bucket: bool = False, add_post_bucket: bool = False,
label: str = "left", label_fmt: str = "%H:%M", nyc: bool = False) -> TypeRegister.Categorical:
"""
Analogous to rt.cut() but for times. We ignore the date part and cut based on time of day component only.
Parameters
----------
buckets: int or rt.TimeSpan or a list of tuples for custom buckets
Specify the bucket size or the buckets themselves. Supply either an int for the common use case of equally sized minute buckets, or a custom list.
Acceptable list format:
[(h, m, s, ms)] - fields are assumed to be 0 if the tuple has fewer than 4 entries
start_time: optional if buckets is explicitly supplied, (h, m) or (h, m, s) or (h, m, s, ms) tuple
left end point of the first bucket; this type may change in the future
end_time:
see start_time, right end point of the last bucket
add_pre_bucket: bool
add a pre-open bucket, or treat times before the open as invalid?
add_post_bucket: bool
add an after-close bucket, or treat times after the close as invalid?
label: optional str
"left": for left end points
"right": for right end points
label_fmt: optional str
strftime format for label
nyc: bool, default is False
convenience shortcut to default to NYC start and end time, ignored if buckets explicitly supplied
Returns
-------
rt.Categorical
See Also
--------
Inspired by pandas TimeGrouper.
Examples
--------
TODO - sanitize - add cut_time examples (an illustrative usage sketch follows this method)
See the version history for structure of older examples.
"""
# first define some helper functions
def timetuple_to_nsm(tup) -> int:
if not 2 <= len(tup) <= 4:
raise ValueError("Expected (h,m), (h,m,s) or (h,m,s,ms)")
zeros = (0,) * (4 - len(tup))
h, m, s, ms = tup + zeros
return 1_000_000 * (ms + 1000 * (s + 60 * (m + 60 * h)))
def nano_to_label(nanos: int) -> str:
# Just care about minutes, hours
return DateTimeNanoScalar(nanos, to_tz='UTC').strftime(label_fmt)
def scalar(arr_or_scalar):
try:
len(arr_or_scalar)
except Exception:
return arr_or_scalar
if len(arr_or_scalar) == 1:
return arr_or_scalar[0]
raise ValueError("not a length 1 array")
# end helper functions
is_already_list = False
if isinstance(buckets, int):
buckets = TimeSpan(buckets, "m")
elif isinstance(buckets, TimeSpan):
pass
elif isinstance(buckets, type([])):
is_already_list = True
else:
raise ValueError(f"Unknown bucket_size type, got: {type(buckets)}")
# two cases bucket_size is already a list or it's a TimeSpan
if is_already_list:
bucket_cut_points = [timetuple_to_nsm(xx) for xx in sorted(buckets)]
else:
if nyc and (start_time is not None and end_time is not None):
raise ValueError("If nyc is True then you can't set both start and end time bounds")
if nyc:
if start_time is None: start_time = (9, 30)
if end_time is None: end_time = (16, 15)
if start_time is None or end_time is None:
raise ValueError("Need to specify start and end times")
bucket_cut_points = []
now_nsm = timetuple_to_nsm(start_time)
end_time_nsm = timetuple_to_nsm(end_time)
bucket_size_nsm = buckets.nanoseconds
while now_nsm < end_time_nsm:
bucket_cut_points.append(scalar(now_nsm))
now_nsm += bucket_size_nsm
bucket_cut_points.append(end_time_nsm)
if add_pre_bucket:
bucket_cut_points.insert(0, timetuple_to_nsm((0, 0)))  # prepend the midnight cut point for the pre-open bucket
if add_post_bucket:
bucket_cut_points.append(timetuple_to_nsm((24, 0)))
if label == "right":
bucket_cut_labels = [nano_to_label(xx) for xx in bucket_cut_points[1:]]
elif label == "left":
bucket_cut_labels = [nano_to_label(xx) for xx in bucket_cut_points[:-1]]
else:
raise ValueError(f"Unknown label, got {label}")
if add_pre_bucket:
bucket_cut_labels[0] = "pre"
if add_post_bucket:
bucket_cut_labels[-1] = "post"
cat = searchsorted(bucket_cut_points, self.nanos_since_midnight())
# map right side invalid to 0
cat[cat >= len(bucket_cut_points)] = 0
return TypeRegister.Categorical(cat, bucket_cut_labels, base_index=1, ordered=False)
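# Illustrative sketch for cut_time (added; not from the original source — outputs omitted
# because the exact labels depend on label_fmt and the bucket boundaries chosen):
# >>> dtn = DateTimeNano(['2019-01-07 09:45', '2019-01-07 10:32', '2019-01-07 15:59'], from_tz='NYC')
# >>> dtn.cut_time(15, nyc=True)                                 # 15-minute buckets between 09:30 and 16:15
# >>> dtn.cut_time([(9, 30), (12, 0), (16, 15)], label='left')   # custom bucket edges as (h, m) tuples
# Each call returns a Categorical whose categories are the formatted bucket labels.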
# -------------------------------------------------------------
def fill_invalid(self, shape=None, dtype=None, inplace=True):
arr = self._fill_invalid_internal(shape=shape, dtype=self.dtype, fill_val=self.NAN_TIME, inplace=inplace)
if arr is None:
return
return DateTimeNano(arr, from_tz='GMT', to_tz=self._timezone._to_tz)
# -------------------------------------------------------------
def isnan(self):
'''
Boolean array, True where DateTimeNano == NaN time or int64 sentinel (min int)
Returns
-------
bool array
True where object == NaN time or int64 sentinel (min int), otherwise False.
Notes
-----
Currently using 0 for NaN time. This constant is held in the DateTimeBase class.
'''
return self._fa.isnanorzero()
# -------------------------------------------------------------
def isnotnan(self):
'''
Boolean array, True where DateTimeNano != NaN time or int64 sentinel (min int).
Returns
-------
bool array
True where object != NaN time or int64 sentinel (min int), otherwise False.
Notes
-----
Currently using 0 for NaN time. This constant is held in the DateTimeBase class.
'''
return ~self.isnan()
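# Added note (illustrative): isnan() and isnotnan() are exact complements, and both treat 0
# and the int64 sentinel as invalid because the underlying array stores integer nanoseconds
# with 0 reserved for NaN time.
# >>> dtn = DateTimeNano.random_invalid(5)
# >>> dtn.isnan() | dtn.isnotnan()    # expected to be all True: every slot is one or the other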
# -------------------------------------------------------------
def _datetimenano_compare_check(self, funcname, other):
caller = self._fa
if isinstance(other, (DateTimeNano, DateTimeNanoScalar)):
if other._timezone._to_tz != self._timezone._to_tz:
warnings.warn(
f'DateTimeNano objects are being displayed in different timezones. Results may not appear to be correct for {funcname}')
elif isinstance(other, (Date, DateScalar)):
other = DateTimeNano(other._fa * NANOS_PER_DAY, from_tz=self._timezone._to_tz, to_tz=self._timezone._to_tz)
elif isinstance(other, (TimeSpan, DateSpan, TimeSpanScalar, DateSpanScalar)):
raise TypeError(f'Cannot compare DateTimeNano with {type(other)}')
# let everything else fall through to fast array
# restore invalids
return self._preserve_invalid_comparison(caller, other, funcname)
# -------------------COMPARISONS------------------------------
# ------------------------------------------------------------
def __ne__(self, other):
return self._datetimenano_compare_check('__ne__', other)
def __eq__(self, other):
return self._datetimenano_compare_check('__eq__', other)
def __ge__(self, other):
return self._datetimenano_compare_check('__ge__', other)
def __gt__(self, other):
return self._datetimenano_compare_check('__gt__', other)
def __le__(self, other):
return self._datetimenano_compare_check('__le__', other)
def __lt__(self, other):
return self._datetimenano_compare_check('__lt__', other)
# -------------------------------------------------------------
def min(self, **kwargs):
'''
Earliest timestamp in array.
Returns
-------
obj:`DateTimeNano`
An array with length 1.
Note
----
This returns an array, not a scalar. However, broadcasting rules will apply to operations with it.
'''
return DateTimeNano([self._fa.min()], from_tz='GMT', to_tz=self._timezone._to_tz)
# return DateTimeNanoScalar(self._fa.min(), timezone=self._timezone)
# -------------------------------------------------------------
def max(self, **kwargs):
'''
Latest timestamp in array.
Returns
-------
obj:`DateTimeNano`
An array with length 1.
Note
----
This returns an array, not a scalar. However, broadcasting rules will apply to operations with it.
'''
return DateTimeNano([self._fa.max()], from_tz='GMT', to_tz=self._timezone._to_tz)
# return DateTimeNanoScalar(self._fa.max(), timezone=self._timezone)
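# Added note (illustrative sketch): min()/max() return length-1 DateTimeNano arrays rather than
# scalars, so the result can be broadcast against the full array:
# >>> dtn = DateTimeNano.random(10)
# >>> span = dtn.max() - dtn.min()    # length-1 TimeSpan covering the whole range
# >>> dtn - dtn.min()                 # broadcasts: TimeSpan offset of each element from the earliest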
# -------------------------------------------------------------
def diff(self, periods=1):
'''
Calculate the n-th discrete difference.
Parameters
----------
periods : int, optional
The number of times values are differenced. If zero, the input
is returned as-is.
Returns
-------
obj:`TimeSpan`
'''
return TimeSpan(self._fa.diff(periods=periods).astype(np.float64))
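# Illustrative sketch (added; values are assumptions for illustration only). diff() returns a
# TimeSpan of element-to-element gaps; the leading `periods` entries have no predecessor and
# are expected to display as invalid.
# >>> dtn = DateTimeNano(['2019-01-01 10:00', '2019-01-01 10:30', '2019-01-01 11:45'], from_tz='NYC')
# >>> dtn.diff()    # roughly TimeSpan([Inv, 00:30:00..., 01:15:00...])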
# -------------------------------------------------------------
def __radd__(self, other):
return self.__add__(other)
# -------------------------------------------------------------
def __iadd__(self, other):
# warnings.warn(f'Currently allowing inplace operation __iadd__ on DateTimeNano. May change in the future.')
return self.__add__(other, inplace=True)
# -------------------------------------------------------------
def __add__(self, other, inplace=False):
call_super = False
other_inv_mask = None
func = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
op = None
return_type = DateTimeNano
if not isinstance(other, np.ndarray) and not isinstance(other, (DateTimeNanoScalar, DateScalar, TimeSpanScalar, DateSpanScalar)):
# TJD change
if np.isscalar(other):
other = np.int64(other)
else:
other = FastArray(other, dtype=np.int64)
# op = MATH_OPERATION.ADDDATES
call_super = True
else:
if isinstance(other, (DateTimeNano, DateTimeNanoScalar)):
raise TypeError(f'Cannot add two objects {type(self)} and {type(other)}')
elif isinstance(other, (Date, DateScalar)):
raise TypeError(f'Cannot add two objects {type(self)} and {type(other)}')
elif isinstance(other, (TimeSpan, TimeSpanScalar)):
other_inv_mask = isnan(other)
other = other.astype(np.int64)
call_super = True
# op = MATH_OPERATION.ADDDATES
elif isinstance(other, (DateSpan, DateSpanScalar)):
other_inv_mask = isnan(other)
other = other.astype(np.int64) * NANOS_PER_DAY
call_super = True
# op = MATH_OPERATION.ADDDATES
else:
other = other.view(FastArray)
other = other.astype(np.int64, copy=False)
call_super = True
# op = MATH_OPERATION.ADDDATES
if inplace:
funcname = '__iadd__'
else:
funcname = '__add__'
return self._build_mathops_result(other, funcname, call_super, other_inv_mask, inplace, op, return_type)
# -------------------------------------------------------------
def __rsub__(self, other):
if isinstance(other, (Date, DateScalar)):
return other.__sub__(self)
else:
raise TypeError(f'DateTimeNano can only be subtracted from DateTimeNano or Date.')
# -------------------------------------------------------------
def __isub__(self, other):
warnings.warn(f'Currently allowing inplace operation __isub__ on DateTimeNano. May change in the future.')
return self.__sub__(other, inplace=True)
# -------------------------------------------------------------
def __sub__(self, other, inplace=False):
call_super = False
other_inv_mask = None
func = TypeRegister.MathLedger._BASICMATH_TWO_INPUTS
op = None
if not isinstance(other, np.ndarray) and not isinstance(other, (DateTimeNanoScalar, DateScalar, TimeSpanScalar, DateSpanScalar)):
return_type = DateTimeNano
# TJD change
if np.isscalar(other):
other = np.int64(other)
else:
other = FastArray(other, dtype=np.int64)
call_super = True
else:
if isinstance(other, (DateTimeNano, DateTimeNanoScalar)):
# ready to go
return_type = TimeSpan
if inplace:
raise TypeError(f'__sub__ returns TimeSpan. Cannot perform inplace.')
op = MATH_OPERATION.SUBDATETIMES
elif isinstance(other, (Date, DateScalar)):
return_type = TimeSpan
op = MATH_OPERATION.SUBDATETIMES
if inplace:
raise TypeError(f'__sub__ returns TimeSpan. Cannot perform inplace.')
# upcast Date
other = other.astype(np.int64) * NANOS_PER_DAY
elif isinstance(other, (TimeSpan, TimeSpanScalar)):
# apply our own mask during this track
return_type = DateTimeNano
# upcast TimeSpan to preserve precision
other = other.astype(np.int64)
call_super = True
elif isinstance(other, (DateSpan, DateSpanScalar)):
return_type = DateTimeNano
# need to get mask before upcasting
other_inv_mask = isnan(other)
other = other.astype(np.int64) * NANOS_PER_DAY
call_super = True
else:
# user fastarray operation
return_type = DateTimeNano
other = other.view(FastArray)
other = other.astype(np.int64, copy=False)
# op = MATH_OPERATION.SUBDATETIMESLEFT
call_super = True
if inplace:
funcname = '__isub__'
else:
funcname = '__sub__'
return self._build_mathops_result(other, funcname, call_super, other_inv_mask, inplace, op, return_type)
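# Summary sketch of the subtraction dispatch above (added for clarity; return types follow the
# branches in __sub__; the names dtn2, a_date, a_timespan, a_datespan are placeholders, not
# objects defined in this module):
# >>> dtn - dtn2            # DateTimeNano - DateTimeNano  -> TimeSpan
# >>> dtn - a_date          # DateTimeNano - Date          -> TimeSpan
# >>> dtn - a_timespan      # DateTimeNano - TimeSpan      -> DateTimeNano
# >>> dtn - a_datespan      # DateTimeNano - DateSpan      -> DateTimeNano
# >>> dtn - 5_000_000_000   # plain numeric nanoseconds    -> DateTimeNano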
def __matmul__(self, other): raise NotImplementedError
# need to check properties to see if division is happening
#def __truediv__(self, other): raise NotImplementedError
#def __floordiv__(self, other): raise NotImplementedError
#def __mod__(self, other): raise NotImplementedError
#def __divmod__(self, other): raise NotImplementedError
def __pow__(self, other, modulo=None): raise NotImplementedError
def __lshift__(self, other): raise NotImplementedError
def __rshift__(self, other): raise NotImplementedError
def __and__(self, other): raise NotImplementedError
def __xor__(self, other): raise NotImplementedError
def __or__(self, other): raise NotImplementedError
def __rmul__(self, other): raise NotImplementedError
def __rmatmul__(self, other): raise NotImplementedError
def __rtruediv__(self, other): raise NotImplementedError
def __rfloordiv__(self, other): raise NotImplementedError
def __rmod__(self, other): raise NotImplementedError
def __rdivmod__(self, other): raise NotImplementedError
def __rpow__(self, other): raise NotImplementedError
def __rlshift__(self, other): raise NotImplementedError
def __rrshift__(self, other): raise NotImplementedError
def __rand__(self, other): raise NotImplementedError
def __rxor__(self, other): raise NotImplementedError
def __ror__(self, other): raise NotImplementedError
def __imul__(self, other): raise NotImplementedError
def __imatmul__(self, other): raise NotImplementedError
def __itruediv__(self, other): raise NotImplementedError
def __ifloordiv__(self, other): raise NotImplementedError
def __imod__(self, other): raise NotImplementedError
def __ipow__(self, other, modulo=None): raise NotImplementedError
def __ilshift__(self, other): raise NotImplementedError
def __irshift__(self, other): raise NotImplementedError
def __iand__(self, other): raise NotImplementedError
def __ixor__(self, other): raise NotImplementedError
def __ior__(self, other): raise NotImplementedError
def __neg__(self): raise NotImplementedError
def __pos__(self): raise NotImplementedError
def __invert__(self): raise NotImplementedError
def __complex__(self): raise NotImplementedError
def __int__(self): raise NotImplementedError
def __float__(self): raise NotImplementedError
def __round__(self, ndigits=0): raise NotImplementedError
def __trunc__(self): raise NotImplementedError
def __floor__(self): raise NotImplementedError
def __ceil__(self): raise NotImplementedError
# -------------------------------------------------------------
# ----raise error on certain math operations-------------------
# def __radd__(self, value):
# return self.__add__(value)
def __mul__(self, value):
return self._guard_math_op(value, '__mul__')
def __floordiv__(self, value):
return self._guard_math_op(value, '__floordiv__')
def __truediv__(self, value):
return self._guard_math_op(value, '__truediv__')
def __abs__(self):
raise TypeError(f"Cannot perform absolute value on DateTimeNano object.")
def _guard_math_op(self, value, op_name):
if isinstance(value, DateTimeBase):
raise TypeError(f"Cannot perform operation {op_name} between DateTimeNano and {type(value)}")
op = getattr(self._fa, op_name)
return op(value)
# -------------------------------------------------------------
@classmethod
def _random(cls, sz, to_tz='NYC', from_tz='NYC', inv=None, start=None, end=None):
'''
Internal routine for random(), random_invalid()
'''
if start is None:
start = NANOS_PER_YEAR
end = NANOS_PER_YEAR * 50
else:
start = (start - 1970) * NANOS_PER_YEAR
if end is None:
# maybe test if leap year?
end = start + NANOS_PER_YEAR
arr = np.random.randint(start, end, sz, dtype=np.int64)
if inv is not None:
putmask(arr, inv, 0)
return DateTimeNano(arr, to_tz=to_tz, from_tz=from_tz)
@classmethod
def random(cls, sz, to_tz='NYC', from_tz='NYC', inv=None, start=None, end=None):
'''
Returns a random DateTimeNano object.
Times will range from 1971 to 2021 unless `start` and `end` are specified.
Parameters
----------
sz : int
Length of generated array
to_tz : str, optional, default 'NYC'
Timezone string for display
from_tz : str, optional, default 'NYC'
Timezone string for timezone of origin
inv : bool array, optional, default None
An invalid mask True where invalid times should be inserted
start : int, optional, default None
Start year for range of random times. If no end year provided, all times will be within start year
end : int, optional, default None
End year for range of random times. Only used if `start` provided.
Examples
--------
>>> DateTimeNano.random(3)
DateTimeNano([19980912 15:31:08.025189457, 19931121 15:48:32.855425859, 19930915 14:58:31.376750294])
Returns
-------
obj:`DateTimeNano`
See Also
--------
DateTimeNano.random_invalid
'''
return cls._random(sz, to_tz=to_tz, from_tz=from_tz, inv=inv, start=start, end=end)
@classmethod
def random_invalid(cls, sz, to_tz='NYC', from_tz='NYC', start=None, end=None):
'''
Returns a random DateTimeNano object. Inserts invalids using a random boolean mask.
Times will range from 1971 to 2021 unless `start` and `end` are specified.
Parameters
----------
sz : int
Length of generated array
to_tz : str, optional, default 'NYC'
Timezone string for display
from_tz : str, optional, default 'NYC'
Timezone string for timezone of origin
start : int, optional, default None
Start year for range of random times. If no end year provided, all times will be within start year
end : int, optional, default None
End year for range of random times. Only used if `start` provided.
Same as DateTimeNano.random(), but random invalid mask is also generated.
Examples
--------
>>> DateTimeNano.random_invalid(3)
DateTimeNano([19920830 16:17:24.935335183, Inv, Inv])
Returns
-------
obj:`DateTimeNano`
See Also
--------
DateTimeNano.random
'''
inv = np.random.randint(0, 2, sz, dtype=bool)  # np.bool was removed in newer NumPy; use the builtin bool
return cls._random(sz, to_tz=to_tz, from_tz=from_tz, inv=inv, start=start, end=end)
# -------------------------------------------------------------
def resample(self, rule, dropna=False):
"""Convenience method for frequency conversion and resampling of
DateTimeNano arrays.
Parameters
----------
rule : string
The offset string or object representing target conversion.
Can also begin the string with a number e.g. '3H'
Currently supported:
H hour
T, min minute
S second
L, ms millisecond
U, us microsecond
N, ns nanosecond
dropna : bool, default False
If True, returns a DateTimeNano the same length as caller, with all values rounded to specified frequency.
If False, returns a DateTimeNano range from caller's min to max with values at every specified frequency.
Examples
--------
>>> dtn = DateTimeNano(['2015-04-15 14:26:54.735321368',
'2015-04-20 07:30:00.858219615',
'2015-04-23 13:15:24.526871083',
'2015-04-21 02:25:11.768548100',
'2015-04-24 07:47:54.737776979',
'2015-04-10 23:59:59.376589955'],
from_tz='UTC', to_tz='UTC')
>>> dtn.resample('L', dropna=True)
DateTimeNano(['20150415 14:26:54.735000000', '20150420 07:30:00.858000000', '20150423 13:15:24.526000000', '20150421 02:25:11.768000000', '20150424 07:47:54.737000000', '20150410 23:59:59.376000000'], to_tz='UTC')
>>> dtn = DateTimeNano(['20190417 17:47:00.000001',
'20190417 17:47:00.000003',
'20190417 17:47:00.000005'],
from_tz='NYC')
>>> dtn.resample('1us')
DateTimeNano(['20190417 17:47:00.000001000', '20190417 17:47:00.000002000', '20190417 17:47:00.000003000', '20190417 17:47:00.000004000', '20190417 17:47:00.000005000'], to_tz='NYC')
Returns
-------
dtn : `DateTimeNano`
"""
# -------------------------------------------------------
def parse_rule(rule):
# returns an integer or float amount and unit string
amount = None
for i, c in enumerate(rule):
if not c.isnumeric() and c != '.':
if i == 0:
amount = 1
else:
amount = rule[:i]
try:
amount = int(amount)
except:
amount = float(amount)
break
# never hit a string interval code
if amount is None:
raise ValueError(self._INVALID_FREQ_ERROR.format(rule))
unit = rule[i:].upper()
unit = self.FrequencyStrings.get(unit, None)
if unit is None:
raise ValueError(self._INVALID_FREQ_ERROR.format(rule))
return amount, unit
# -------------------------------------------------------
def get_time_unit(unit):
if unit in TimeSpan.unit_convert_factors:
unit = TimeSpan.unit_convert_factors[unit]
else:
raise NotImplementedError(f'Currently supports frequency strings {[*self.FrequencyStrings]}')
return unit
# -------------------------------------------------------
def time_interval(amount, unit):
# amount is a multiplier for the unit
# unit is a TimeSpan unit or for larger units, will be assigned separately to maintain precision
# TODO: check for nan times
# should these be included in any min/max calculation?
# TJD note this needs to be reviewed... min max should return a scalar not an array of 1
start = np.int64(self.min()[0])
end = np.int64(self.max()[0])
unit = get_time_unit(unit)
step = np.int64(amount * unit)
start = start - (start % step)
# should this include both ends?
end = (end - (end % step) + step)
stamps = arange(start, end, step=step)
interval = DateTimeNano(stamps, to_tz=self._timezone._to_tz)
return interval
# -------------------------------------------------------
def as_time_interval(amount, unit):
# returns a date time nano the same length as the original
# may have repeats, empty will not appear
unit = get_time_unit(unit)
step = np.int64(amount * unit)
timediff = self._fa % step
return self - timediff
# -------------------------------------------------------
if not isinstance(rule, str):
raise TypeError(f'Rule must be a string. Got {type(rule)}.')
amount, unit = parse_rule(rule)
if dropna:
resampled = as_time_interval(amount, unit)
else:
resampled = time_interval(amount, unit)
return resampled
# ========================================================
class TimeSpanBase:
"""
"""
ReduceFuncs = False
unit_convert_factors = {
'Y': NANOS_PER_YEAR,
'W': NANOS_PER_DAY * 7,
'D': NANOS_PER_DAY,
'h': NANOS_PER_HOUR,
'm': NANOS_PER_MINUTE,
's': NANOS_PER_SECOND,
'ms': NANOS_PER_MILLISECOND,
'us': NANOS_PER_MICROSECOND,
'ns': 1
}
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def strftime(self, format, dtype='U'):
'''
Converts a TimeSpan to an array of strings, or to a scalar string, using datetime.strftime formatting.
This routine has not been sped up yet.
Other Parameters
----------------
dtype: defaults to 'U', can change to 'S' for bytestrings
Examples
--------
>>> rt.Date(rt.utcnow(4)).strftime('%D')
array(['11/04/19', '11/04/19', '11/04/19', '11/04/19'], dtype=object)
See Also
---------
http://strftime.org for format strings
datetime.datetime.strftime
'''
# get negative mask since strftime does not handle negative
isnegative = self._fa < 0
if isinstance(self, np.ndarray):
result= np.asarray([dt.utcfromtimestamp(timestamp).strftime(format) for timestamp in self._fa.abs() / 1_000_000_000.0], dtype=dtype)
if isnegative.sum() > 0:
if dtype == 'S':
negcol = zeros(result.shape, dtype='S1')
negcol[isnegative] = b'-'
else:
negcol = zeros(result.shape, dtype='U1')
negcol[isnegative] = '-'
result = negcol + result
else:
result = dt.strftime(dt.utcfromtimestamp(abs(self) / 1_000_000_000.0), format)
if isnegative:
# check dtype 'S'
if dtype == 'S':
result = b'-' + result
else:
result = '-' + result
return result
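# Added note (illustrative): negative spans are formatted from their absolute value and then
# prefixed with '-' (or b'-' when dtype='S'), because strftime cannot handle negative timestamps.
# >>> ts = TimeSpan(['02:20:00']) - TimeSpan(['05:00:00'])   # a negative span
# >>> ts.strftime('%H:%M:%S')                                # expected to start with '-'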
# ------------------------------------------------------------
# --------RETURN FLOAT ARRAY AT DIFFERENT RESOLUTIONS---------
@property
def days(self):
"""Timespan as float64 array of days.
Note
----
Loss of nanosecond precision at ~52 days.
"""
return self._fa / NANOS_PER_DAY
@property
def hours(self):
"""Timespan as float64 array of hours."""
return self._fa / NANOS_PER_HOUR
@property
def minutes(self):
"""Timespan as float64 array of minutes."""
return self._fa / NANOS_PER_MINUTE
@property
def seconds(self):
"""Timespan as float64 array of seconds."""
return self._fa / NANOS_PER_SECOND
@property
def milliseconds(self):
"""Timespan as float64 array of milliseconds."""
return self._fa / NANOS_PER_MILLISECOND
@property
def microseconds(self):
"""Timespan as float64 array of microseconds."""
return self._fa / NANOS_PER_MICROSECOND
@property
def nanoseconds(self):
"""Timespan as float64 array of nanoseconds (same as underlying array)."""
return self._fa
@property
def hhmmss(self):
"""Timespan as int64 array in format HHMMSS."""
SEC_PER_MIN = 60
hour, remainder = divmod(self.astype(np.int64) // NANOS_PER_SECOND, 3600)
minutes, seconds = divmod(remainder, SEC_PER_MIN)
return (10_000 * hour + 100 * minutes + seconds).astype(np.int64)
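# Worked example (added, illustrative): hhmmss packs hours/minutes/seconds into one integer,
# dropping sub-second precision, e.g. 9 hours 35 minutes 7 seconds -> 93507.
# >>> ts = TimeSpan(['09:35:07.123'])
# >>> ts.hhmmss    # expected to contain 93507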
# ------------------------------------------------------------
@classmethod
def _unit_to_nano_span(cls, values, unit):
'''
:param values: FastArray from calling constructor
:param unit: unit string (see numpy's timedelta64 dtype)
'''
if isinstance(unit, bytes):
unit = unit.decode()
try:
mult = cls.unit_convert_factors[unit]
except:
raise ValueError(f"Cannot initialize span with {unit} units.")
if mult != 1:
values = values * mult
return values
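# Worked example (added, illustrative): with unit='ms' the multiplier is NANOS_PER_MILLISECOND,
# so a value of 250 becomes 250 * 1_000_000 = 250_000_000 nanoseconds. This is the same
# conversion applied when calling TimeSpan(values, unit='ms').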
# ------------------------------------------------------------
@staticmethod
def display_item(nanosecs, itemformat=None):
if itemformat is not None:
length = itemformat.length
else:
length = DisplayLength.Short
if length == DisplayLength.Medium:
return TimeSpan.display_item_unit(nanosecs)
else:
return TimeSpan.display_item_clock(nanosecs)
# ------------------------------------------------------------
@staticmethod
def display_item_unit(nanosecs):
'''
For each item, finds the largest standard time unit in which the magnitude falls between 1 and 1000,
e.g. 59.123m, 678.823ms, 30ns
'''
if np.isnan(nanosecs):
return "Inv"
# TODO add different formatting for large time spans (> 1 day)
divisor, unit_str = TimeSpan._display_resolution(nanosecs)
if divisor == 1:
delta = str(nanosecs)
else:
delta = nanosecs / divisor
delta = "{0:.3f}".format(delta)
return delta + unit_str
@staticmethod
def _display_resolution(nanosecs):
'''
Get extension and divisor for display_item_unit() (see above)
'''
nanosecs = abs(nanosecs)
divisor = NANOS_PER_HOUR
unit_str = 'h'
if nanosecs < 1_000:
divisor = 1
unit_str = 'ns'
elif nanosecs < 1_000_000:
divisor = 1_000
unit_str = 'us'
elif nanosecs < NANOS_PER_SECOND:
divisor = 1_000_000
unit_str = 'ms'
elif nanosecs < NANOS_PER_MINUTE:
divisor = NANOS_PER_SECOND
unit_str = 's'
elif nanosecs < NANOS_PER_HOUR:
divisor = NANOS_PER_MINUTE
unit_str = 'm'
# we should probably use a different format past this point
# maybe a formatting string with more info
# elif max_time < NANOS_PER_DAY:
# divisor = NANOS_PER_HOUR
# unit_str = 'h'
return divisor, unit_str
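# Worked example (added, illustrative): the thresholds above pick the largest unit that keeps
# the displayed magnitude readable, e.g.
#   750 ns            -> unit 'ns', shown as 750ns
#   1_500_000 ns      -> unit 'ms', shown as ~1.500ms
#   90_000_000_000 ns -> unit 'm',  shown as ~1.500m  (90 seconds)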
# ------------------------------------------------------------
@staticmethod
def display_item_clock(nanosecs):
'''
Long clock format (default) HH:MM:SS.<nano-decimal>
'''
format_str = "%H:%M:%S"
item = abs(nanosecs)
if isnan(item):
timestr = 'Inv'
else:
gmt_time = time.gmtime(item / NANOS_PER_SECOND)
timestr = DateTimeBase.DEFAULT_FORMATTER(format_str, gmt_time)
days = np.int64(item) // NANOS_PER_DAY
if days > 0:
timestr = str(days) + 'd ' + timestr
if nanosecs < 0:
timestr = "-" + timestr
timestr = DateTimeBase._add_nano_ext(item, timestr)
return timestr
# ------------------------------------------------------------
@staticmethod
def display_convert_func(nanosecs, itemformat: ItemFormat):
return TimeSpan.display_item(nanosecs, itemformat=itemformat)
# TODO uncomment when starfish is implemented and imported
# def _sf_display_query_properties(self):
# itemformat = sf.ItemFormat({'length':self.display_length,
# 'align':sf.DisplayAlign.Right})
# return itemformat, self.display_convert_func
# ------------------------------------------------------------
def display_query_properties(self):
# if TypeRegister.DisplayOptions.STARFISH:
# return self._sf_display_query_properties()
item_format = ItemFormat(
length=self.display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None
)
convert_func = self.display_convert_func
return item_format, convert_func
# --BINARY OPERATIONS------------------------------------------
# -------------------------------------------------------------
def __add__(self, value):
other_inv_mask = None
# TimeSpan add
if not isinstance(value, np.ndarray):
value = FastArray(value).astype(np.float64)
else:
# DateTimeNano / Date will fix up this operation
if isinstance(value, (DateTimeNano, DateTimeNanoScalar, Date, DateScalar)):
return value.__add__(self)
elif isinstance(value, (DateSpan, DateSpanScalar)):
other_inv_mask = isnan(value)
value = value._fa * NANOS_PER_DAY
else:
other_inv_mask = isnan(value)
value = value.view(FastArray)
value = value.astype(np.float64, copy=False)
return self._fix_binary_ops(value, '__add__', other_inv_mask=other_inv_mask)
# -------------------------------------------------------------
def __radd__(self, value):
return self.__add__(value)
# -------------------------------------------------------------
def __sub__(self, value):
if isinstance(value, (DateTimeNano, DateTimeNanoScalar, Date, DateScalar)):
return value.__rsub__(self)
return self._fix_binary_ops(value, '__sub__')
# -------------------------------------------------------------
def __rsub__(self, value):
if not isinstance(value, np.ndarray):
value = FastArray(value).astype(np.float64)
else:
if isinstance(value, (DateTimeNano, DateTimeNanoScalar, Date, DateScalar)):
return value.__sub__(self)
elif isinstance(value, (DateSpan, DateSpanScalar)):
other_inv_mask = isnan(value)
value = value._fa * NANOS_PER_DAY
# interpret everything else as nanosecond timespan values
else:
other_inv_mask = isnan(value)
value = value.view(FastArray)
value = value.astype(np.float64, copy=False)
return self._fix_binary_ops(value, '__rsub__')
# -------------------------------------------------------------
def __mul__(self, value):
if isinstance(value, (TimeSpan, DateSpan, Date, DateTimeNano, TimeSpanScalar, DateSpanScalar, DateScalar, DateTimeNanoScalar)):
raise TypeError(f"Cannot multiply TimeSpan by {type(value)} object.")
if not isinstance(value, np.ndarray):
value = FastArray(value).astype(np.float64)
return self._fix_binary_ops(value, '__mul__')
# -------------------------------------------------------------
def __rmul__(self, other):
return self.__mul__(other)
# -------------------------------------------------------------
def __floordiv__(self, value):
if isinstance(value, (TimeSpan, TimeSpanScalar)):
result = self._fa.__floordiv__(value)
return result
else:
raise TypeError(f"Can only floor divide TimeSpan objects with other timespan objects not type {type(value)}.")
# -------------------------------------------------------------
def __truediv__(self, value):
# handle TimeSpan('00:30:00') / TimeSpan('01:00:00') with truediv
if isinstance(value, (TimeSpan, TimeSpanScalar)):
return self._fa.__truediv__(value)
return self._fix_binary_ops(value, '__truediv__')
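# Illustrative sketch (added): dividing two TimeSpans yields a plain ratio (a float array),
# while dividing by a number rescales the span and returns a TimeSpan:
# >>> TimeSpan(['00:30:00']) / TimeSpan(['01:00:00'])   # ~0.5, not a TimeSpan
# >>> TimeSpan(['01:00:00']) / 4                        # a 15-minute TimeSpan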
# -------------------------------------------------------------
def _fix_binary_ops(self, value, op_name, other_inv_mask=None):
'''
Preserves invalids from integer arrays. If valid, wraps result fastarray in TimeSpan object.
'''
# print("binary", type(self), type(value), op_name)
if np.isscalar(self):
op = getattr(np.float64, op_name)
result = op(self, value)
else:
# get the array version
op = getattr(FastArray, op_name)
result = op(self, value)
if np.isscalar(result):
result = TimeSpanScalar(result)
elif isinstance(result, np.ndarray):
if other_inv_mask is None:
# this shouldn't get hit, test
if result.dtype.char in NumpyCharTypes.AllInteger:
inv_mask = value == INVALID_DICT[result.dtype.num]
result[inv_mask] = np.nan
else:
# possible nan fill
if len(other_inv_mask) == 1:
if other_inv_mask[0]:  # mask is already boolean; True means the other operand was invalid
result = TimeSpan(full(len(self), np.nan, dtype=np.float64))
else:
result[other_inv_mask] = np.nan
result = TimeSpan(result)
return result
def __pow__(self, other, modulo=None): raise NotImplementedError
def __lshift__(self, other): raise NotImplementedError
def __rshift__(self, other): raise NotImplementedError
def __and__(self, other): raise NotImplementedError
def __xor__(self, other): raise NotImplementedError
def __or__(self, other): raise NotImplementedError
# def __rmul__(self, other): raise NotImplementedError
def __rmatmul__(self, other): raise NotImplementedError
def __rtruediv__(self, other): raise NotImplementedError
def __rfloordiv__(self, other): raise NotImplementedError
def __rmod__(self, other): raise NotImplementedError
def __rdivmod__(self, other): raise NotImplementedError
def __rpow__(self, other): raise NotImplementedError
def __rlshift__(self, other): raise NotImplementedError
def __rrshift__(self, other): raise NotImplementedError
def __rand__(self, other): raise NotImplementedError
def __rxor__(self, other): raise NotImplementedError
def __ror__(self, other): raise NotImplementedError
def __imul__(self, other): raise NotImplementedError
def __imatmul__(self, other): raise NotImplementedError
def __itruediv__(self, other): raise NotImplementedError
def __ifloordiv__(self, other): raise NotImplementedError
def __imod__(self, other): raise NotImplementedError
def __ipow__(self, other, modulo=None): raise NotImplementedError
def __ilshift__(self, other): raise NotImplementedError
def __irshift__(self, other): raise NotImplementedError
def __iand__(self, other): raise NotImplementedError
def __ixor__(self, other): raise NotImplementedError
def __ior__(self, other): raise NotImplementedError
# def __neg__(self): raise NotImplementedError
# def __pos__(self): raise NotImplementedError
# def __abs__(self): raise NotImplementedError
def __invert__(self): raise NotImplementedError
def __complex__(self): raise NotImplementedError
def __int__(self): raise NotImplementedError
# def __float__(self): raise NotImplementedError
def __round__(self, ndigits=0): raise NotImplementedError
def __trunc__(self): raise NotImplementedError
def __floor__(self): raise NotImplementedError
def __ceil__(self): raise NotImplementedError
#--UNARY OPERATIONS-------------------------------------------
#-------------------------------------------------------------
def __abs__(self): return self._unary_ufunc_builder('__abs__')
def __neg__(self): return self._unary_ufunc_builder('__neg__')
def __pos__(self): return self._unary_ufunc_builder('__pos__')
def abs(self): return self.__abs__()
def _unary_ufunc_builder(self, op_name):
if np.isscalar(self):
func = getattr(np.float64, op_name)
return TimeSpanScalar(func(self))
else:
# call the fastarray version of the function
return TimeSpan(getattr(self._fa, op_name)())
# ------------------------------------------------------------
@classmethod
def _reduce_func_builder(cls):
'''
Generates all reduce functions - which return a single value (in nanoseconds).
The value will be flipped to float64 (we don't need higher precision than nanoseconds), and put in a
new TimeSpan.
'''
for name in ['sum', 'mean', 'std', 'var', 'min', 'max', 'median',
'nansum', 'nanmean', 'nanstd', 'nanvar', 'nanmin', 'nanmax', 'nanmedian']:
func_string = []
func_string.append("def " + name + "(self, **kwargs):")
func_string.append(" r = self._fa." + name + "()")
func_string.append(" r = FastArray(r, dtype=np.float64)")
func_string.append(" return TimeSpan(r)")
func_string.append("setattr(cls, '" + name + "', " + name + ")")
exec("\n".join(func_string))
# ------------------------------------------------------------
# -------------------------------------------------------------
def _timespan_compare_check(self, funcname, other):
func = getattr(self._fa, funcname)
if isinstance(other, (str, bytes)):
other = TimeSpan(other)[0]
if isinstance(other, (DateTimeNano, Date)):
raise TypeError(f'Cannot compare TimeSpan with {type(other)}')
# upcast DateSpan to nanoseconds
elif isinstance(other, DateSpan):
other = (other._fa * NANOS_PER_DAY).astype(np.float64)
# let everything else fall through to fast array
result = func(other)
# invalid will automatically be handled because TimeSpan is float
return result
# -------------------COMPARISONS------------------------------
# ------------------------------------------------------------
def __ne__(self, other):
return self._timespan_compare_check('__ne__', other)
def __eq__(self, other):
return self._timespan_compare_check('__eq__', other)
def __ge__(self, other):
return self._timespan_compare_check('__ge__', other)
def __gt__(self, other):
return self._timespan_compare_check('__gt__', other)
def __le__(self, other):
return self._timespan_compare_check('__le__', other)
def __lt__(self, other):
return self._timespan_compare_check('__lt__', other)
# ========================================================
class TimeSpan(TimeSpanBase, DateTimeBase):
"""Array of time delta in nanoseconds, held in float64.
Parameters
----------
values : numeric or string array or scalar
If string, interpreted as HH:MM:SS.ffffff (seconds / second fractions optional).
If numeric (single number, array, or list), interpreted as nanoseconds unless `unit` is provided.
unit : str, optional, default 'ns'
Precision of data in the constructor. All will be converted to nanoseconds.
Valid units: 'Y', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'
Examples
---------
From single string:
>>> dts = TimeSpan('12:34')
>>> dts
TimeSpan([12:34:00.000000000])
From milliseconds since midnight:
>>> dts = TimeSpan(FA([34500000., 36500000., 38500000.,]), unit='ms')
>>> dts
TimeSpan([09:35:00.000000000, 10:08:20.000000000, 10:41:40.000000000])
From the result of DateTimeNano subtraction:
>>> dtn1 = DateTimeNano(['2018-01-01 09:35:00'], from_tz='NYC')
>>> dtn2 = DateTimeNano(['2018-01-01 07:15:00'], from_tz='NYC')
>>> dtn1 - dtn2
TimeSpan([02:20:00.000000000])
Certain DateTimeNano properties can return a TimeSpan:
>>> dtn = DateTimeNano(['2018-01-01 09:35:00'], from_tz='NYC')
>>> dtn.hour_span
TimeSpan([09:35:00.000000000])
Can be added to DateTimeNano objects:
>>> dtn = DateTimeNano(['2018-01-01 09:35:00'], from_tz='NYC')
>>> ts = TimeSpan(FA([8400000000000.0]))
>>> dtn + ts
DateTimeNano([20180101 11:55:00.000000000])
Can be multiplied / divided by scalars:
>>> ts = TimeSpan(FA([8400000000000.0]))
>>> ts
TimeSpan([02:20:00.000000000])
>>> ts / 2
TimeSpan([01:10:00.000000000])
>>> ts * 5.6
TimeSpan([13:04:00.000000000])
"""
# ------------------------------------------------------------
def __new__(cls, values, unit=None):
# handle all input as array, scalars -> array of one item
if not isinstance(values, np.ndarray):
values = FastArray(values)
# strings must be in format HH:MM / HH:MM:SS / HH:MM:SS.ffffff
if values.dtype.char in 'US':
# send to wrapper for strptime
return timestring_to_nano(values)
# init class math funcs
if cls.ReduceFuncs is False:
cls._reduce_func_builder()
cls.ReduceFuncs = True
# handle all others as numeric
instance = values.astype(np.float64, copy=False)
if unit is not None:
instance = cls._unit_to_nano_span(instance, unit)
# wrap in class
instance = instance.view(cls)
instance._display_length = DisplayLength.Short
return instance
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def get_scalar(self, scalarval):
return TimeSpanScalar(scalarval, _from=self)
# ------------------------------------------------------------
@classmethod
def newclassfrominstance(cls, instance, origin):
result = instance.view(cls)
result._display_length = origin.display_length
return result
# ------------------------------------------------------------
@classmethod
def hstack(cls, tspans):
'''
TODO: maybe add type checking?
This is a very simple class, rewrap the hstack result in class.
'''
return hstack_any(tspans, cls, TimeSpan)
# ------------------------------------------------------------
def fill_invalid(self, shape=None, dtype=None, inplace=True):
arr = self._fill_invalid_internal(shape=shape, dtype=self.dtype, inplace=inplace)
if arr is None:
return
return TimeSpan(arr)
@classmethod
def _from_meta_data(cls, arrdict, arrflags, meta):
if not isinstance(meta, MetaData):
meta = MetaData(meta)
version = meta.get('version', 0)
default_meta = meta_from_version(cls, version)
# combine saved attributes with defaults based on version number
vars = meta['instance_vars']
for k, v in default_meta.items():
meta.setdefault(k, v)
for k, v in default_meta['instance_vars'].items():
vars.setdefault(k, v)
instance = [*arrdict.values()][0]
instance = TimeSpan(instance)
# restore all instance variables
vars = meta['instance_vars']
for name, value in vars.items():
setattr(instance, name, value)
return instance
def _meta_dict(self, name=None):
classname = self.__class__.__name__
if name is None:
name = classname
metadict = {
'name': name,
'typeid': getattr(TypeId, classname),
'classname': classname,
'ncols': 0,
'version': META_VERSION,
'author': 'python',
'instance_vars': {
'_display_length': self.display_length
},
'_base_is_stackable': SDSFlag.Stackable
}
return metadict
# ------------------------------------------------------------
@classmethod
def _load_from_sds_meta_data(cls, name, arr, cols, meta, tups: Optional[list] = None):
'''
Load TimeSpan from an SDS file as the correct class.
Restore formatting if different than default.
'''
if tups is None:
tups = list()
if not isinstance(meta, MetaData):
meta = MetaData(meta)
version = meta.get('version', 0)
default_meta = meta_from_version(cls, version)
# combine saved attributes with defaults based on version number
vars = meta['instance_vars']
for k, v in default_meta.items():
meta.setdefault(k, v)
for k, v in default_meta['instance_vars'].items():
vars.setdefault(k, v)
instance = TimeSpan(arr)
# restore all instance variables
vars = meta['instance_vars']
for name, value in vars.items():
setattr(instance, name, value)
return instance
# ==========================================================
# Scalars
# ==========================================================
class DateScalar(np.int32):
'''
Derived from np.int32
days since unix epoch in 1970
TODO: need to inherit math functions
'''
__slots__ = '_display_length'
# ------------------------------------------------------------
def __new__(cls, arr, **kwargs):
return super().__new__(cls, arr)
# ------------------------------------------------------------
def __init__(*args, **kwargs):
self = args[0]
_from = kwargs.get('_from', None)
if _from is not None and hasattr(_from, '_display_length'):
self._display_length = _from._display_length
else:
self._display_length = DisplayLength.Long
def get_item_format(self):
item_format = ItemFormat(
length=self._display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None,
)
return item_format
# ------------------------------------------------------------
@property
def _fa(self):
return self
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
def __repr__(self):
itemformat = self.get_item_format()
return Date.format_date_num(self._np, itemformat)
def __str__(self):
itemformat = self.get_item_format()
return Date.format_date_num(self._np, itemformat)
# ------------------------------------------------------------
def strftime(self, format, dtype='O'):
'''
Converts Date to an array of object strings or a scalar string.
This routine has not been sped up yet.
Other Parameters
----------------
dtype: defaults to 'O', can change to 'S' or 'U'
Examples
--------
>>> rt.Date(rt.utcnow(4))[0].strftime('%D')
'11/04/19'
See Also
---------
http://strftime.org for format strings
datetime.datetime.strftime
'''
return dt.strftime(dt.utcfromtimestamp(self * SECONDS_PER_DAY), format)
# ------------------------------------------------------------
@property
def _np(self):
return self.view(np.int32)
# used in adding a scalar to a Dataset
def repeat(self, repeats, axis=None):
return Date(self._np.repeat(repeats, axis=axis))
def tile(self, repeats):
return Date(self._np.tile(repeats))
# ==========================================================
class DateSpanScalar(np.int32):
'''
Derived from np.int32
Number of days between two dates
'''
__slots__ = '_display_length'
# ------------------------------------------------------------
def __new__(cls, arr, **kwargs):
return super().__new__(cls, arr)
# ------------------------------------------------------------
def __init__(*args, **kwargs):
self = args[0]
_from = kwargs.get('_from', None)
if _from is not None:
self._display_length = _from._display_length
else:
self._display_length = DisplayLength.Long
def get_item_format(self):
item_format = ItemFormat(
length=self._display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None
)
return item_format
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
def __repr__(self):
itemformat = self.get_item_format()
return DateSpan.format_date_span(self._np, itemformat)
def __str__(self):
itemformat = self.get_item_format()
return DateSpan.format_date_span(self._np, itemformat)
# ------------------------------------------------------------
@property
def _np(self):
return self.view(np.int32)
# ------------------------------------------------------------
@property
def _fa(self):
return self.view(np.int32)
# used in adding a scalar to a Dataset
def repeat(self, repeats, axis=None):
return DateSpan(self._np.repeat(repeats, axis=axis))
def tile(self, repeats):
return DateSpan(self._np.tile(repeats))
# ==========================================================
class DateTimeNanoScalar(np.int64, DateTimeCommon, TimeStampBase):
'''
Derived from np.int64
NOTE: np.int64 is a SLOT wrapper and does not have a __dict__
Number of nanoseconds since unix epoch 1970 in UTC
'''
__slots__ = '_display_length', '_timezone'
# ------------------------------------------------------------
def __new__(cls, arr, **kwargs):
return super().__new__(cls, arr)
# ------------------------------------------------------------
def __init__(*args, **kwargs):
# This needs more work, especially when init with a string
self = args[0]
_from = kwargs.get('_from', None)
if _from is not None and hasattr(_from, '_timezone'):
self._timezone = _from._timezone
else:
to_tz = kwargs.get('to_tz', None)
from_tz = kwargs.get('from_tz', None)
if from_tz is None:
from_tz = 'UTC'
if isinstance(self, TypeRegister.Date):
if to_tz is None:
to_tz = 'UTC'
# will automatically flip to int64, send through as nanosecond integer array
self = np.int64(self) * NANOS_PER_DAY
else:
if to_tz is None:
to_tz = 'NYC'
# create a timezone object to handle daylight savings, any necessary conversion, etc.
_timezone = TypeRegister.TimeZone(from_tz=from_tz, to_tz=to_tz)
self._timezone = _timezone
self._display_length = DisplayLength.Long
if _from is not None and hasattr(_from, '_display_length'):
self._display_length = _from._display_length
def get_item_format(self):
item_format = ItemFormat(
length=self._display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None,
timezone_str=self._timezone._timezone_str
)
return item_format
# ------------------------------------------------------------
def isnan(self):
return self <= 0
# ------------------------------------------------------------
@property
def _np(self):
return self.view(np.int64)
# ------------------------------------------------------------
@property
def _fa(self):
return self.view(np.int64)
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def __repr__(self):
itemformat = self.get_item_format()
# return DateTimeNano.format_nano_time(self._np, itemformat)
return f"{self.get_classname()}('{DateTimeNano.format_nano_time(self._np, itemformat)}')"
def __str__(self):
itemformat = self.get_item_format()
return DateTimeNano.format_nano_time(self._np, itemformat)
# --BINARY OPERATIONS------------------------------------------
# -------------------------------------------------------------
def __add__(self, value):
# reroute this back to the nonscalar
return DateTimeNano.__add__(self, value)
def __sub__(self, value):
# reroute this back to the nonscalar
return DateTimeNano.__sub__(self, value)
# used in adding a scalar to a Dataset
def repeat(self, repeats, axis=None):
return DateTimeNano(self._np.repeat(repeats, axis=axis), to_tz=self._timezone._to_tz, from_tz=self._timezone._from_tz)
def tile(self, repeats):
return DateTimeNano(self._np.tile(repeats), to_tz=self._timezone._to_tz, from_tz=self._timezone._from_tz)
# ==========================================================
class TimeSpanScalar(np.float64, TimeSpanBase):
'''
Derived from np.float64
Holds single float values for TimeSpan arrays.
These will be returned from operations that currently return a TimeSpan of a single item.
'''
__slots__ = '_display_length'
# ------------------------------------------------------------
def __new__(cls, scalar, **kwargs):
# strings must be in format HH:MM / HH:MM:SS / HH:MM:SS.ffffff
if isinstance(scalar, (str, bytes, np.bytes_, np.str_)):
# send to wrapper for strptime
scalar = timestring_to_nano(np.asarray([scalar]))[0]
return super(TimeSpanScalar, cls).__new__(cls, scalar, **kwargs)
def __init__(*args, **kwargs):
self = args[0]
_from = kwargs.get('_from', None)
# TimeSpan has no timezone
if _from is not None:
self._display_length = _from._display_length
else:
self._display_length = DisplayLength.Long
def get_item_format(self):
item_format = ItemFormat(
length=self._display_length,
justification=DisplayJustification.Right,
can_have_spaces=True,
decoration=None
)
return item_format
# ------------------------------------------------------------
@property
def _fa(self):
# must go to numpy or it will flip back to an array
return self.view(np.float64)
# ------------------------------------------------------------
@property
def _np(self):
return self.view(np.float64)
# ------------------------------------------------------------
def get_classname(self):
return __class__.__name__
# ------------------------------------------------------------
def __repr__(self):
itemformat = self.get_item_format()
return f"{self.get_classname()}('{TimeSpan.display_item_clock(self._np)}')"
def __str__(self):
itemformat = self.get_item_format()
return TimeSpan.display_item_clock(self._np)
# because np.float64 is first, it hooks these before TimeSpanBase
def __abs__(self): return self._unary_ufunc_builder('__abs__')
def __neg__(self): return self._unary_ufunc_builder('__neg__')
def __pos__(self): return self._unary_ufunc_builder('__pos__')
def abs(self): return self.__abs__()
#--BINARY OPERATIONS------------------------------------------
#-------------------------------------------------------------
def __add__(self, value): return TimeSpanBase.__add__(self, value)
def __radd__(self, value): return TimeSpanBase.__radd__(self, value)
def __sub__(self, value): return TimeSpanBase.__sub__(self, value)
def __rsub__(self, value): return TimeSpanBase.__rsub__(self, value)
def __mul__(self, value): return TimeSpanBase.__mul__(self, value)
def __rmul__(self, value): return TimeSpanBase.__rmul__(self, value)
def __floordiv__(self, value): return TimeSpanBase.__floordiv__(self, value)
def __truediv__(self, value): return TimeSpanBase.__truediv__(self, value)
def __eq__(self, other):
return self._timespan_compare_check('__eq__', other)
# used in adding a scalar to a Dataset
def repeat(self, repeats, axis=None):
return TimeSpan(self._np.repeat(repeats, axis=axis))
def tile(self, repeats):
return TimeSpan(self._np.tile(repeats))
# -----------------------------------------------------
# keep this at end of file
TypeRegister.DateTimeBase = DateTimeBase
TypeRegister.DateTimeNano = DateTimeNano
TypeRegister.TimeSpan = TimeSpan
TypeRegister.DateBase = DateBase
TypeRegister.Date = Date
TypeRegister.DateSpan = DateSpan
TypeRegister.DateTimeNanoScalar = DateTimeNanoScalar
TypeRegister.TimeSpanScalar = TimeSpanScalar
TypeRegister.DateScalar = DateScalar
TypeRegister.DateSpanScalar = DateSpanScalar
```
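A minimal usage sketch of the scalar classes above (illustrative only, not taken from the library's documentation). It assumes riptable is importable as `rt` and that indexing a `DateTimeNano` array yields a `DateTimeNanoScalar`, as the rerouting methods above suggest; the sample timestamp is made up.

```python
# Illustrative sketch, not part of the library source.
import riptable as rt

# DateTimeNano stores int64 nanoseconds; from_tz/to_tz drive display conversion.
dtn = rt.DateTimeNano(['2021-03-01 09:30:00'], from_tz='NYC', to_tz='NYC')

scalar = dtn[0]                    # assumed to return a DateTimeNanoScalar
print(scalar.get_classname())      # 'DateTimeNanoScalar'
print(scalar._fa)                  # raw int64 nanosecond view

# repeat()/tile() reroute back to the non-scalar DateTimeNano,
# preserving the scalar's timezone information.
expanded = scalar.repeat(3)
print(type(expanded).__name__, len(expanded))   # 'DateTimeNano' 3
```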
#### File: riptable/riptable/rt_groupbynumba.py
```python
__all__ = ['GroupbyNumba' ]
import numpy as np
import numba as nb
from .rt_fastarray import FastArray
from .rt_numpy import empty_like, empty
from .rt_enum import TypeRegister, GB_FUNC_NUMBA, GB_PACKUNPACK, GB_FUNCTIONS, INVALID_DICT
from .rt_groupbyops import GroupByOps
# NOTE YOU MUST INSTALL tbb
# conda install tbb
# to confirm...
# >>> from numba import threading_layer
# >>> threading_layer()
# 'tbb'
#
# See Table at end
#-------------------------------------------------------------------------------------------------
@nb.jit(nopython=True, cache=True)
def build_core_list(cores, unique_rows, binLow, binHigh):
dividend = unique_rows // cores
remainder = unique_rows % cores
high = 0
low = 0
for i in range(cores):
# Calculate band range
high = low + dividend
# add in any remainder until nothing left
if (remainder > 0):
high+=1
remainder-=1
binLow[i] = low
binHigh[i] = high
# next low bin is the previous high bin
low = high
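# Illustrative sketch (comments only, nothing executed at import time): how
# build_core_list partitions 10 unique rows across 4 cores into contiguous,
# load-balanced bin ranges.
#   binLow  = np.empty(4, dtype=np.int32)
#   binHigh = np.empty(4, dtype=np.int32)
#   build_core_list(4, 10, binLow, binHigh)
#   binLow  -> [0, 3, 6, 8]
#   binHigh -> [3, 6, 8, 10]
# Core i then owns every group with binLow[i] <= grpIdx < binHigh[i].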
class GroupbyNumba(GroupByOps):
# how many cores to cap the computation at
# NOTE: this is not how many cores the system has but a number where
# we believe thrashing takes place. This number could be dynamic per algo in the future.
CORE_COUNT = 12
#-------------------------------------------------------------------------------------------------
def _nb_groupbycalculateall(
values,
ikey,
unique_rows,
funcList,
binLowList,
binHighList,
func_param):
results= []
unique_rows += 1
corecount = min (GroupbyNumba.CORE_COUNT, unique_rows)
binLow = np.empty(corecount, dtype=np.int32)
binHigh = np.empty(corecount, dtype=np.int32)
build_core_list(corecount, unique_rows, binLow, binHigh)
for funcnum, inputdata in zip(funcList, values):
nbrev = NUMBA_REVERSE_TABLE[funcnum]
# lookup function to call
nbfunc = nbrev['func_back']
# lookup return dtype requested
dtypefunc = nbrev['func_dtype']
if dtypefunc is None:
# output is same dtype as input
dtype = inputdata.dtype
else:
dtype = dtypefunc(inputdata.dtype)
# allocate for numba
ret = empty( unique_rows, dtype=dtype)
nbfunc(ikey, unique_rows, binLow, binHigh, inputdata, ret, *func_param)
results.append(ret)
return results
#-------------------------------------------------------------------------------------------------
# This routine is called before the numba routines
def _nb_groupbycalculateallpack(
values, # list of arrays (the data to be calculated)
ikey, # bin numbers (integer array)
iGroup, # fancy indices sorted by group; used to walk each group's rows
iFirstGroup,
nCountGroup,
unique_rows, # often the same as len(iFirstGroup)
funcList, # support aggregation
binLowList, # start bin to work on for prange
binHighList, # high bin to work on for prange
func_param): # parameters
results= []
# TODO: add enumerate here
for funcnum, inputdata in zip(funcList, values):
nbrev = NUMBA_REVERSE_TABLE[funcnum]
# lookup function to call
nbfunc = nbrev['func_back']
# lookup return dtype requested
dtypefunc = nbrev['func_dtype']
if dtypefunc is None:
# output is same dtype as input
dtype = inputdata.dtype
else:
dtype = dtypefunc(inputdata.dtype)
# allocate for numba
ret = empty( len(inputdata), dtype=dtype)
#print("sending data", inputdata)
#print("binlow", binLowList[0])
nbfunc(iGroup, iFirstGroup, nCountGroup, binLowList[0], binHighList[0], inputdata, ret, *func_param)
results.append(ret)
return results
#-------------------------------------------------------------------------------------------------
@nb.jit(parallel=True, nopython=True, cache=True)
def _numbasum(ikey, unique_rows, binLow, binHigh, data, ret):
datacount = len(ikey)
# binLow and binHigh are arrays (same length)
# they divide up the work for prange while also allowing selective group filtering
for core in nb.prange(len(binLow)):
binlow = binLow[core]
binhigh = binHigh[core]
# zero out summation counters before we begin
for i in range(binlow, binhigh):
ret[i] = 0
# concurrently loop over all the data
for index in range(datacount):
grpIdx = ikey[index]
# make sure assigned to our range (concurrency issue)
if grpIdx >= binlow and grpIdx < binhigh:
ret[grpIdx] += data[index]
#-------------------------------------------------------------------------------------------------
@nb.jit(parallel=True, cache=True)
def _numbamin(ikey, unique_rows, binLow, binHigh, data, ret):
inv = INVALID_DICT[ret.dtype.num]
datacount = len(ikey)
for core in nb.prange(len(binLow)):
binlow = binLow[core]
binhigh = binHigh[core]
# mark all initial values as invalid before we begin
for i in range(binlow, binhigh):
ret[i] = inv
# concurrently loop over all the data
for index in range(datacount):
grpIdx = ikey[index]
# make sure assigned to our range (concurrency issue)
if grpIdx >= binlow and grpIdx < binhigh:
val = data[index]
# set the min, use not >= to handle nan comparison
if ret[grpIdx] == inv or not val >= ret[grpIdx]:
ret[grpIdx] = val
#-------------------------------------------------------------------------------------------------
@nb.jit(parallel=True, nopython=True, cache=True)
def _numbaEMA(iGroup, iFirstGroup, nCountGroup, binLow, binHigh, data, ret, time, decayRate):
for grpIdx in nb.prange(binLow, binHigh):
start = iFirstGroup[grpIdx]
last = start + nCountGroup[grpIdx]
# init per group data
lastEma = 0.0
lastTime = time[iGroup[start]]
for index in range(start, last):
rowIdx=iGroup[index]
# ema calculation
timeDelta = time[rowIdx]-lastTime
lastTime = time[rowIdx]
lastEma = data[rowIdx] + lastEma * np.exp(-decayRate * timeDelta)
# store the return result
ret[rowIdx]=lastEma
#-------------------------------------------------------------------------------------------------#
@nb.njit(parallel=True)
def _numbaEMA2(iGroup, iFirstGroup, nCountGroup, data, ret, time, decayRate):
'''
For each group defined by the grouping arguments, sets 'ret' to a true EMA of the 'data'
argument using the time argument as the time and the 'decayRate' as the decay rate.
Arguments:
iGroup, iFirstGroup, nCountGroup: from a groupby object's 'get_groupings' method
data: the original data to be operated on
ret: a blank array the same size as 'data' which will return the processed data
time: a list of times associated to the rows of data
decayRate: the decay rate (e based)
TODO: Error checking.
'''
for grpIdx in nb.prange(1, iFirstGroup.shape[0]):
startIdx = iFirstGroup[grpIdx]
nInGrp = nCountGroup[grpIdx]
endIdx = startIdx + nInGrp
rowIdx = iGroup[startIdx : endIdx]
if nInGrp > 0:
rows = data[ rowIdx ]
times = time[ rowIdx ]
totalWeight = 0.0
totalValues = 0.0
pEMA = np.nan
pTime = times[0]
for (idx, (t, v)) in enumerate(zip(times, rows)):
if not np.isnan(v):
deltaT = t - pTime
decay = np.exp(-decayRate*deltaT)
totalWeight = totalWeight*decay + 1
totalValues = totalValues*decay + v
pTime = t
pEMA = totalValues / totalWeight
rows[idx] = pEMA
ret[rowIdx] = rows
return
### Trim (an example which returns a dataset the same size as the original) ###
#-------------------------------------------------------------------------------------------------#
@nb.njit(parallel=True)
def _numbaTrim(iGroup, iFirstGroup, nCountGroup, data, ret, x, y):
'''
For each group defined by the grouping arguments, sets 'ret' to be a copy of the 'data'
with elements below the 'x'th percentile or above the 'y'th percentile of the group set to nan.
Arguments:
iGroup, iFirstGroup, nCountGroup: from a groupby object's 'get_groupings' method
data: the original data to be operated on
ret: a blank array the same size as 'data' which will return the processed data
x: the lower percentile bound
y: the upper percentile bound
'''
for grpIdx in nb.prange(1, iFirstGroup.shape[0]):
startIdx = iFirstGroup[grpIdx]
endIdx = startIdx + nCountGroup[grpIdx]
rowIdx = iGroup[startIdx : endIdx]
rows = data[ rowIdx ]
(a, b) = np.nanpercentile(rows, [x,y])
mask = (rows <= a) | (rows >= b)
rows[mask] = np.nan
ret[ rowIdx ] = rows
return
def grpTrim(grp, x, y):
'''
For each column, for each group, determine the x'th and y'th percentile of the data
and set data below the x'th percentile or above the y'th percentile to nan.
Arguments:
grp: a groupby object
x: lower percentile
y: upper percentile
Return: a dataset with the values outside the given percentiles set to np.nan
TODO: Test column types to make sure that the numba code will work nicely
'''
g = grp.get_groupings()
iGroup = g['iGroup']
iFirstGroup = g['iFirstGroup']
nCountGroup = g['nCountGroup']
#retData = rt.Dataset(tmp.grp.gbkeys)
retData = grp._dataset[ list(grp.gbkeys.keys()) ]
for colName in grp._dataset:
if colName not in grp.gbkeys.keys():
ret = empty( grp._dataset.shape[0] )
grp._numbaTrim(iGroup, iFirstGroup, nCountGroup, grp._dataset[colName], ret, x, y)
retData[colName] = ret
return retData
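# Hypothetical usage sketch (names are illustrative; assumes the groupby object
# exposes the _numbaTrim kernel via registration/inheritance):
#   gb = ds.gb('Symbol')                      # groupby over some Dataset `ds`
#   trimmed = GroupbyNumba.grpTrim(gb, 1, 99)
#   # `trimmed` keeps the groupby key columns and replaces values outside each
#   # group's 1st/99th percentile with np.nan in every other column.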
#-------------------------------------------------------------------------------------------------#
# FillForward
#-------------------------------------------------------------------------------------------------#
@nb.njit(parallel=True)
def _numbaFillForward(iGroup, iFirstGroup, nCountGroup, data, ret):
'''
propagate forward non-NaN values within a group, overwriting NaN values.
TODO: better documentation
'''
for grpIdx in nb.prange(1, iFirstGroup.shape[0]):
startIdx = iFirstGroup[grpIdx]
endIdx = startIdx + nCountGroup[grpIdx]
rowIdx = iGroup[startIdx : endIdx]
rows = data[rowIdx]
fill = np.nan
for idx in range(rows.shape[0]):
if np.isnan(rows[idx]):
rows[idx] = fill
else:
fill = rows[idx]
ret[rowIdx] = rows
return
@nb.njit(parallel=True)
def _numbaFillBackward(iGroup, iFirstGroup, nCountGroup, data, ret):
'''
propagate backward non-NaN values within a group, overwriting NaN values.
TODO: better documentation
'''
for grpIdx in nb.prange(1, iFirstGroup.shape[0]):
startIdx = iFirstGroup[grpIdx]
endIdx = startIdx + nCountGroup[grpIdx]
rowIdx = iGroup[startIdx : endIdx]
rows = data[ rowIdx ]
fill = np.nan
for idx in range(rows.shape[0]):
if np.isnan(rows[-idx-1]):
rows[-idx-1] = fill
else:
fill = rows[-idx-1]
ret[rowIdx] = rows
return
def grpFillForward(grp):
'''
propagate forward non-NaN values within a group, overwriting NaN values.
TODO: better documentation
'''
g = grp.get_groupings()
iGroup = g['iGroup']
iFirstGroup = g['iFirstGroup']
nCountGroup = g['nCountGroup']
#retData = rt.Dataset(tmp.grp.gbkeys)
retData = grp._dataset[ list(grp.gbkeys.keys()) ]
for colName in grp._dataset:
if colName not in grp.gbkeys.keys():
ret = empty( grp._dataset.shape[0] )
grp._numbaFillForward(iGroup, iFirstGroup, nCountGroup, grp._dataset[colName], ret)
retData[colName] = ret
return retData
def grpFillBackward(grp):
'''
propagate backward non-NaN values within a group, overwriting NaN values.
TODO: better documentation
'''
g = grp.get_groupings()
iGroup = g['iGroup']
iFirstGroup = g['iFirstGroup']
nCountGroup = g['nCountGroup']
#retData = rt.Dataset(tmp.grp.gbkeys)
retData = grp._dataset[ list(grp.gbkeys.keys()) ]
for colName in grp._dataset:
if colName not in grp.gbkeys.keys():
ret = empty( grp._dataset.shape[0] )
grp._numbaFillBackward(iGroup, iFirstGroup, nCountGroup, grp._dataset[colName], ret)
retData[colName] = ret
return retData
def grpFillForwardBackward(grp):
'''
propagate forward, then backward, non-NaN values within a group, overwriting NaN values.
TODO: better documentation
'''
g = grp.get_groupings()
iGroup = g['iGroup']
iFirstGroup = g['iFirstGroup']
nCountGroup = g['nCountGroup']
#retData = rt.Dataset(tmp.grp.gbkeys)
retData = grp._dataset[ list(grp.gbkeys.keys()) ]
for colName in grp._dataset:
if colName not in grp.gbkeys.keys():
forwardFilled = empty( grp._dataset.shape[0] )
grp._numbaFillForward(iGroup, iFirstGroup, nCountGroup, grp._dataset[colName], forwardFilled)
ret = empty( grp._dataset.shape[0] )
grp._numbaFillBackward(iGroup, iFirstGroup, nCountGroup, forwardFilled, ret)
retData[colName] = ret
return retData
#---------------------------------------------------------------
def nb_ema(self, *args, time=None, decay_rate = None, **kwargs):
'''
Other Parameters
----------------
time: an array of times (often in nanoseconds) associated with the rows of data
decay_rate: the scalar decay rate (e based)
'''
if time is None:
raise KeyError("time cannot be none")
if len(time) != self._dataset.shape[0]:
raise TypeError(f"time array must be the same size as the dataset")
if decay_rate is None:
raise KeyError("decay_rate cannot be none")
if not np.isscalar(decay_rate):
raise TypeError(f"decay_rate must be a scalar not type {type(decay_rate)}")
# Lookup our function to get a function_number
return self._calculate_all(NUMBA_REVERSE_FUNC[GroupbyNumba.nb_ema], *args, func_param=(time, decay_rate), **kwargs)
#---------------------------------------------------------------
def nb_sum_punt_test(self, *args, **kwargs):
"""Compute sum of group"""
return self._calculate_all(GB_FUNCTIONS.GB_SUM, *args, **kwargs)
#---------------------------------------------------------------
def nb_sum(self, *args, **kwargs):
"""Compute sum of group"""
return self._calculate_all(NUMBA_REVERSE_FUNC[GroupbyNumba.nb_sum], *args, **kwargs)
#---------------------------------------------------------------
def nb_min(self, *args, **kwargs):
"""Compute sum of group"""
return self._calculate_all(NUMBA_REVERSE_FUNC[GroupbyNumba.nb_min], *args, **kwargs)
#----------------------------------------------------
# add more routines here to determine the output dtype from the input dtype
def NUMBA_DTYPE_FLOATS(dtype):
if dtype == np.float64:
return np.float64
return np.float32
def NUMBA_DTYPE_SUM(dtype):
#upcast most ints to int64
if dtype == np.uint64:
return np.uint64
if dtype.num <=10:
return np.int64
return dtype
CALC_PACK = GroupbyNumba._nb_groupbycalculateallpack
CALC_UNPACK = GroupbyNumba._nb_groupbycalculateall
#------------------------------------------------------------
# name: (basic/packing, func_frontend, func_backend, gb_function, dtype, return_full True/False)
# ------ ------------------- ------------------------ ------------------ ----------- ---------------- ------------------
NUMBA_GB_TABLE= {
'nb_ema' : (GB_PACKUNPACK.PACK, GroupbyNumba.nb_ema, GroupbyNumba._numbaEMA, CALC_PACK, NUMBA_DTYPE_FLOATS, True),
'nb_sum' : (GB_PACKUNPACK.UNPACK, GroupbyNumba.nb_sum, GroupbyNumba._numbasum, CALC_UNPACK, NUMBA_DTYPE_SUM, False),
'nb_min' : (GB_PACKUNPACK.UNPACK, GroupbyNumba.nb_min, GroupbyNumba._numbamin, CALC_UNPACK, None, False),
'nb_sum_punt' :(GB_PACKUNPACK.UNPACK, GroupbyNumba.nb_sum_punt_test, None, None, None, False),
}
# key is a function number
# key : (funcname, requirespacking, frontend, backend, grouper)
NUMBA_REVERSE_TABLE={}
NUMBA_REVERSE_FUNC={}
# start assigning funcnum values at 1000
for i, (k, v) in enumerate(NUMBA_GB_TABLE.items()):
NUMBA_REVERSE_TABLE[i + GB_FUNC_NUMBA]={'name': k, 'packing': v[0], 'func_front': v[1], 'func_back': v[2], 'func_gb':v[3], 'func_dtype': v[4], 'return_full': v[5]}
NUMBA_REVERSE_FUNC[v[1]] = i + GB_FUNC_NUMBA
# register our custom functions
GroupByOps.register_functions(NUMBA_REVERSE_TABLE)
TypeRegister.Grouping.register_functions(NUMBA_REVERSE_TABLE)
```
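A short usage sketch of the registered reductions (hedged; the dataset, symbols, and values below are made up). It assumes riptable is importable as `rt` and that importing it runs the registration calls at the bottom of rt_groupbynumba.py, which attach `nb_sum`/`nb_min` to `GroupByOps`.

```python
# Illustrative sketch, not part of the library source.
import riptable as rt

ds = rt.Dataset({
    'Symbol': rt.Cat(['SPY', 'IBM', 'SPY', 'IBM', 'SPY']),
    'Size': rt.FastArray([1.0, 5.0, 10.0, 2.0, 4.0]),
})

# nb_sum / nb_min were attached to GroupByOps by register_functions(),
# so they sit alongside the built-in reductions such as sum()/min().
print(ds.gb('Symbol')['Size'].nb_sum())
print(ds.gb('Symbol')['Size'].nb_min())
```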
#### File: riptable/riptable/rt_groupbyops.py
```python
__all__ = ['GroupByOps']
#import abc
from typing import TYPE_CHECKING, Optional
import warnings
import numpy as np
import riptide_cpp as rc
from .rt_enum import GB_FUNCTIONS, GB_STRING_ALLOWED, GB_FUNC_COUNT, GB_PACKUNPACK, TypeRegister, ApplyType
from .rt_grouping import Grouping
from .rt_numpy import zeros_like, bool_to_fancy, empty_like, groupbyhash
if TYPE_CHECKING:
from .rt_dataset import Dataset
#=====================================================================================================
#=====================================================================================================
class GroupByOps(object):
"""
Holds all the functions for groupby
Only used when inherited
Child class must have self.grouping
Child class must override apply() func --> internal_apply
Child class must override count()
Child class must have its own _calculate_all
"""
DebugMode=False
AggNames = {'count',
'cumsum',
'first',
'last',
'max',
'mean',
'median',
'min',
'nanmax',
'nanmean',
'nanmedian',
'nanmin',
'nanstd',
'nansum',
'nanvar',
'nth',
'std',
'sum',
'var'}
# after pulling name from numpy method, route to different groupbyops method
NumpyAggNames = {
'amin' : 'min',
'amax' : 'max',
}
# TODO: Consider making GroupByOps an abc, and defining .grouping as a property;
# that'll be a bit cleaner and more robust than using a type annotation here
# to indicate the derived classes are expected to have a property/attribute
# with that name (per the docstring for GroupByOps).
# Maybe also include 'gb_keychain' and '_dataset', they're both used within this class.
grouping: Grouping
def __init__(self):
self._gbkeys = None
self._groups = None
#---------------------------------------------------------------
@classmethod
def register_functions(cls, functable):
'''
Registration should follow the NUMBA_REVERSE_TABLE layout at the bottom of rt_groupbynumba.py
If we register again, the last to register will be executed.
NUMBA_REVERSE_TABLE[i + GB_FUNC_NUMBA]={'name': k, 'packing': v[0], 'func_front': v[1], 'func_back': v[2], 'func_gb':v[3], 'func_dtype': v[4], 'return_full': v[5]}
'''
for v in functable.values():
# dict looks like --> name : {packing, func_front, func_back, ...}
# use the func_frontend
setattr(cls, v['name'], v['func_front'])
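# Illustrative sketch of the expected functable layout (values are hypothetical;
# see NUMBA_REVERSE_TABLE at the bottom of rt_groupbynumba.py for a real one):
#   functable = {
#       GB_FUNC_NUMBA + 0: {'name': 'nb_sum', 'packing': GB_PACKUNPACK.UNPACK,
#                           'func_front': GroupbyNumba.nb_sum,
#                           'func_back': GroupbyNumba._numbasum, ...},
#   }
#   GroupByOps.register_functions(functable)
#   # afterwards gb_obj.nb_sum(...) is callable on any GroupByOps subclass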
#---------------------------------------------------------------
def as_filter(self,index):
''' return an index filter for a given unique key'''
return self.grouping.as_filter(index)
#---------------------------------------------------------------
@property
def groups(self):
'''
Returns a dictionary of unique key values -> their fancy indices of occurrence in the original data.
'''
# make sure we get the unsorted list
#gbkeys = self.gb_keychain.gbkeys
gbkeys = self.grouping.gbkeys
col = list(gbkeys.values())
unique_count = self.grouping.unique_count
# make tuples from multikey values
if len(gbkeys) > 1:
col = [ tuple( c[i] for c in col ) for i in range(unique_count) ]
# use single values from array
else:
col = col[0]
fancy_idx = [self.as_filter(i+1) for i in range(unique_count)]
return dict(zip(col, fancy_idx))
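# Illustrative sketch (values are hypothetical) for a single-key grouping:
#   >>> c = rt.Cat(['this', 'that', 'this'])
#   >>> c.groups
#   {b'that': FastArray([1]), b'this': FastArray([0, 2])}   # key -> row indices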
#---------------------------------------------------------------
def _dict_val_at_index(self, index):
'''
Returns the value of the group label for a given index.
A single-key grouping will return a single value.
A multi-key grouping will return a tuple of values.
'''
keycols = list(self.grouping.gbkeys.values())
labels = []
for c in keycols:
labels.append(c[index])
if len(labels) == 1:
return labels[0]
else:
return tuple(labels)
#---------------------------------------------------------------
def key_from_bin(self, bin):
'''
Returns the value of the group label for a given index. (uses zero-based indexing)
A single-key grouping will return a single value.
A multi-key grouping will return a tuple of values.
'''
return self._dict_val_at_index(bin)
#---------------------------------------------------------------
def iter_groups(self):
'''
Very similar to the 'groups' property, but uses a generator instead of building the entire dictionary.
Returned pairs will be group label value (or tuple of multikey group label values) --> fancy index for that group (base-0).
'''
return self._iter_internal()
#---------------------------------------------------------------
def _iter_internal(self, dataset: Optional['Dataset'] = None):
'''
Generates pairs of labels and the stored dataset sliced by their fancy indices.
Right now, this is only called by categorical. Groupby has a faster way of returning dataset slices.
'''
self.grouping.pack_by_group()
igroup = self.grouping.iGroup
ifirstgroup = self.grouping.iFirstGroup
ncountgroup = self.grouping.nCountGroup
for i in range(self.grouping.unique_count):
key = self.key_from_bin(i)
first=ifirstgroup[i+1]
last=first + ncountgroup[i+1]
fancy_idx = igroup[first:last]
if dataset is None:
yield key, fancy_idx
else:
yield key, dataset[fancy_idx,:]
#---------------------------------------------------------------
def _iter_internal_contiguous(self):
'''
Sorts the data by group to create contiguous memory.
Returns key + dataset view of key's rows for each group.
'''
self.grouping.pack_by_group()
sortidx = self.grouping.iGroup
ifirst = self.grouping.iFirstGroup[1:]
ncountgroup = self.grouping.nCountGroup[1:]
# perform a sort up front so the dataset can be sliced contiguously
cds = self._dataset[sortidx,:]
# the original columns, to make the views (no copies will be made)
full_columns = list(cds.values())
# the lists in this tuple will change
item_tup = cds._all_items.get_dict_values()
ncols = len(item_tup)
# iterate over every group
for i, glen in enumerate(ncountgroup):
start = ifirst[i]
end = start + glen
# change the array slice
for ci in range(ncols):
item_tup[ci][0] = full_columns[ci][start:end]
cds._nrows = glen
yield self.key_from_bin(i), cds
#---------------------------------------------------------------
def get_groupings(self, filter: Optional[np.ndarray] = None):
'''
Parameters
----------
filter : ndarray of bools, optional
pass in a boolean filter
Returns
-------
dict containing ndarrays calculated in ``pack_by_group()``.
iGroup - the fancy indices for all groups, sorted by group. see iFirstGroup and nCountGroup for how to walk this.
iFirstGroup - first index for each group in the igroup array. the first index is invalid
nCountGroup - count for each unique group. the first count in this array is the invalid count.
'''
self.grouping.pack_by_group(filter=filter, mustrepack=True)
return_dict = {'iGroup' : self.grouping.iGroup, 'iFirstGroup': self.grouping.iFirstGroup, 'nCountGroup':self.grouping.nCountGroup}
return return_dict
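# Sketch of walking the packed groups returned above (this mirrors the pattern
# used by the numba kernels in rt_groupbynumba.py; bin 0 is the invalid bin):
#   g = gb_obj.get_groupings()
#   for b in range(1, len(g['nCountGroup'])):
#       start = g['iFirstGroup'][b]
#       rows = g['iGroup'][start : start + g['nCountGroup'][b]]
#       # `rows` holds the fancy indices of every row belonging to group `b`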
#---------------------------------------------------------------
@property
def first_fancy(self):
'''
Return a fancy index mask of the first occurrence
Notes
-----
NOTE: not optimized for groupby which has grouping.ikey always set
NOTE: categorical needs to lazily evaluate ikey
Examples
--------
>>> c = rt.Cat(['this','this','that','that','this'])
>>> c.first_fancy
FastArray([0, 2])
>>> c=Cat(['this','this','that','that','this'], ordered=False)
>>> c.first_fancy
FastArray([2, 0])
'''
# note, cache this value?
# fancy index
self.grouping.pack_by_group()
return self.grouping.iGroup[self.grouping.iFirstGroup[1:]]
#---------------------------------------------------------------
@property
def first_bool(self):
'''
Return a boolean mask of the first occurrence.
Examples
--------
>>> c = rt.Cat(['this','this','that','that','this'])
>>> c.first_bool
FastArray([ True, False, True, False, False])
'''
# boolean mask set to False
fancy = self.first_fancy
result=zeros_like(self.grouping.iGroup,dtype='?')
# set boolean mask to True for only the first occurrence
result[fancy]=True
return result
#---------------------------------------------------------------
def _possibly_transform(self, gb_ds, label_keys=None, **kwargs):
'''
Called after a reduce operation to possibly re-expand back.
Check transform flag.
'''
transform = kwargs.get("transform", False)
# check if transform was called earlier
if getattr(self, '_transform', False) or transform:
ikey=self.grouping.ikey
showfilter = kwargs.get("showfilter", False)
if not showfilter and self.grouping.base_index == 1:
ikey = ikey - 1
# use fancy indexing to pull the values from the cells, back to original array
newds= { }
isort = None
# a two key groupby (not gbu) often has display sort turned on
if hasattr(self,'_sort_display'):
if self._sort_display and self.grouping.Ordered is True:
# transform will put numbers back in original order
isort = rc.ReverseShuffle(self.isortrows)
ikey = isort[ikey]
# no need to re-expand the labels keys or return them
for colname,arr in gb_ds.items():
if colname not in label_keys:
newds[colname] = arr[ikey]
# turn transform back off in case used again
self._transform = False
return TypeRegister.Dataset(newds)
return gb_ds
#---------------------------------------------------------------
def apply_reduce(self, userfunc, *args, dataset=None, label_keys=None, nokeys=False, func_param=None, dtype=None, transform=False, **kwargs):
'''
GroupByOps:apply_reduce calls Grouping:apply_reduce
Parameters
----------
userfunc : callable
A callable that takes a contiguous array as its first argument, and returns a scalar
In addition the callable may take positional and keyword arguments.
args
Used to pass in columnar data from other datasets
Other Parameters
----------------
dataset: None
User may pass in an entire dataset to compute.
label_keys: None
Not supported, will use the existing groupby keys as labels.
func_param : tuple, optional
Set to a tuple to pass as arguments to the routine.
dtype : str or np.dtype, optional
Change to a numpy dtype to return an array with that dtype. Defaults to None.
transform : bool
Set to True to re-expand the results of the calculation. Defaults to False.
filter : ndarray of bools, optional; passed through to the grouping's apply_helper.
kwargs
Optional positional and keyword arguments to pass to ``userfunc``
Notes
-----
Grouping apply_reduce (for Categorical, groupby, accum2)
For every column of data to be computed:
The userfunc will be called back per group as a single array. The order of the groups is either:
1) Order of first appearance (when coming from a hash)
2) Lexicographical order (when ``lex=True`` or a Categorical with ordered=True)
The function passed to apply must take an array as its first argument and return back a single scalar value.
Examples
--------
From a Dataset groupby:
>>> ds.gb(['Symbol'])['TradeSize'].apply_reduce(np.sum)
From an existing categorical:
>>> ds.Symbol.apply_reduce(np.sum, ds.TradeSize)
Create your own with forced dtype:
>>> def mycumprodsum(arr):
>>> return arr.cumprod().sum()
>>> ds.Symbol.apply_reduce(mycumprodsum, ds.TradeSize, dtype=np.float32)
'''
if not callable(userfunc):
raise TypeError(f'the first argument to apply_reduce must be callable not type {type(userfunc)!r}')
args, kwargs, origdict, tups = self._pop_gb_data('apply_reduce'+type(self).__name__, userfunc, *args, **kwargs, dataset=dataset)
# accum2 does not want any keys, it will set nokeys to True
if label_keys is None and not nokeys:
label_keys = self.gb_keychain
#NOTE: apply_helper will take a filter= and use it
result= self.grouping.apply_helper(True, origdict, userfunc, *args, tups=tups, label_keys=label_keys, func_param=func_param, dtype=dtype, **kwargs)
if transform:
kwargs['transform']=True
return self._possibly_transform(result, label_keys=label_keys.keys(), **kwargs)
else:
return result
#---------------------------------------------------------------
def apply_nonreduce(self, userfunc, *args, dataset=None, label_keys=None, func_param=None, dtype=None, **kwargs):
'''
GroupByOps:apply_nonreduce calls Grouping:apply_helper (non-reducing)
Parameters
----------
userfunc : callable
A callable that takes a contiguous array as its first argument, and returns an array of the same length.
In addition the callable may take positional and keyword arguments.
args
used to pass in columnar data from other datasets
dataset : None
User may pass in an entire dataset to compute.
label_keys : None.
Not supported, will use the existing groupby keys as labels.
dtype: str or np.dtype, optional
Change to a numpy dtype to return an array with that dtype. Defaults to None.
kwargs
Optional positional and keyword arguments to pass to `userfunc`
Notes
-----
Grouping apply_nonreduce (for Categorical, groupby, accum2)
For every column of data to be computed:
The userfunc will be called back per group as a single array. The order of the groups is either:
1) Order of first appearance (when coming from a hash)
2) Lexicographical order (when lex=True or a Categorical with ordered=True)
The function passed to apply_nonreduce must take an array as its first argument and return back an array of the same length.
Examples
--------
From a Dataset groupby:
>>> ds.gb(['Symbol'])['TradeSize'].apply_nonreduce(np.cumsum)
From an existing categorical:
>>> ds.Symbol.apply_nonreduce(np.cumsum, ds.TradeSize)
Create your own with forced dtype:
>>> def mycumprod(arr):
>>> return arr.cumprod()
>>> ds.Symbol.apply_nonreduce(mycumprod, ds.TradeSize, dtype=np.float32)
'''
if not callable(userfunc):
raise TypeError(f'the first argument to apply_nonreduce must be callable not type {type(userfunc)!r}')
args, kwargs, origdict, tups = self._pop_gb_data('apply_nonreduce'+type(self).__name__, userfunc, *args, **kwargs, dataset=dataset)
return self.grouping.apply_helper(False, origdict, userfunc, *args, tups=tups, label_keys=self.gb_keychain, func_param=func_param, dtype=dtype, **kwargs)
#---------------------------------------------------------------
def apply(self, userfunc, *args, dataset=None, label_keys=None, **kwargs):
'''
GroupByOps:apply calls Grouping:apply
Parameters
----------
userfunc : callable
userfunction to call
dataset: None
label_keys: None
'''
# pop inplace data args first, put in dataset kwarg (might use groupby's stored dataset)
if not callable(userfunc):
raise TypeError(f'the first argument to apply must be callable not type {type(userfunc)!r}')
args, kwargs, origdict,tups = self._pop_gb_data('apply'+type(self).__name__, userfunc, *args, **kwargs, dataset=dataset)
result= self.grouping.apply(origdict, userfunc, *args, tups=tups, label_keys=self.gb_keychain, **kwargs)
return self._possibly_transform(result, label_keys=self.gb_keychain.keys(), **kwargs)
#---------------------------------------------------------------
def _keys_as_list(self):
gbkeys = self.grouping.uniquedict
# return tuple of column values for multikey
if len(gbkeys) > 1:
return list(zip(*gbkeys.values()))
return list(gbkeys.values())[0]
#---------------------------------------------------------------
def _calculate_all(self, funcNum, *args, func_param=0, gbkeys=None, isortrows=None, **kwargs):
raise TypeError("_calculate_all should have been overriden!")
#---------------------------------------------------------------
@staticmethod
def contains_np_arrays(container):
'''
Check to see if all items in a list-like container are numpy arrays.
'''
has_np = False
if len(container) > 0:
container_instance = [isinstance(item, np.ndarray) for item in container]
if all(container_instance):
has_np = True
return has_np
# ------------------------------------------------------------
@classmethod
def get_header_names(cls, columns, default='col_'):
#---------------------------------------------------------------
def get_array_name(arr, default, i):
name = None
try:
name = arr.get_name()
except:
pass
if name is None:
return default+str(i)
return name
if isinstance(columns, dict):
final_headers = list(columns.keys())
else:
# user friendly names for fast arrays if present
headers = [ get_array_name(c, default, i) for i, c in enumerate(columns) ]
# make sure there are no conflicts in friendly names, fix them up (columns only, shouldn't be too slow)
# TODO: find a faster way of doing this
unique_dict = {}
final_headers = []
for name in headers:
new_name = name
if name in unique_dict:
counter = unique_dict[name]
new_name = name+str(counter)
# make sure name+number is not also in the dict
while(new_name in headers):
counter+=1
new_name = name+str(counter)
# adjust the counter for that name
unique_dict[name]=counter+1
else:
unique_dict[name]=1
final_headers.append(new_name)
return final_headers
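# Illustrative sketch: unnamed arrays fall back to 'col_0', 'col_1', ...;
# conflicting friendly names get a numeric suffix, e.g.
#   ['price', 'price']  ->  ['price', 'price1']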
# ------------------------------------------------------------
def _pop_gb_data(self, calledfrom, userfunc, *args, **kwargs):
"""
Pop the groupby data from the args and keyword args, possibly combining.
Avoid repeating this step when the data doesn't change.
Parameters
----------
calledfrom : {'apply_reduce', 'apply_nonreduce', 'apply', 'agg'}
userfunc : callable or int (function number)
Returns
-------
4 return values:
any user arguments
the kwargs (with 'dataset' removed)
the dictionary of numpy arrays to operate on
tups: 0 or 1 or 2 depending on whether the first argument was a tuple of arrays
See Also
--------
GroupByOps.agg()
"""
kwargs['dataset'], user_args, tups = self._prepare_gb_data(calledfrom, userfunc, *args, **kwargs)
origdict = kwargs.pop('dataset')
return user_args, kwargs, origdict, tups
#---------------------------------------------------------------
def _prepare_gb_data(self, calledfrom, userfunc, *args, dataset=None, **kwargs):
'''
Parameters
----------
calledfrom: 'Accum2', 'Categorical','GroupBy','apply_reduce','apply_nonreduce','apply','agg'
userfunc: a callable function or a function number
args or dataset must be present (both also allowed)
if just args: make a dictionary from that
if just dataset: make dictionary
if both: make a new dataset, then make a dictionary from that
if neither: error
from Grouping, normally just a dataset
from Categorical, normally just args (but user can use kwarg 'dataset' to supply one)
This routine normalizes input from Grouping, Accum2, Categorical
GroupBy defaults to use the _dataset variable that it sets after being constructed from a dataset.
Accum2 and Categorical default to using input data for the calculation methods.
Accum2 and Categorical can also set _dataset just like Groupby. See Dataset.accum2 and Dataset.cat for
examples.
If a _dataset has been set, no input data is required for the calculation methods.
internal function to parse argument and search for numpy arrays
Returns
-------
a dictionary of arrays to be used as input to many groupby algorithms
user_args if any (the first argument might be removed)
tups: 0, 1, or 2. Will be set to > 0 if the first argument is a tuple
Raises
------
ValueError
'''
# Autodetect what is in args
# if dataset exists (from a groupby) and the first param in args
# is a LIST of arrays, then those are additional arrays to add
# **a list of scalars, then that is an argument to the function
# **an array: argument to function if 'apply*'
# a tuple: additional array constants to add together
# not a list or tuple, then those are arguments to the function
#
# if dataset does not exist, the first param
# a list of arrays: additional arrays to add
# an array: a single array to add
# a list of scalars: a single array to add
# possibly second parameter:
# a tuple: additional array constants to add together
# in the new code, any args after the first argument MUST be user_args
# to pass in multiple arrays to operate a function on, pass them in as a list [arr1, arr2, arr3]
ds = None
zip_dict = False
tups = 0
user_args = args
first_arg = None
if len(args) >= 1:
user_args = args[1:]
# pop off first argument
first_arg=args[0]
if isinstance(first_arg, (list, tuple)):
if len(first_arg) ==0:
# backout
first_arg = None
user_args = args
if isinstance(first_arg, tuple):
# user_args has moved over
tups=1
if first_arg is not None:
if isinstance(first_arg, (list, tuple)):
first_element = first_arg[0]
if isinstance(first_element, np.ndarray):
#print('first_arg was list/tuple of ndarrays')
if len(first_arg)>=1 and tups > 0 and (self._dataset is not None) and calledfrom.endswith('GroupBy'):
#print("special mode", dataset, ds, self._dataset)
#first_arg = None
#user_args = args
tups=2
#zip_dict = True
else:
zip_dict = True
elif isinstance(first_element, list):
# list of lists? convert the lists to arrays
first_arg = [np.asarray(v) for v in first_arg]
zip_dict = True
elif isinstance(first_element, TypeRegister.Dataset):
#print('first_element was single dataset')
ds = {name:arr for name,arr in first_element.items()}
elif isinstance(first_element, dict):
# shallow copy, might be modified by key later
ds = first_element.copy()
elif np.isscalar(first_element):
if dataset is None or not calledfrom.startswith('apply'):
zip_dict = True
# assume a list or tuple of scalars
first_arg = [np.asarray(first_arg)]
elif isinstance(first_arg, np.ndarray):
#print('first_arg was single ndarray')
if dataset is None or not calledfrom.startswith('apply'):
zip_dict = True
first_arg = [first_arg]
else:
# assume userfunc argument
pass
elif isinstance(first_arg, TypeRegister.Dataset):
#print('first_arg was single dataset')
ds = {name:arr for name,arr in first_arg.items()}
elif isinstance(first_arg, dict):
# shallow copy, might be modified by key later
ds = first_arg.copy()
# check for a tuple passed as second argument
# check if we ate the first argument
if (ds is not None or zip_dict) and tups ==0:
# move over one argument, we ate it
args= args[1:]
user_args = args
if len(user_args) > 0:
# check for tuples
# pop off first argument
addl_arg=user_args[0]
if isinstance(addl_arg, tuple) and len(addl_arg) > 0:
tups=1
first_element = addl_arg[0]
if isinstance(first_element, np.ndarray):
# passing in array constants after a list or dict or dataset
tups=2
if ds is None and not zip_dict and first_arg is not None:
# recombine
user_args = args
if zip_dict:
headers = self.get_header_names(first_arg)
ds = dict(zip(headers, first_arg))
if dataset is not None:
final_dict = {name:col for name,col in dataset.items()}
# in special mode, remove the arrays in the user arguments names
if tups ==2:
# remove extra names
for ua in user_args[0]:
try:
name = ua.get_name()
if name in final_dict:
# is this the same array?
if id(final_dict[name]) == id(ua):
del final_dict[name]
except Exception:
pass
if ds is not None:
# combine dataset with extra data
for name, col in ds.items():
# if calling from a groupby, and the user created a tuple then we are in tups==2 mode
# in this mode, the arguments are constants that are passed in for each column
# so we want to remove the constant arrays
alreadyexists = name in final_dict
if alreadyexists:
warnings.warn(f'Found conflicting items for name {name}. Using item from arguments.')
final_dict[name] = col
else:
# extra data only, already a dict
if ds is None:
final_dict = self._dataset
else:
final_dict = ds
# no data found
if final_dict is None:
funcname = CPP_REVERSE_TABLE.get(userfunc, None)
if funcname is None:
try:
funcname = userfunc.__name__
except Exception:
pass
if funcname is None:
if np.isscalar(userfunc):
funcname = str(userfunc)
else:
funcname = 'somefunc'
errorstring = f"Useable data for the function {calledfrom!r} has not been specified in {args!r}. Pass in array data to operate on.\n"
if calledfrom.startswith('apply'):
errorstring+=f"For example: call .{calledfrom}({funcname}, array_data)"
else:
errorstring+=f"For example: call {calledfrom}.{funcname}(array_data)"
raise ValueError(errorstring)
return final_dict, user_args, tups
#---------------------------------------------------------------
def aggregate(self, func):
return self.agg(func)
#---------------------------------------------------------------
def _get_agg_func(self, item):
"""
Translates user input into name and method for groupby aggregations.
Parameters
----------
item : str or function
String or supported numpy math function. See GroupByOps.AggNames.
Returns
-------
name : str
Lowercase name for aggregation function.
func : function
GroupByOps method.
"""
if callable(item):
# pull function name
item = item.__name__
# in case we need to route a numpy func to a different name
item = self.NumpyAggNames.get(item, item)
if item in self.AggNames:
return item, getattr(self.__class__, item)
raise ValueError(f"{item} is not a valid function to aggregate.")
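# Illustrative sketch (hypothetical REPL output): numpy callables are translated
# by name, with amin/amax rerouted through NumpyAggNames:
#   self._get_agg_func(np.sum)   -> ('sum', GroupByOps.sum)
#   self._get_agg_func(np.amin)  -> ('min', GroupByOps.min)
#   self._get_agg_func('median') -> ('median', GroupByOps.median)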
#---------------------------------------------------------------
def agg(self, func=None, *args, dataset=None, **kwargs):
"""
Parameters
----------
func : callable, string, dictionary, or list of string/callables
Function to use for aggregating the data. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. For
a DataFrame, can pass a dict, if the keys are DataFrame column names.
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Returns
-------
aggregated : Multiset
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
Examples
--------
Aggregate these functions across all columns
>>> gb.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> gb.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
>>> gb.agg({'C': np.sum, 'D': lambda x: np.std(x,ddof=1)})
"""
if dataset is None:
try:
dataset=self._dataset
except Exception:
pass
# tups will be false since we pass in a list as first argument
args, kwargs, data, tups = self._pop_gb_data('agg', func, [*args], **kwargs, dataset=dataset)
# put back in dataset that got popped because kwargs is passed to aggfunc
kwargs['dataset'] = data
if func is None or len(func) == 0:
raise ValueError("The first argument to the agg function is a dictionary or list, such as gb.agg({'data':np.sum})")
# create blank multiset class
multiset = TypeRegister.Multiset({})
if isinstance(func,str):
func = [func]
if isinstance(func,list):
# run through list -- we do not check for duplicates
for item in func:
name, aggfunc = self._get_agg_func(item)
caps=name.capitalize()
multiset[caps] = aggfunc(self, *args, **kwargs)
elif isinstance(func, dict):
# two passes, build dictionary first
func_dict = {}
for col_name, operations in func.items():
if col_name in data.keys():
if not isinstance(operations, (list, tuple)):
operations = [operations]
if isinstance(operations,(list,tuple)):
for op in operations:
name, aggfunc = self._get_agg_func(op)
f_list = func_dict.setdefault(aggfunc, [])
f_list.append(col_name)
else:
raise ValueError(f"{col_name} is not a valid column name")
# second pass, loop through dictionary
for aggfunc, col_list in func_dict.items():
name = aggfunc.__name__.capitalize()
multiset[name] = aggfunc(self, *args, col_idx=col_list, **kwargs)
multiset._gbkeys = self.gb_keychain.gbkeys
return multiset
#---------------------------------------------------------------
def null(self, showfilter=False):
'''
Performs a reduced no-op. No operation is performed.
Parameters
----------
showfilter : bool, default False
Returns
-------
Dataset with grouping keys. No operation is performed.
Examples
--------
>>> rt.Cat(np.random.choice(['SPY','IBM'], 100)).null(showfilter=True)
'''
return self.grouping._finalize_dataset(TypeRegister.Dataset({}),self.gb_keychain, None, addkeys=True, showfilter=showfilter)
#---------------------------------------------------------------
def count_uniques(self, *args, **kwargs):
"""
Compute unique count of group
Returns
-------
Dataset with grouped key plus the unique count for each column by group.
Examples
--------
>>> N = 17; np.random.seed(1)
>>> ds = Dataset(
...     dict(
...         Symbol = Cat(np.random.choice(['SPY','IBM'], N)),
...         Exchange = Cat(np.random.choice(['AMEX','NYSE'], N)),
...         TradeSize = np.random.choice([1,5,10], N),
...         TradePrice = np.random.choice([1.1,2.2,3.3], N),
...     ))
>>> ds.cat(['Symbol','Exchange']).count_uniques()
*Symbol *Exchange TradeSize TradePrice
------- --------- --------- ----------
IBM NYSE 2 2
. AMEX 2 3
SPY AMEX 3 2
. NYSE 1 2
"""
origdict, user_args, tups = self._prepare_gb_data('count_uniques', None, *args, **kwargs)
label_keys = self.gb_keychain
g=self.grouping
# get way to make groups contiguous
igroup=g.igroup
cutoffs =g.ncountgroup.cumsum(dtype=np.int64)[1:]
newdict={}
for colname, arr in origdict.items():
gbk= label_keys.gbkeys
if colname not in gbk:
ifirstkey = groupbyhash(arr[igroup], cutoffs=cutoffs)['iFirstKey'][1]
# the cutoffs will generate iFirstKey cutoffs that help us determine the unique counts
result=ifirstkey.diff()
result[0]=ifirstkey[0]
newdict[colname]= result
return g._finalize_dataset(newdict,label_keys, None, addkeys=True, **kwargs)
#---------------------------------------------------------------
def count(self):
"""Compute count of group"""
raise ValueError("subclass must take over count")
# make a new dataset with the same number of rows
#origdict = self._dataset.as_ordered_dictionary()
#return self.grouping.count(gbkeys, isortrows)
#---------------------------------------------------------------
def sum(self, *args, **kwargs):
"""Compute sum of group"""
return self._calculate_all(GB_FUNCTIONS.GB_SUM, *args, **kwargs)
#---------------------------------------------------------------
def mean(self, *args, **kwargs):
"""
Compute mean of groups
"""
return self._calculate_all(GB_FUNCTIONS.GB_MEAN, *args, **kwargs)
#---------------------------------------------------------------
def mode(self, *args, **kwargs):
"""
Compute mode of groups (auto handles nan)
"""
return self._calculate_all(GB_FUNCTIONS.GB_MODE, *args, **kwargs)
#---------------------------------------------------------------
def trimbr(self, *args, **kwargs):
"""
Compute trimmed mean br of groups (auto handles nan)
"""
return self._calculate_all(GB_FUNCTIONS.GB_TRIMBR, *args, **kwargs)
#---------------------------------------------------------------
def nanmean(self, *args, **kwargs):
"""Compute mean of group, excluding missing values"""
return self._calculate_all(GB_FUNCTIONS.GB_NANMEAN, *args, **kwargs)
#---------------------------------------------------------------
def nanmedian(self, *args, **kwargs):
"""Compute median of group, excluding missing values"""
return self._calculate_all(GB_FUNCTIONS.GB_MEDIAN, *args, **kwargs)
#---------------------------------------------------------------
def nanmin(self, *args, **kwargs):
"""Compute min of group, excluding missing values"""
return self._calculate_all(GB_FUNCTIONS.GB_NANMIN, *args, **kwargs)
#---------------------------------------------------------------
def nanmax(self, *args, **kwargs):
"""Compute max of group, excluding missing values"""
return self._calculate_all(GB_FUNCTIONS.GB_NANMAX, *args, **kwargs)
#---------------------------------------------------------------
def nansum(self, *args, **kwargs):
"""Compute sum of group, excluding missing values"""
return self._calculate_all(GB_FUNCTIONS.GB_NANSUM, *args, **kwargs)
#---------------------------------------------------------------
def min(self, *args, **kwargs):
"""Compute min of group"""
return self._calculate_all(GB_FUNCTIONS.GB_MIN, *args, **kwargs)
#---------------------------------------------------------------
def max(self, *args, **kwargs):
"""Compute max of group"""
return self._calculate_all(GB_FUNCTIONS.GB_MAX, *args, **kwargs)
#---------------------------------------------------------------
def first(self, *args, **kwargs):
"""First value in the group"""
return self._calculate_all(GB_FUNCTIONS.GB_FIRST, *args, **kwargs)
#---------------------------------------------------------------
def last(self, *args, **kwargs):
"""Last value in the group"""
return self._calculate_all(GB_FUNCTIONS.GB_LAST, *args, **kwargs)
#---------------------------------------------------------------
def median(self, *args, **kwargs):
"""
Compute median of groups
For multiple groupings, the result will be a MultiSet
"""
return self._calculate_all(GB_FUNCTIONS.GB_MEDIAN, *args, **kwargs)
#---------------------------------------------------------------
def std(self, *args, **kwargs):
"""
Compute standard deviation of groups
For multiple groupings, the result will be a MultiSet
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self._calculate_all(GB_FUNCTIONS.GB_STD, *args, **kwargs)
#---------------------------------------------------------------
def nanstd(self, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
"""
return self._calculate_all(GB_FUNCTIONS.GB_NANSTD, *args, **kwargs)
#---------------------------------------------------------------
def var(self, *args, **kwargs):
"""
Compute variance of groups
For multiple groupings, the result will be a MultiSet
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self._calculate_all(GB_FUNCTIONS.GB_VAR, *args, **kwargs)
#---------------------------------------------------------------
def nanvar(self, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result will be a MultiSet
"""
return self._calculate_all(GB_FUNCTIONS.GB_NANVAR, *args, **kwargs)
#---------------------------------------------------------------
def rolling_sum(self, *args, window=3, **kwargs):
"""rolling sum for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_SUM, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_nansum(self, *args, window=3, **kwargs):
"""rolling nan sum for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_NANSUM, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_mean(self, *args, window=3, **kwargs):
"""rolling mean for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_MEAN, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_nanmean(self, *args, window=3, **kwargs):
"""rolling nan mean for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_NANMEAN, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_count(self, *args, window=3, **kwargs):
"""rolling count for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_COUNT, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_shift(self, *args, window=1, **kwargs):
"""rolling shift for each group
Parameters
----------
window: optional, window size, defaults to 1
window can be negative
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_SHIFT, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_diff(self, *args, window=1, **kwargs):
"""rolling diff for each group
Parameters
----------
window: optional, window size, defaults to 1
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_DIFF, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def cumcount(self, *args, ascending=True, **kwargs):
"""rolling count for each group
Number each item in each group from 0 to the length of that group - 1.
Parameters
----------
ascending : bool, default True
Returns
-------
A single array, same size as the original grouping dict/categorical.
If a filter was applied, integer sentinels will appear in those slots.
"""
param=1
if not ascending:
param=-1
# cumcount doesn't need an origdict, pass it in empty
result= self.grouping._calculate_all({}, GB_FUNCTIONS.GB_ROLLING_COUNT, func_param=(param), keychain=self.gb_keychain, **kwargs)
return result
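# Illustrative sketch (hypothetical values): items are numbered within their group.
#   >>> c = rt.Cat(['a', 'a', 'b', 'a', 'b'])
#   >>> c.cumcount()
#   FastArray([0, 1, 0, 2, 1])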
#---------------------------------------------------------------
def cumsum(self, *args, filter = None, reset_filter=None, **kwargs):
"""Cumulative sum for each group
Parameters
----------
filter: optional, boolean mask array of included
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
"""
if filter is None:
filter = self._filter
return self._calculate_all(GB_FUNCTIONS.GB_CUMSUM, *args, func_param=(0.0, None, filter, reset_filter),**kwargs)
#---------------------------------------------------------------
def cumprod(self, *args, filter = None, reset_filter=None, **kwargs):
"""Cumulative product for each group
Parameters
----------
filter: optional, boolean mask array of included
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
"""
if filter is None:
filter = self._filter
return self._calculate_all(GB_FUNCTIONS.GB_CUMPROD, *args, func_param=(0.0, None, filter, reset_filter),**kwargs)
#---------------------------------------------------------------
def findnth(self, *args, filter = None, **kwargs):
"""FindNth
Parameters
----------
filter: optional, boolean mask array of included
TAKES NO ARGUMENTS -- operates on bin
Returns
-------
Dataset same rows as original dataset
"""
if filter is None:
filter = self._filter
return self._calculate_all(GB_FUNCTIONS.GB_FINDNTH, *args, func_param=(0.0, None, filter, None),**kwargs)
#---------------------------------------------------------------
def _ema_op(self, function, *args, time=None, decay_rate = 1.0, filter = None, reset_filter=None, **kwargs):
"""
Ema base function for time based ema functions
Formula
-------
grp loops over each item in a groupby group
i loops over each item in the original dataset
Output[i] = <some formula>
Parameters
----------
time: float or int array used to calculate time difference
decay_rate: see formula, used as a half-life
filter: optional, boolean mask array of included
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
"""
if time is None:
raise ValueError("The 'time' kwarg is required when calling ema functions")
if filter is None:
filter = self._filter
if filter is not None:
if len(time) != len(filter):
raise ValueError(f"The 'time' array length {len(time)} must match the length of the filter")
return self._calculate_all(function, *args, func_param=(decay_rate, time, filter, reset_filter), **kwargs)
#---------------------------------------------------------------
def ema_decay(self, *args, time=None, decay_rate = None, filter = None, reset_filter=None, **kwargs):
"""
Ema decay for each group
Formula
-------
grp loops over each item in a groupby group
i loops over each item in the original dataset
Output[i] = Column[i] + LastEma[grp] * exp(-decay_rate * (Time[i] - LastTime[grp]));
LastEma[grp] = Output[i]
LastTime[grp] = Time[i]
Parameters
----------
time: float or int array used to calculate time difference
decay_rate: see formula, used as a half-life
filter: optional, boolean mask array of included
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
Example
-------
>>> aapl
# delta sym org time
- ------ ---- ------ -----
0 -3.11 AAPL -3.11 25.65
1 210.54 AAPL 210.54 38.37
2 49.97 AAPL 42.11 41.66
>>> np.log(2)/(1e3*100)
6.9314718055994526e-06
>>> aapl.groupby('sym')['delta'].ema_decay(time=aapl.time, decay_rate=np.log(2)/(1e3*100))[0]
FastArray([ -3.11271882, 207.42784495, 257.39155897])
"""
if decay_rate is None:
raise ValueError("ema_decay function requires a kwarg 'decay_rate' floating point value as input")
return self._ema_op(GB_FUNCTIONS.GB_EMADECAY, *args, time=time, decay_rate=decay_rate, filter=filter, reset_filter=reset_filter, **kwargs)
#---------------------------------------------------------------
def ema_normal(self, *args, time=None, decay_rate = None, filter = None, reset_filter=None, **kwargs):
"""
Ema normal (time-based, normalized EMA) for each group
Formula
-------
grp loops over each item in a groupby group
i loops over each item in the original dataset
decayedWeight = exp(-decayRate * (Time[i] - LastTime[grp]));
LastEma[grp] = Column[i] * (1 - decayedWeight) + LastEma[grp] * decayedWeight
Output[i] = LastEma[grp]
LastTime[grp] = Time[i]
Parameters
----------
time: float or int array used to calculate time difference
decay_rate: see formula, used as a half-life (required)
filter: optional, boolean mask array of rows to include
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
Example
-------
>>> ds = rt.Dataset({'test': rt.arange(10), 'group2': rt.arange(10) % 3})
>>> ds.normal = ds.gb('group2')['test'].ema_normal(decay_rate=1.0, time = rt.arange(10))['test']
>>> ds.weighted = ds.gb('group2')['test'].ema_weighted(decay_rate=0.5)['test']
>>> ds
# test group2 normal weighted
- ---- ------ ------ --------
0 0 0 0.00 0.00
1 1 1 1.00 1.00
2 2 2 2.00 2.00
3 3 0 2.85 1.50
4 4 1 3.85 2.50
5 5 2 4.85 3.50
6 6 0 5.84 3.75
7 7 1 6.84 4.75
8 8 2 7.84 5.75
9 9 0 8.84 6.38
See Also
--------
ema_weighted
ema_decay
"""
if decay_rate is None:
raise ValueError('ema_normal function requires a decay_rate floating point value')
if time is None:
raise ValueError('ema_normal function requires a time array. Use the "time" kwarg')
if not isinstance(time, np.ndarray):
raise ValueError('ema_normal function requires a time numpy array.')
# cannot support int16/uint16
if time.dtype.num < 5:
time = time.astype(np.int32)
return self._ema_op(GB_FUNCTIONS.GB_EMANORMAL, *args, time=time, decay_rate=decay_rate, filter=filter, reset_filter=reset_filter, **kwargs)
#---------------------------------------------------------------
def ema_weighted(self, *args, decay_rate = None, filter = None, reset_filter=None, **kwargs):
"""
Ema weighted for each group, using a constant decay value (no time parameter)
Formula
-------
grp loops over each item in a groupby group
i loops over each item in the original dataset
LastEma[grp] = Column[i] * (1 - decay_rate) + LastEma[grp] * decay_rate
Output[i] = LastEma[grp]
Parameters
----------
time: <not used>
decay_rate: see formula, constant weight applied to the previous EMA value (required)
filter: optional, boolean mask array of rows to include
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
Example
-------
>>> ds = rt.Dataset({'test': rt.arange(10), 'group2': rt.arange(10) % 3})
>>> ds.normal = ds.gb('group2')['test'].ema_normal(decay_rate=1.0, time=rt.arange(10))['test']
>>> ds.weighted = ds.gb('group2')['test'].ema_weighted(decay_rate=0.5)['test']
>>> ds
# test group2 normal weighted
- ---- ------ ------ --------
0 0 0 0.00 0.00
1 1 1 1.00 1.00
2 2 2 2.00 2.00
3 3 0 2.85 1.50
4 4 1 3.85 2.50
5 5 2 4.85 3.50
6 6 0 5.84 3.75
7 7 1 6.84 4.75
8 8 2 7.84 5.75
9 9 0 8.84 6.38
See Also
--------
ema_normal
ema_decay
"""
if decay_rate is None:
raise ValueError('ema_weighted function requires a decay_rate floating point value')
# put in fake time array
time_array = np.arange(self._dataset.shape[0])
return self._ema_op(GB_FUNCTIONS.GB_EMAWEIGHTED, *args, time=time_array, decay_rate=decay_rate, filter=filter, reset_filter=reset_filter, **kwargs)
#-------------------------------------------------------
def sem(self, **kwargs):
"""
Compute standard error of the mean of groups
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
raise NotImplementedError
#return self.std(ddof=ddof) / np.sqrt(self.count())
#-------------------------------------------------------
def ohlc(self, **kwargs):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
raise NotImplementedError
#return self._apply_to_column_groupbys(
# lambda x: x._cython_agg_general('ohlc'))
#-------------------------------------------------------
def describe(self, **kwargs):
raise NotImplementedError
#self._set_group_selection()
#result = self.apply(lambda x: x.describe(**kwargs))
#if self.axis == 1:
# return result.T
#return result.unstack()
#-------------------------------------------------------
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
raise NotImplementedError
#from pandas.core.resample import get_resampler_for_grouping
#return get_resampler_for_grouping(self, rule, *args, **kwargs)
#-------------------------------------------------------
def nth(self, *args, n=1, **kwargs):
"""
Take the nth row from each group if `n` is an int, or a subset of rows if `n` is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame);
this is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying ``dropna`` allows count ignoring NaN
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
return self._calculate_all(GB_FUNCTIONS.GB_NTH, *args, func_param=(n), **kwargs)
#raise NotImplementedError
##-------------------------------------------------------
def diff(self, period=1, **kwargs):
"""rolling diff for each group
Parameters
----------
period: optional, period size, defaults to 1
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_DIFF, tuple(), func_param=(period), **kwargs)
#-------------------------------------------------------
def ngroup(self, ascending=True, **kwargs):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
cumcount : Number the rows in each group.
"""
raise NotImplementedError
#self._set_group_selection()
#index = self._selected_obj.index
#result = Series(self.grouper.group_info[0], index)
#if not ascending:
# result = self.ngroups - 1 - result
#return result
#-------------------------------------------------------
def rank(self, method='average', ascending=True, na_option='keep',
pct=False, axis=0, **kwargs):
"""
Provides the rank of values within each group
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Compute percentage rank of data within each group
Returns
-------
DataFrame with ranking of values within each group
"""
raise NotImplementedError
#-------------------------------------------------------
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
raise NotImplementedError
#-------------------------------------------------------
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
raise NotImplementedError
#-------------------------------------------------------
def shift(self, window=1, **kwargs):
"""
Shift each group by `window` observations
Parameters
----------
window : integer, default 1. Number of periods (rows) to shift.
periods : optional, alias for `window` (accepted for pandas compatibility)
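Example
-------
A minimal sketch; the dataset and column names are illustrative assumptions:
>>> ds = rt.Dataset({'grp': rt.arange(6) % 2, 'val': rt.arange(6)})
>>> ds.shifted = ds.gb('grp')['val'].shift(window=1)['val']   # each group's values move down one row; the vacated first row of each group is filled with the invalid value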
"""
# support for pandas periods keyword
window = kwargs.get('periods',window)
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_SHIFT, tuple(), func_param=(window), **kwargs)
#-------------------------------------------------------
def head(self, n=5, **kwargs):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores `as_index` flag.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
raise NotImplementedError
#self._reset_group_selection()
#mask = self._cumcount_array() < n
#return self._selected_obj[mask]
#-------------------------------------------------------
def tail(self, n=5, **kwargs):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores `as_index` flag.
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
raise NotImplementedError
#self._reset_group_selection()
#mask = self._cumcount_array(ascending=False) < n
#return self._selected_obj[mask]
#------------------------------------------------------------
# (cppnum, name, packing, func_front, func_back, func_gb, func_dtype, return_full True/False)
# -------  ----  -------  ----------  ---------  -------  ----------  -----------------------
GBF = GB_FUNCTIONS
# GB_FUNC_COUNT is special right now
CPP_GB_TABLE= [
(GBF.GB_SUM, 'sum', GB_PACKUNPACK.UNPACK, GroupByOps.sum, None, None, None, False),
(GBF.GB_MEAN, 'mean', GB_PACKUNPACK.UNPACK, GroupByOps.mean, None, None, None, False),
(GBF.GB_MIN, 'min', GB_PACKUNPACK.UNPACK, GroupByOps.min, None, None, None, False),
(GBF.GB_MAX, 'max', GB_PACKUNPACK.UNPACK, GroupByOps.max, None, None, None, False),
# STD uses VAR with the param set to 1
(GBF.GB_VAR, 'var', GB_PACKUNPACK.UNPACK, GroupByOps.var, None, None, None, False),
(GBF.GB_STD, 'std', GB_PACKUNPACK.UNPACK, GroupByOps.std, None, None, None, False),
(GBF.GB_NANSUM, 'nansum', GB_PACKUNPACK.UNPACK, GroupByOps.nansum, None, None, None, False),
(GBF.GB_NANMEAN, 'nanmean', GB_PACKUNPACK.UNPACK, GroupByOps.nanmean, None, None, None, False),
(GBF.GB_NANMIN, 'nanmin', GB_PACKUNPACK.UNPACK, GroupByOps.nanmin, None, None, None, False),
(GBF.GB_NANMAX, 'nanmax', GB_PACKUNPACK.UNPACK, GroupByOps.nanmax, None, None, None, False),
(GBF.GB_NANVAR, 'nanvar', GB_PACKUNPACK.UNPACK, GroupByOps.nanvar, None, None, None, False),
(GBF.GB_NANSTD, 'nanstd', GB_PACKUNPACK.UNPACK, GroupByOps.nanstd, None, None, None, False),
(GBF.GB_FIRST, 'first', GB_PACKUNPACK.PACK, GroupByOps.first, None, None, None, False),
(GBF.GB_NTH, 'nth', GB_PACKUNPACK.PACK, GroupByOps.nth, None, None, None, False),
(GBF.GB_LAST, 'last', GB_PACKUNPACK.PACK, GroupByOps.last, None, None, None, False),
# requires parallel qsort
(GBF.GB_MEDIAN, 'median', GB_PACKUNPACK.PACK, GroupByOps.median, None, None, None, False), # auto handles nan
(GBF.GB_MODE, 'mode', GB_PACKUNPACK.PACK, GroupByOps.mode, None, None, None, False), # auto handles nan
(GBF.GB_TRIMBR, 'trimbr', GB_PACKUNPACK.PACK, GroupByOps.trimbr, None, None, None, False), # auto handles nan
# All int/uints output upgraded to INT64
# Output is all elements (not just grouped)
# takes window= as parameter
(GBF.GB_ROLLING_SUM, 'rolling_sum', GB_PACKUNPACK.PACK, GroupByOps.rolling_sum, None, None, None, True),
(GBF.GB_ROLLING_NANSUM, 'rolling_nansum', GB_PACKUNPACK.PACK, GroupByOps.rolling_nansum, None, None, None, True),
(GBF.GB_ROLLING_DIFF, 'rolling_diff', GB_PACKUNPACK.PACK, GroupByOps.rolling_diff, None, None, None, True),
(GBF.GB_ROLLING_SHIFT, 'rolling_shift', GB_PACKUNPACK.PACK, GroupByOps.rolling_shift, None, None, None, True),
(GBF.GB_ROLLING_COUNT, 'rolling_count', GB_PACKUNPACK.PACK, GroupByOps.rolling_count, None, None, None, True),
(GBF.GB_ROLLING_MEAN, 'rolling_mean', GB_PACKUNPACK.PACK, GroupByOps.rolling_mean, None, None, None, True),
(GBF.GB_ROLLING_NANMEAN,'rolling_nanmean', GB_PACKUNPACK.PACK, GroupByOps.rolling_nanmean, None, None, None, True),
# In ema.cpp
(GBF.GB_CUMSUM, 'cumsum', GB_PACKUNPACK.PACK, GroupByOps.cumsum, None, None, None, True),
(GBF.GB_CUMPROD, 'cumprod', GB_PACKUNPACK.PACK, GroupByOps.cumprod, None, None, None, True),
# returns x elements ahead
(GBF.GB_FINDNTH, 'findnth', GB_PACKUNPACK.PACK, GroupByOps.findnth, None, None, None, True),
# takes time= and decay_rate= as parameters
(GBF.GB_EMADECAY, 'ema_decay', GB_PACKUNPACK.PACK, GroupByOps.ema_decay, None, None, None, True),
(GBF.GB_EMANORMAL, 'ema_normal', GB_PACKUNPACK.PACK, GroupByOps.ema_normal, None, None, None, True),
(GBF.GB_EMAWEIGHTED, 'ema_weighted', GB_PACKUNPACK.PACK, GroupByOps.ema_weighted, None, None, None, True),
]
# NOT DONE YET
# cummin
# cummax
# sem
# ohlc
# resample
# describe
# head
# tail
# rank
# ngroup
CPP_REVERSE_TABLE={}
# Build CPP funcnum table
for v in CPP_GB_TABLE:
funcnum = int(v[0])
CPP_REVERSE_TABLE[funcnum]={
'name': v[1],
'packing': v[2],
'func_front': v[3],
'func_back': v[4],
'func_gb':v[5],
'func_dtype': v[6],
'return_full': v[7]
}
```
#### File: riptable/riptable/rt_itemcontainer.py
```python
__all__ = [ 'ItemContainer', ]
import numpy as np
import warnings
import re
from riptable.rt_enum import ColumnAttribute
ATTRIBUTE_LABEL = "Label"
ATTRIBUTE_SUMMARY = "Right"
ATTRIBUTE_FOOTER = "Footer"
ATTRIBUTE_MARGIN_COLUMN = "MarginColumn"
ATTRIBUTE_NUMBER_OF_FOOTER_ROWS = "NumberOfFooterRows"
class ItemAttribute():
'''
An attribute about an item which, in turn, contains attributes in the
form of Python attributes, set and retrieved using setattr() and getattr()
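Example
-------
A minimal sketch of attaching and reading back an attribute (the attribute name is illustrative):
>>> attr = ItemAttribute()
>>> setattr(attr, 'Label', 0)
>>> getattr(attr, 'Label', None)
0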
'''
ATTRIB_EXCLUSION_LIST = 'copy'
def __repr__(self, indent=2):
result = self.__class__.__name__ + '\n'
for k,v in self._attribs():
result += ' '*indent + k + ': ' + str(v)
result += '\n'
return result
def _attribs(self):
'''
Returns all attributes dynamically set for this ItemAttribute.
NOTE: Add to the ATTRIB_EXCLUSION_LIST all method or property names statically
added to ItemAttribute that don't begin with '_'.
:return:
'''
return [(k, getattr(self, k)) for k in dir(self) if
(not k.startswith('_') and k not in ItemAttribute.ATTRIB_EXCLUSION_LIST)]
def copy(self):
'''
Performs a deep copy of the ItemAttribute, including all values
of any dynamically added attributes.
:return:
'''
attrib = ItemAttribute()
for k, v in self._attribs():
setattr(attrib, k, v.copy() if hasattr(v, 'copy') else v)
return attrib
class ItemContainer():
'Container for items in Struct -- all values are tuples with an attribute'
def __init__(self, *args, **kwds):
'''Initialize an IC
'''
self._items={}
self._items.update(*args, **kwds)
def __getitem__(self, key):
return self._items[key]
def __setitem__(self, key, value):
'ic.__setitem__(i, y) <==> ic[i]=y'
self._items[key] = value
def __delitem__(self, key):
'ic.__delitem__(y) <==> del ic[y]'
# deletion is delegated to the underlying dict
del self._items[key]
#def __iter__(self):
# 'od.__iter__() <==> iter(od)'
# # Traverse the linked list in order.
# root = self.__root
# curr = root[NEXT]
# while curr is not root:
# yield curr[KEY]
# curr = curr[NEXT]
#def __reversed__(self):
# 'od.__reversed__() <==> reversed(od)'
# # Traverse the linked list in reverse order.
# root = self.__root
# curr = root[PREV]
# while curr is not root:
# yield curr[KEY]
# curr = curr[PREV]
#def __reduce__(self):
# 'Return state information for pickling'
# items = [[k, self[k]] for k in self]
# tmp = self.__map, self.__root
# del self.__map, self.__root
# inst_dict = vars(self).copy()
# self.__map, self.__root = tmp
# if inst_dict:
# return (self.__class__, (items,), inst_dict)
# return self.__class__, (items,)
def clear(self):
self._items.clear()
def __contains__(self,*args):
return self._items.__contains__(*args)
def __next__(self):
return self._items.__next__()
def __len__(self):
return self._items.__len__()
def __iter__(self):
#return self._items.__iter__()
return iter(self._items)
def items(self):
return self._items.items()
def values(self):
return self._items.values()
def keys(self):
# how to best do this?
return list(self._items.keys())
def setdefault(self, *args):
return self._items.setdefault(*args)
def update(self, *args):
return self._items.update(*args)
def pop(self, *args):
return self._items.pop(*args)
#setdefault = MutableMapping.setdefault
#update = MutableMapping.update
#pop = MutableMapping.pop
#keys = MutableMapping.keys
#values = MutableMapping.values
#items = MutableMapping.items
#__ne__ = MutableMapping.__ne__
#def popitem(self, last=True):
# '''od.popitem() -> (k, v), return and remove a (key, value) pair.
# Pairs are returned in LIFO order if last is true or FIFO order if false.
# '''
# if not self:
# raise KeyError('dictionary is empty')
# key = next(reversed(self) if last else iter(self))
# value = self.pop(key)
# return key, value
#-----------------------------------------
def __repr__(self):
'ic.__repr__() <==> repr(ic)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self._items.items())
#-----------------------------------------
def copy_inplace(self, rowmask):
'''
Apply the row mask to every item's array in place, preserving each array's name.
'''
for v in self._items.values():
# first item in tuple is the array
arr = v[0]
# preserve name when copying inplace
name = arr.get_name()
arr=arr[rowmask]
arr.set_name(name)
v[0] = arr
#-----------------------------------------
def copy(self, cols=None, deep=False):
'''
Returns a shallow copy of the item container.
A cols list can be provided to select specific items.
'''
newcontainer = ItemContainer()
if cols is None:
newcontainer._items = self._items.copy()
for k,v in newcontainer._items.items():
newcontainer._items[k] = v.copy()
else:
for k in cols:
newcontainer._items[k] = self._items[k].copy()
return newcontainer
#-----------------------------------------
def copy_apply(self, func, *args, cols=None):
'''
Returns a copy of the itemcontainer, applying `func` to each item's value before storing it in the new ItemContainer object.
Used in Dataset row masking.
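Example
-------
A minimal sketch of the row-masking use case (item name and mask are illustrative):
>>> ic = ItemContainer()
>>> ic.item_set_value('col1', np.arange(3))
>>> masked = ic.copy_apply(lambda arr, mask: arr[mask], np.array([True, False, True]))
>>> masked.item_get_value('col1')
array([0, 2])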
'''
newcontainer = ItemContainer()
if cols is None:
for k, v in self._items.items():
# tuple copy
v = v.copy()
newcontainer._items[k] = v
v[0] = func(v[0], *args)
else:
for k in cols:
# tuple copy
v= self._items[k].copy()
newcontainer._items[k] = v
v[0] = func(v[0], *args)
return newcontainer
#-----------------------------------------
def apply(self, func, *args, cols=None):
'''
Performs a possibly inplace operation on items in the itemcontainer
'''
if cols is None:
for v in self._items.values():
func(v[0], *args)
else:
for k in cols:
v = self._items[k]
func(v[0], *args)
#-----------------------------------------
def __eq__(self, other):
if isinstance(other, ItemContainer):
return self._items == other._items
return self._items == other
def __ne__(self, other):
if isinstance(other, ItemContainer):
return self._items != other._items
return self._items != other
#def __del__(self):
# self._items.clear() # eliminate cyclical references
#-----------------------------------------
def items_as_dict(self):
'''
Return dictionary of items without attributes.
'''
return {k:v[0] for k,v in self._items.items()}
#-----------------------------------------
def items_tolist(self):
return [v[0] for v in self._items.values()]
#-----------------------------------------
def item_delete(self, key):
del self._items[key]
# -------------------------------------------------------
def item_get_dict(self):
'''
return the underlying dict
the value is stored in the first element of each entry, the attribute in the second
'''
return self._items
# -------------------------------------------------------
def iter_values(self):
'''
This will yield the full values in _items dict (lists with item, attribute)
'''
for v in self._items.values():
yield v
# -------------------------------------------------------
def item_get_value(self, key):
'''
return the value for the given key
NOTE: a good spot to put a counter for debugging
'''
return self._items[key][0]
# -------------------------------------------------------
def item_get_values(self, keylist):
'''
return a list of values for the given keys
used for fast dataset slicing/copy with column selection
'''
return [ self.item_get_value(i) for i in keylist ]
# -------------------------------------------------------
def item_set_value(self, key, value, attr=None):
# check if already exists...
temp = [value, attr]
v = self._items.setdefault(key, temp)
if v is not temp:
v[0] = value
def item_set_value_internal(self, key, value):
# no checks, go to dict
self._items[key] = value
# -------------------------------------------------------
def item_get_attribute(self, key, attrib_name, default=None):
'''
Params
------
Arg1: key: name of the item
Arg2: attrib_name: name of the attribute
Retrieves the value of the attribute previously assigned with item_set_attribute
'''
item = self._items.get(key, None)
if item is None:
return None
attrib = item[1]
if attrib is None:
return None
return getattr(attrib, attrib_name, default)
# -------------------------------------------------------
def _set_attribute(self, item, name, value):
attrib = item[1]
if attrib is None:
attrib = ItemAttribute()
setattr(attrib, name, value)
item[1]=attrib
# -------------------------------------------------------
def item_set_attribute(self, key, attrib_name, attrib_value):
'''
Params
------
Arg1: key: name of the item
Arg2: attrib_name: name of the attribute
Arg3: attrib_value: value of the attribute
Attaches an attribute (name,value) pair to the item
Any valid dictionary name and any object can be assigned.
Note: see item_get_attribute to retrieve
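Example
-------
A minimal sketch (item and attribute names are illustrative):
>>> ic = ItemContainer()
>>> ic.item_set_value('col1', np.arange(3))
>>> ic.item_set_attribute('col1', 'Label', 0)
>>> ic.item_get_attribute('col1', 'Label')
0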
'''
# check if already exists...
if self.item_exists(key):
self._set_attribute(self._items[key], attrib_name, attrib_value)
else:
raise KeyError(f"{key!r} does not already exist, thus cannot add attribute")
# -------------------------------------------------------
def item_get_len(self):
return len(self._items)
# -------------------------------------------------------
def item_exists(self, item):
return item in self._items
# -------------------------------------------------------
def get_dict_values(self):
'''
Returns a tuple of items in the item dict. Each item is a list.
'''
return tuple(self._items.values())
# -------------------------------------------------------
def item_replace_all(self, newdict, check_exists=True):
'''
Replace the data for each item in the item dict. Original attributes
will be retained.
Parameters
----------
newdict : dictionary of item names -> new item data (can also be a dataset)
check_exists : if True, all newdict keys and old item keys will be compared to ensure a match
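Example
-------
A minimal sketch (item name and data are illustrative):
>>> ic = ItemContainer()
>>> ic.item_set_value('a', np.arange(3))
>>> ic.item_replace_all({'a': np.arange(3) * 10})
>>> ic.item_get_value('a')
array([ 0, 10, 20])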
'''
# for internal routines, an existence check can often be skipped
if check_exists:
for k in newdict:
if self.item_exists(k) is False:
raise ValueError(f"Item {k} not found in original item dictionary.")
for k in self._items:
if k not in newdict:
raise ValueError(f"Item {k} in original item dictionary not found in new items.")
# replace the data, keep any attributes if set
for k, v in newdict.items():
self._items[k][0] = v
# -------------------------------------------------------
def item_rename(self, old, new):
"""
Rename a single column.
:param old: Current column name.
:param new: New column name.
:return: value portion of item that was renamed
"""
if old == new:
return None
if old not in self._items:
raise ValueError(f'Invalid column to rename: {old!r} cannot rename column that does not exist in itemcontainer.')
if new in self._items:
raise ValueError(f'Invalid column name: {new!r}; already exists in itemcontainer, cannot rename to it.')
newdict = self._items.copy()
return_val = None
self._items.clear()
for k,v in newdict.items():
if k == old:
k = new
# return the value portion
return_val = v[0]
self._items[k]=v
return return_val
# -------------------------------------------------------
def _get_move_cols(self, cols):
'''
Possibly convert list/array/dictionary/string/index of items to move
for item_move_to_front(), item_move_to_back()
'''
if isinstance(cols, (str, bytes)):
cols = [cols]
elif isinstance(cols, (int, np.integer)):
try:
cols = [list(self._items.keys())[cols]]
except:
raise ValueError(f"Items could not be indexed by {cols}")
if not isinstance(cols, (np.ndarray, list, tuple, dict)):
raise TypeError(f"Item(s) to move must be list, tuple, ndarray, dictionary (keys), single unicode or byte string, or single index. Got {type(cols)}")
else:
if len(cols) > len(self._items):
raise ValueError(f"Found {len(cols)} items to move to front - more than {len(self._items)} in container.")
return cols
# -------------------------------------------------------
def item_move_to_front(self, cols):
"""
Move single column or group of columns to front of list for iteration/indexing/display.
Values of columns will remain unchanged.
:param cols: list of column names to move.
:return: None
"""
cols = self._get_move_cols(cols)
new_all_items = {}
for cn in cols:
if cn in self._items:
new_all_items[cn] = self._items[cn]
else:
warnings.warn(f"Column {cn} not found. Could not move to front.")
for cn in self._items:
if cn not in new_all_items:
new_all_items[cn] = self[cn]
self._items = new_all_items
# -------------------------------------------------------
def item_move_to_back(self, cols):
"""
Move single column or group of columns to back of list for iteration/indexing/display.
Values of columns will remain unchanged.
:param cols: list of column names to move.
:return: None
"""
cols = self._get_move_cols(cols)
all_names = self._items
for cn in cols:
if cn in all_names:
all_names[cn] = all_names.pop(cn)
else:
warnings.warn(f"Column {cn} not found. Could not move to back.")
# -------------------------------------------------------
def item_add_prefix(self, prefix):
'''
In-place operation.
Adds the prefix in front of every existing item name.
Faster than calling rename.
'''
newdict = self._items.copy()
self._items.clear()
for k,v in newdict.items():
self._items[f'{prefix}{k}']=v
# --------------------------------------------------------
def item_str_match(self, expression, flags=0):
"""
Create a boolean mask vector for items whose names match the regex.
NB Uses re.match(), not re.search().
:param expression: regular expression
:param flags: regex flags (from re module).
:return: list of bools (length ncols) which is True for columns whose names match the regex.
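Example (item names are illustrative):
>>> ic = ItemContainer()
>>> for name in ('px_open', 'px_close', 'size'):
...     ic.item_set_value(name, np.arange(3))
>>> ic.item_str_match(r'px_')
[True, True, False]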
"""
match_fun = re.compile(expression, flags=flags).match
return [bool(match_fun(x)) for x in self._items.keys()]
def item_str_replace(self, old, new, maxr=-1):
'''
:param old: string to look for within individual names of columns
:param new: string to replace old string in column names
:param maxr: maximum number of replacements per name; -1 (the default) replaces all occurrences
If an item name contains the old string, the old string will be replaced with the new one.
If replacing the string conflicts with an existing item name, an error will be raised.
returns True if column names were replaced
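Example (item names are illustrative):
>>> ic = ItemContainer()
>>> ic.item_set_value('px_open', np.arange(3))
>>> ic.item_str_replace('px_', 'price_')
True
>>> ic.keys()
['price_open']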
'''
new_names = []
replace_count = 0
for item in self._items:
r = item.replace(old, new, maxr)
if r != item:
replace_count += 1
# prevent name conflict from overwriting existing column
if r in self._items:
raise ValueError(f"Item {r} already existed, cannot make replacement in item name {item}.")
new_names.append(r)
# only do this if necessary
if replace_count != 0:
newdict = self._items.copy()
self._items.clear()
for i, v in enumerate(newdict.values()):
self._items[new_names[i]] = v
return True
return False
# -------------------------------------------------------
def footer_get_value(self, key):
return self.item_get_attribute(key, ATTRIBUTE_FOOTER)
def footer_set_value(self, key, value):
self.item_set_attribute(key, ATTRIBUTE_FOOTER, value)
# --LABEL (LEFT FOR DISPLAY)-----------------------------
# -------------------------------------------------------
def label_as_dict(self):
return self._tagged_as_dict(ATTRIBUTE_LABEL)
def label_get_names(self):
return self._tagged_get_names(ATTRIBUTE_LABEL)
def label_set_names(self, listnames):
self._tagged_set_names(listnames, ATTRIBUTE_LABEL)
def label_remove(self):
self._tagged_remove(ATTRIBUTE_LABEL)
# --RIGHT FOR DISPLAY------------------------------------
# -------------------------------------------------------
def summary_as_dict(self):
return self._tagged_as_dict(ATTRIBUTE_SUMMARY)
def summary_get_names(self):
return self._tagged_get_names(ATTRIBUTE_SUMMARY)
def summary_set_names(self, listnames):
self._tagged_set_names(listnames, ATTRIBUTE_SUMMARY)
def summary_remove(self):
self._tagged_remove(ATTRIBUTE_SUMMARY)
# --GENERAL ATTRIBUTE FUNCTIONS--------------------------
# -------------------------------------------------------
def _tagged_get_names(self, attrname):
'''
Returns a list of item names tagged with attrname in order.
'''
tagged_names=[]
max, tagged_dict = self._tagged_get_dict_max(attrname)
if max >= 0:
for i in range(max+1):
if i in tagged_dict:
tagged_names.append(tagged_dict[i])
return tagged_names
def _tagged_set_names(self, listnames, attrname):
'''
Removes existing items tagged with attrname.
If items in listnames exist, they will be tagged with attrname.
'''
if not isinstance(listnames, list):
listnames = [listnames]
self._tagged_remove(attrname)
for i, tagged in enumerate(listnames):
if self.item_exists(tagged):
self._set_attribute(self._items[tagged], attrname, i)
def _tagged_remove(self, attrname):
'''
Removes existing items tagged with attrname.
'''
for v in self._items.values():
# get the attribute tuple
attr = v[1]
if attr is not None and hasattr(attr, attrname):
delattr(attr, attrname)
def _tagged_as_dict(self, attrname):
'''
Returns dictionary of columns tagged with attrname.
'''
return_dict = {}
max, tagged_dict = self._tagged_get_dict_max(attrname)
if max >= 0:
for i in range(max+1):
if i in tagged_dict:
name = tagged_dict[i]
return_dict[name] = self.item_get_value(name)
if len(return_dict) > 0:
return return_dict
return None
def _tagged_get_dict_max(self, attrname):
'''
Returns the maximum order value and an unordered dictionary mapping order value -> column name for columns tagged with attrname.
'''
tagged_dict={}
max = -1
for k, v in self._items.items():
# get the attribute tuple
attr = v[1]
if attr is not None and hasattr(attr, attrname):
val = getattr(attr, attrname)
if val > max: max = val
tagged_dict[val] = k
return max, tagged_dict
```
#### File: riptable/riptable/rt_pgroupby.py
```python
from .rt_groupbyops import GroupByOps
from .rt_groupby import GroupBy
#=====================================================================================================
#=====================================================================================================
class PGroupBy(GroupBy):
"""
Parameters
----------
dataset: Dataset
The dataset object
keys: list
List of column names to groupby
filter: array of bools
Boolean mask array applied as filter before grouping
return_all: bool
Defaults to False. When set to True, returns all the dataset columns for every operation.
hintSize: int
Hint size for the hash
sort: bool
Defaults to True. Indicates whether the groupby keys are sorted in the result.
Notes
-----
None at this time.
Properties
----------
gbkeys: dictionary of numpy arrays binned from the groupby keys
isortrows: sorted index or None
"""
DebugMode=False
ShowEmpty =True
TestCatGb = True
def __init__(self, *args, **kwargs):
super()._init(self, *args, **kwargs)
#---------------------------------------------------------------
def copy(self, deep = True):
pass
```
#### File: riptable/riptable/rt_timers.py
```python
__all__ = [
'GetNanoTime', 'GetTSC',
'tic', 'toc',
'ticx', 'tocx',
'tt', 'ttx',
'ticp', 'tocp',
'ticf', 'tocf',
'utcnow'
]
'''
Timing and profiling functionality
'''
import logging
from typing import TYPE_CHECKING, Optional
import numpy as np
import riptide_cpp as rc
from riptable.rt_enum import TypeRegister
if TYPE_CHECKING:
from .rt_datetime import DateTimeNano
def GetNanoTime() -> int:
'''
Returns: a long integer in Unix epoch nanoseconds (UTC)
Note: this function is written to be as fast as possible on both Windows and Linux
'''
return rc.GetNanoTime()
def GetTSC() -> int:
'''
Returns: a long integer from the CPU's current time stamp counter
The time stamp counter (TSC) is based on the CPU's clock cycle, which is often above 1GHz,
thus GetTSC return values are guaranteed to be both unique and of sub-nanosecond resolution
Note: this function is written to be as fast as possible on both Windows and Linux
'''
return rc.GetTSC()
def utcnow(count: int = 1) -> 'DateTimeNano':
'''
Call `GetNanoTime` one or more times and return the timestamps in a :class:`~rt.rt_datetime.DateTimeNano` array.
Parameters
----------
count : int, default to 1
The number of timestamp samples to collect.
Returns
-------
DateTimeNano
A DateTimeNano array containing the sampled timestamps (representing the current time in UTC nanoseconds).
Examples
--------
>>> import riptable as rt
>>> rt.utcnow()
DateTimeNano([20190215 11:29:44.022382600])
>>> rt.utcnow()._fa
FastArray([1550248297734812800], dtype=int64)
To make an array containing multiple timestamps:
>>> len(rt.utcnow(1_000_000))
1000000
See Also
--------
GetNanoTime()
datetime.datetime.utcnow()
'''
if count == 1:
return TypeRegister.DateTimeNano([rc.GetNanoTime()], from_tz='GMT')
else:
x=[rc.GetNanoTime() for i in range(count)]
return TypeRegister.DateTimeNano(x, from_tz='GMT')
# Timing code below
def tic():
'''
Call tic() followed by code followed by toc() to time a routine in nanoseconds.
See Also
--------
toc, ticx, ticp, ticf
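Example
-------
A minimal sketch; the timed statement is illustrative and the printed time will vary:
>>> tic()
>>> _ = sum(range(1_000_000))
>>> toc()   # prints something like: Elapsed time 0.01 seconds.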
'''
global TicStartTime
TicStartTime = GetNanoTime()
def toc(logger: Optional[logging.Logger] = None) -> None:
'''
Call tic() followed by code followed by toc() to time a routine in nanoseconds.
Parameters
----------
logger : logging.Logger, optional
An optionally-specified logger where the collected timing information is recorded.
If not specified (the default), the timing information is written to stdout.
See Also
--------
toc, ticx, ticp, ticf
'''
global TicStartTime
global TocEndTime
TocEndTime = GetNanoTime()
delta_ns = TocEndTime - TicStartTime
if logger:
logger.debug("Elapsed time (ns): %d", delta_ns)
else:
delta = delta_ns / 1_000_000_000.0
deltaTime = float("{0:.6f}".format(delta))
print("Elapsed time",deltaTime,"seconds.")
# even more accurate cycle counting
def ticx():
'''
Call ticx() followed by code followed by tocx() to time a routine in TSC
See also: toc, ticx, ticp, ticf
'''
global TicStartTimeX
TicStartTimeX = GetTSC()
def tocx(logger: Optional[logging.Logger] = None) -> None:
'''
Call ticx() followed by code followed by tocx() to time a routine in TSC
Parameters
----------
logger : logging.Logger, optional
An optionally-specified logger where the collected timing information is recorded.
If not specified (the default), the timing information is written to stdout.
See also: toc, ticx, ticp, ticf
'''
global TicStartTimeX
global TocEndTimeX
TocEndTimeX = GetTSC()
# TODO: Need to handle TSC wraparound here
delta_cycles = TocEndTimeX - TicStartTimeX
if logger:
logger.debug("Elapsed time (cycles): %d", delta_cycles)
else:
print("Elapsed time", delta_cycles,"cycles.")
def ticf():
'''
Call ticf() followed by code followed by tocf() to time fastarrays
See also: toc, ticx, ticp, ticf
'''
FA=TypeRegister.FastArray
FA._LCLEAR()
FA._LON()
def tocf(dataset=True):
'''
Call ticf() followed by code followed by tocf() to time fastarrays
Parameters
----------------
dataset: bool, defaults to True.
If specified, returns a Dataset. Set to False to print out instead.
'''
FA=TypeRegister.FastArray
FA._LOFF()
return TypeRegister.MathLedger._LDUMP()
def ticp():
'''
Call ticp() followed by code followed by tocp() to profile function calls
See also: toc, ticx, ticp, ticf
'''
import cProfile
global pr
pr = cProfile.Profile()
pr.enable()
def tocp(dataset=True, logfile=None, sort='time', strip=True, stats=False, calls=False, find=None):
'''
Call ticp() followed by code followed by tocp() to profile anything between the ticp/tocp
tocp() may be called again to retrieve data in a different manner
Examples
--------
ticp(); ds.sort_copy(by='Symbol'); tocp()._H
ticp(); ds.sort_copy(by='Symbol'); tocp().sort_view('cumtime')._A
ticp(); ds.sort_copy(by='Symbol'); tocp(find='rt_fastarray.py:332')._H
ticp(); ds.sort_copy(by='Symbol'); tocp(find='rt_fastarray.py')._H
ticp(); ds.sort_copy(by='Symbol'); ds=tocp(calls=True); ds.gb('filepath').sum()._H
tocp(calls=True).gb(['function','filepath'])['tottime'].sum().sort_view('tottime')._A
ticp(); ds.sort_copy(by='Symbol'); stats=tocp(stats=True);
ticp(); ds.sort_copy(by='Symbol'); tocp(False);
ticp(); ds.sort_copy(by='Symbol'); tocp(False, strip=False);
ticp(); ds.sort_copy(by='Symbol'); tocp(False, sort='cumtime');
Parameters
----------
dataset=True. set to False to print pstats output instead of returning a Dataset
logfile=None. set to filename to save the Dataset in SDS format
NOTE: consider pickling the result when stats=True to save for later analysis
strip=True. set to False to return full path for the filename when dataset=False
calls=False. set to True to include 'callee' and 'filepath' to determine caller info
find=None. set to a string with 'filename:lineno' to drill into those specific calls
sort='time' by default when dataset=False, other options include
"calls" --> "call count"
"ncalls" --> "call count"
"cumtime" --> "cumulative time"
"cumulative"--> "cumulative time"
"file" --> "file name"
"filename" --> "file name"
"line" --> "line number"
"module" --> "file name"
"name" --> "function name"
"nfl" --> "name/file/line"
"pcalls" --> "primitive call count"
"stdname" --> "standard name"
"time" --> "internal time"
"tottime" --> "internal time"
stats=False. set to True to return all stats collected by _lsprof.c
return all information collected by the profiler.
Each profiler_entry is a tuple-like object with the
following attributes:
code code object
callcount how many times this was called
reccallcount how many times called recursively
totaltime total time in this entry
inlinetime inline time in this entry (not in subcalls)
calls details of the calls
The calls attribute is either None or a list of
profiler_subentry objects:
code called code object
callcount how many times this is called
reccallcount how many times this is called recursively
totaltime total time spent in this call
inlinetime inline time (not in further subcalls)
'''
global pr
pr.disable()
if stats:
return pr.getstats()
if dataset:
if sort == 'time': sort='tottime'
ds = snapshot_stats(pr, sort=sort, calls=calls, findfunc=find)
if logfile is not None:
ds.save(logfile)
return ds
else:
import pstats
if strip:
pstats.Stats(pr).strip_dirs().sort_stats(sort).print_stats()
else:
pstats.Stats(pr).sort_stats(sort).print_stats()
def snapshot_stats(pr, sort='tottime', calls=True, findfunc=None):
'''
Parameters
----------
pr: a cProfile.Profile object whose collected stats will be converted to a Dataset
Other Parameters
----------------
sort: column name to sort the resulting Dataset by (defaults to 'tottime')
calls: if True, include 'callee' and 'filepath' columns with caller information
findfunc: must be in form filename:lineno such as 'rt_fastarray:423'
or in the form 'rt_fastarray'
Returns
-------
a Dataset
'''
import os
if findfunc is not None:
try:
funcname, linenum = findfunc.split(':')
linenum = int(linenum)
except Exception:
funcname = findfunc
linenum = None
def parse_func_info(tup):
func_str = []
filepath = '~'
# module
if tup[0] != '~':
# parse file name
normpath = os.path.normpath(tup[0])
basename = os.path.basename(normpath)
func_str.append(basename)
filepath=normpath[:-len(basename)]
# line_number
if tup[1] != 0:
func_str.append(':'+str(tup[1]))
# func name
if len(func_str) != 0:
func_str.append('('+tup[2]+')')
# python func
else:
func_str.append(tup[2])
# to match pstats display (str.replace returns a new string, so reassign the result)
func_str[0] = func_str[0].replace('<', '{').replace('>', '}')
return "".join(func_str), filepath
entries = pr.getstats()
stats = {}
callersdicts = {}
#def get_top_level_stats(self):
# for func, (cc, nc, tt, ct, callers) in self.stats.items():
# self.total_calls += nc
# self.prim_calls += cc
# self.total_tt += tt
# call information
# NOTE consider cython or C since this can be huge
for entry in entries:
code = entry.code
if isinstance(code, str):
func= ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
func= (code.co_filename, code.co_firstlineno, code.co_name)
nc = entry.callcount # ncalls column of pstats (before '/')
cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
tt = entry.inlinetime # tottime column of pstats
ct = entry.totaltime # cumtime column of pstats
callers = {}
callersdicts[id(entry.code)] = callers
stats[func] = cc, nc, tt, ct, callers
# subcall information
for entry in entries:
if entry.calls:
code = entry.code
if isinstance(code, str):
func= ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
func= (code.co_filename, code.co_firstlineno, code.co_name)
for subentry in entry.calls:
try:
callers = callersdicts[id(subentry.code)]
except KeyError:
continue
nc = subentry.callcount
cc = nc - subentry.reccallcount
tt = subentry.inlinetime
ct = subentry.totaltime
if func in callers:
prev = callers[func]
nc += prev[0]
cc += prev[1]
tt += prev[2]
ct += prev[3]
callers[func] = nc, cc, tt, ct
if findfunc is not None:
# this path is taken when the user wants to drill into a specific function's callers
callcount = []
ncalls = []
tottime = []
cumtime = []
names = []
for func_info, (cc, nc, tt, ct, callers) in stats.items():
callercount = len(callers)
if callercount > 0:
name, filepath =(parse_func_info(func_info))
if name[0] != '<':
funcn, stuff = name.split(':')
lineno, stuff = stuff.split('(')
lineno = int(lineno)
if (linenum is None or lineno == linenum) and funcn == funcname:
# NOTE: not sure this is
for k,v in callers.items():
name, filepathc = (parse_func_info(k))
cc1, nc1, tt1, ct1 = (v)
callcount.append(cc1)
ncalls.append(nc1)
tottime.append(tt1)
cumtime.append(ct1)
names.append(name)
ds = TypeRegister.Dataset({
'ncalls' : ncalls,
'tottime' : tottime,
'cumtime' : cumtime,
'callers' : callcount,
'function' : names})
else:
ncalls = []
tottime = []
cumtime = []
names = []
callcount = []
ncallers = []
firstcaller = []
path = []
pathc = []
for func_info, (cc, nc, tt, ct, callers) in stats.items():
ncalls.append(nc)
tottime.append(tt)
cumtime.append(ct)
callcount.append(cc)
callercount = len(callers)
ncallers.append(callercount)
name, filepath =(parse_func_info(func_info))
names.append(name)
# does user want more information?
if calls:
filepathc = '~'
if callercount > 0:
firstcall = next(iter(callers))
firstcall, filepathc =(parse_func_info(firstcall))
#firstcall = f'{firstcall[0]}:{firstcall[1]}({firstcall[2]})'
else:
firstcall = '~'
firstcaller.append(firstcall)
path.append(filepath)
pathc.append(filepathc)
ds = TypeRegister.Dataset({
'ncalls' : ncalls,
'tottime' : tottime,
'cumtime' : cumtime,
'callers' : ncallers,
'function' : names})
if calls:
ds['filepath'] = path
arr_callcount=np.asanyarray(callcount)
ds.percallT = ds.tottime / ds.ncalls
ds.percallC = ds.cumtime / arr_callcount
total_tt = ds.tottime.sum()
# check if they want information on the caller
if calls:
ds['callee'] = firstcaller
ds['filepathc'] = pathc
# NOTE: need an option for this to not SORT because that is the order
return ds.sort_inplace(sort, ascending=False)
def tt(expression:str, loops=1, return_time=False):
'''
tictoc time an expression in nanoseconds. use ; to separate lines
Args:
arg1 is a string of code to execute
arg2 is optional and is how many loops to execute
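Example:
The expression string is compiled once and executed in the caller's scope, so names
defined in the session (here x, an assumption for illustration) are visible:
>>> import riptable as rt
>>> x = rt.arange(1_000_000)
>>> rt.tt("y = x + 1; z = y * 2", loops=5)   # prints the median elapsed time over 5 runs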
'''
#import __builtin__
#__builtin__.__dict__.update(locals())
import inspect
frame = inspect.currentframe()
# allow the caller to use ; for new lines
codestr=expression.replace('; ','\n')
codestr=codestr.replace(';','\n')
# compile to byte code first to eliminate compile time in calculation
code=compile(codestr,'<string>','exec')
#preallocate array of floats
aTimers=loops*[0.0]
for i in range(loops):
startTime = GetNanoTime()
exec(code, frame.f_back.f_globals, frame.f_back.f_locals)
endTime = GetNanoTime()
aTimers[i]=(endTime - startTime) / 1000000000.0
if loops==1:
deltaTime = float("{0:.6f}".format(aTimers[0]))
if return_time:
return deltaTime
print("Elapsed time",deltaTime,"seconds.")
else:
mTime=np.median(aTimers)
deltaTime = float("{0:.6f}".format(mTime))
if return_time:
return deltaTime
print("Median",loops,"runs",deltaTime,"seconds.")
def ttx(expression:str, loops=1):
'''
tictoc time an expression in TSC (time stamp counters). use ; to separate lines
Args:
arg1 is a string of code to execute
arg2 is optional and is how many loops to execute
'''
import inspect
frame = inspect.currentframe()
# allow the caller to use ; for new lines
codestr=expression.replace(';','\n')
# compile to byte code first to eliminate compile time in calculation
code=compile(codestr,'<string>','exec')
#preallocate array of floats
aTimers=loops*[0]
for i in range(loops):
startTime = GetTSC()
exec(code, frame.f_back.f_globals, frame.f_back.f_locals)
endTime = GetTSC()
aTimers[i]=(endTime - startTime)
if loops==1:
deltaTime = aTimers[0]
print("Elapsed time",deltaTime,"cycles.")
else:
mTime=np.median(aTimers)
deltaTime = mTime
print("Median",loops,"runs",deltaTime,"cycles.")
```
#### File: riptable/riptable/rt_timezone.py
```python
__all__ = [
'TimeZone'
]
import numpy as np
import riptide_cpp as rc
from .rt_fastarray import FastArray
from .rt_enum import TypeRegister
from .rt_datetime import NANOS_PER_HOUR
from .rt_numpy import putmask, searchsorted, zeros, where
DST_CUTOFFS_NYC = FastArray([
'1970-04-26 02:00:00', '1970-10-25 02:00:00',
'1971-04-25 02:00:00', '1971-10-31 02:00:00',
'1972-04-30 02:00:00', '1972-10-29 02:00:00',
'1973-04-29 02:00:00', '1973-10-28 02:00:00',
'1974-01-06 02:00:00', '1974-10-27 02:00:00',
'1975-02-23 02:00:00', '1975-10-26 02:00:00',
'1976-04-25 02:00:00', '1976-10-31 02:00:00',
'1977-04-24 02:00:00', '1977-10-30 02:00:00',
'1978-04-30 02:00:00', '1978-10-29 02:00:00',
'1979-04-29 02:00:00', '1979-10-28 02:00:00',
'1980-04-27 02:00:00', '1980-10-26 02:00:00',
'1981-04-26 02:00:00', '1981-10-25 02:00:00',
'1982-04-25 02:00:00', '1982-10-31 02:00:00',
'1983-04-24 02:00:00', '1983-10-30 02:00:00',
'1984-04-29 02:00:00', '1984-10-28 02:00:00',
'1985-04-28 02:00:00', '1985-10-27 02:00:00',
'1986-04-27 02:00:00', '1986-10-26 02:00:00',
'1987-04-05 02:00:00', '1987-10-25 02:00:00',
'1988-04-03 02:00:00', '1988-10-30 02:00:00',
'1989-04-02 02:00:00', '1989-10-29 02:00:00',
'1990-04-01 02:00:00', '1990-10-28 02:00:00',
'1991-04-07 02:00:00', '1991-10-27 02:00:00',
'1992-04-05 02:00:00', '1992-10-25 02:00:00',
'1993-04-04 02:00:00', '1993-10-31 02:00:00',
'1994-04-03 02:00:00', '1994-10-30 02:00:00',
'1995-04-02 02:00:00', '1995-10-29 02:00:00',
'1996-04-07 02:00:00', '1996-10-27 02:00:00',
'1997-04-06 02:00:00', '1997-10-26 02:00:00',
'1998-04-05 02:00:00', '1998-10-25 02:00:00',
'1999-04-04 02:00:00', '1999-10-31 02:00:00',
'2000-04-02 02:00:00', '2000-10-29 02:00:00',
'2001-04-01 02:00:00', '2001-10-28 02:00:00',
'2002-04-07 02:00:00', '2002-10-27 02:00:00',
'2003-04-06 02:00:00', '2003-10-26 02:00:00',
'2004-04-04 02:00:00', '2004-10-31 02:00:00',
'2005-04-03 02:00:00', '2005-10-30 02:00:00',
'2006-04-02 02:00:00', '2006-10-29 02:00:00',
'2007-03-11 02:00:00', '2007-11-04 02:00:00',
'2008-03-09 02:00:00', '2008-11-02 02:00:00',
'2009-03-08 02:00:00', '2009-11-01 02:00:00',
'2010-03-14 02:00:00', '2010-11-07 02:00:00',
'2011-03-13 02:00:00', '2011-11-06 02:00:00',
'2012-03-11 02:00:00', '2012-11-04 02:00:00',
'2013-03-10 02:00:00', '2013-11-03 02:00:00',
'2014-03-09 02:00:00', '2014-11-02 02:00:00',
'2015-03-08 02:00:00', '2015-11-01 02:00:00',
'2016-03-13 02:00:00', '2016-11-06 02:00:00',
'2017-03-12 02:00:00', '2017-11-05 02:00:00',
'2018-03-11 02:00:00', '2018-11-04 02:00:00',
'2019-03-10 02:00:00', '2019-11-03 02:00:00',
'2020-03-08 02:00:00', '2020-11-01 02:00:00',
'2021-03-14 02:00:00', '2021-11-07 02:00:00',
'2022-03-13 02:00:00', '2022-11-06 02:00:00',
'2023-03-12 02:00:00', '2023-11-05 02:00:00',
'2024-03-10 02:00:00', '2024-11-03 02:00:00',
'2025-03-09 02:00:00', '2025-11-02 02:00:00',
'2026-03-08 02:00:00', '2026-11-01 02:00:00',
'2027-03-14 02:00:00', '2027-11-07 02:00:00',
'2028-03-12 02:00:00', '2028-11-05 02:00:00',
'2029-03-11 02:00:00', '2029-11-04 02:00:00',
'2030-03-10 02:00:00', '2030-11-03 02:00:00',
'2031-03-09 02:00:00', '2031-11-02 02:00:00',
'2032-03-14 02:00:00', '2032-11-07 02:00:00',
'2033-03-13 02:00:00', '2033-11-06 02:00:00',
'2034-03-12 02:00:00', '2034-11-05 02:00:00',
'2035-03-11 02:00:00', '2035-11-04 02:00:00',
'2036-03-09 02:00:00', '2036-11-02 02:00:00',
'2037-03-08 02:00:00', '2037-11-01 02:00:00',
'2038-03-14 02:00:00', '2038-11-07 02:00:00',
'2039-03-13 02:00:00', '2039-11-06 02:00:00',
'2040-03-11 02:00:00', '2040-11-04 02:00:00',
])
DST_REVERSE_NYC = FastArray([
'1970-04-26 07:00:00', '1970-10-25 06:00:00',
'1971-04-25 07:00:00', '1971-10-31 06:00:00',
'1972-04-30 07:00:00', '1972-10-29 06:00:00',
'1973-04-29 07:00:00', '1973-10-28 06:00:00',
'1974-01-06 07:00:00', '1974-10-27 06:00:00',
'1975-02-23 07:00:00', '1975-10-26 06:00:00',
'1976-04-25 07:00:00', '1976-10-31 06:00:00',
'1977-04-24 07:00:00', '1977-10-30 06:00:00',
'1978-04-30 07:00:00', '1978-10-29 06:00:00',
'1979-04-29 07:00:00', '1979-10-28 06:00:00',
'1980-04-27 07:00:00', '1980-10-26 06:00:00',
'1981-04-26 07:00:00', '1981-10-25 06:00:00',
'1982-04-25 07:00:00', '1982-10-31 06:00:00',
'1983-04-24 07:00:00', '1983-10-30 06:00:00',
'1984-04-29 07:00:00', '1984-10-28 06:00:00',
'1985-04-28 07:00:00', '1985-10-27 06:00:00',
'1986-04-27 07:00:00', '1986-10-26 06:00:00',
'1987-04-05 07:00:00', '1987-10-25 06:00:00',
'1988-04-03 07:00:00', '1988-10-30 06:00:00',
'1989-04-02 07:00:00', '1989-10-29 06:00:00',
'1990-04-01 07:00:00', '1990-10-28 06:00:00',
'1991-04-07 07:00:00', '1991-10-27 06:00:00',
'1992-04-05 07:00:00', '1992-10-25 06:00:00',
'1993-04-04 07:00:00', '1993-10-31 06:00:00',
'1994-04-03 07:00:00', '1994-10-30 06:00:00',
'1995-04-02 07:00:00', '1995-10-29 06:00:00',
'1996-04-07 07:00:00', '1996-10-27 06:00:00',
'1997-04-06 07:00:00', '1997-10-26 06:00:00',
'1998-04-05 07:00:00', '1998-10-25 06:00:00',
'1999-04-04 07:00:00', '1999-10-31 06:00:00',
'2000-04-02 07:00:00', '2000-10-29 06:00:00',
'2001-04-01 07:00:00', '2001-10-28 06:00:00',
'2002-04-07 07:00:00', '2002-10-27 06:00:00',
'2003-04-06 07:00:00', '2003-10-26 06:00:00',
'2004-04-04 07:00:00', '2004-10-31 06:00:00',
'2005-04-03 07:00:00', '2005-10-30 06:00:00',
'2006-04-02 07:00:00', '2006-10-29 06:00:00',
'2007-03-11 07:00:00', '2007-11-04 06:00:00',
'2008-03-09 07:00:00', '2008-11-02 06:00:00',
'2009-03-08 07:00:00', '2009-11-01 06:00:00',
'2010-03-14 07:00:00', '2010-11-07 06:00:00',
'2011-03-13 07:00:00', '2011-11-06 06:00:00',
'2012-03-11 07:00:00', '2012-11-04 06:00:00',
'2013-03-10 07:00:00', '2013-11-03 06:00:00',
'2014-03-09 07:00:00', '2014-11-02 06:00:00',
'2015-03-08 07:00:00', '2015-11-01 06:00:00',
'2016-03-13 07:00:00', '2016-11-06 06:00:00',
'2017-03-12 07:00:00', '2017-11-05 06:00:00',
'2018-03-11 07:00:00', '2018-11-04 06:00:00',
'2019-03-10 07:00:00', '2019-11-03 06:00:00',
'2020-03-08 07:00:00', '2020-11-01 06:00:00',
'2021-03-14 07:00:00', '2021-11-07 06:00:00',
'2022-03-13 07:00:00', '2022-11-06 06:00:00',
'2023-03-12 07:00:00', '2023-11-05 06:00:00',
'2024-03-10 07:00:00', '2024-11-03 06:00:00',
'2025-03-09 07:00:00', '2025-11-02 06:00:00',
'2026-03-08 07:00:00', '2026-11-01 06:00:00',
'2027-03-14 07:00:00', '2027-11-07 06:00:00',
'2028-03-12 07:00:00', '2028-11-05 06:00:00',
'2029-03-11 07:00:00', '2029-11-04 06:00:00',
'2030-03-10 07:00:00', '2030-11-03 06:00:00',
'2031-03-09 07:00:00', '2031-11-02 06:00:00',
'2032-03-14 07:00:00', '2032-11-07 06:00:00',
'2033-03-13 07:00:00', '2033-11-06 06:00:00',
'2034-03-12 07:00:00', '2034-11-05 06:00:00',
'2035-03-11 07:00:00', '2035-11-04 06:00:00',
'2036-03-09 07:00:00', '2036-11-02 06:00:00',
'2037-03-08 07:00:00', '2037-11-01 06:00:00',
'2038-03-14 07:00:00', '2038-11-07 06:00:00',
'2039-03-13 07:00:00', '2039-11-06 06:00:00',
'2040-03-11 07:00:00', '2040-11-04 06:00:00'
])
DST_CUTOFFS_DUBLIN = FastArray([
'1972-03-19 02:00:00', '1972-10-29 02:00:00',
'1973-03-18 02:00:00', '1973-10-28 02:00:00',
'1974-03-17 02:00:00', '1974-10-27 02:00:00',
'1975-03-16 02:00:00', '1975-10-26 02:00:00',
'1976-03-21 02:00:00', '1976-10-24 02:00:00',
'1977-03-20 02:00:00', '1977-10-23 02:00:00',
'1978-03-19 02:00:00', '1978-10-29 02:00:00',
'1979-03-18 02:00:00', '1979-10-28 02:00:00',
'1980-03-16 02:00:00', '1980-10-26 02:00:00',
'1981-03-29 02:00:00', '1981-10-25 02:00:00',
'1982-03-28 02:00:00', '1982-10-24 02:00:00',
'1983-03-27 02:00:00', '1983-10-23 02:00:00',
'1984-03-25 02:00:00', '1984-10-28 02:00:00',
'1985-03-31 02:00:00', '1985-10-27 02:00:00',
'1986-03-30 02:00:00', '1986-10-26 02:00:00',
'1987-03-29 02:00:00', '1987-10-25 02:00:00',
'1988-03-27 02:00:00', '1988-10-23 02:00:00',
'1989-03-26 02:00:00', '1989-10-29 02:00:00',
'1990-03-25 02:00:00', '1990-10-28 02:00:00',
'1991-03-31 02:00:00', '1991-10-27 02:00:00',
'1992-03-29 02:00:00', '1992-10-25 02:00:00',
'1993-03-28 02:00:00', '1993-10-24 02:00:00',
'1994-03-27 02:00:00', '1994-10-23 02:00:00',
'1995-03-26 02:00:00', '1995-10-22 02:00:00',
'1996-03-31 02:00:00', '1996-10-27 02:00:00',
'1997-03-30 02:00:00', '1997-10-26 02:00:00',
'1998-03-29 02:00:00', '1998-10-25 02:00:00',
'1999-03-28 02:00:00', '1999-10-31 02:00:00',
'2000-03-26 02:00:00', '2000-10-29 02:00:00',
'2001-03-25 02:00:00', '2001-10-28 02:00:00',
'2002-03-31 02:00:00', '2002-10-27 02:00:00',
'2003-03-30 02:00:00', '2003-10-26 02:00:00',
'2004-03-28 02:00:00', '2004-10-31 02:00:00',
'2005-03-27 02:00:00', '2005-10-30 02:00:00',
'2006-03-26 02:00:00', '2006-10-29 02:00:00',
'2007-03-25 02:00:00', '2007-10-28 02:00:00',
'2008-03-30 02:00:00', '2008-10-26 02:00:00',
'2009-03-29 02:00:00', '2009-10-25 02:00:00',
'2010-03-28 02:00:00', '2010-10-31 02:00:00',
'2011-03-27 02:00:00', '2011-10-30 02:00:00',
'2012-03-25 02:00:00', '2012-10-28 02:00:00',
'2013-03-31 02:00:00', '2013-10-27 02:00:00',
'2014-03-30 02:00:00', '2014-10-26 02:00:00',
'2015-03-29 02:00:00', '2015-10-25 02:00:00',
'2016-03-27 02:00:00', '2016-10-30 02:00:00',
'2017-03-26 02:00:00', '2017-10-29 02:00:00',
'2018-03-25 02:00:00', '2018-10-28 02:00:00',
'2019-03-31 02:00:00', '2019-10-27 02:00:00',
'2020-03-29 02:00:00', '2020-10-25 02:00:00',
'2021-03-28 02:00:00', '2021-10-31 02:00:00',
'2022-03-27 02:00:00', '2022-10-30 02:00:00',
'2023-03-26 02:00:00', '2023-10-29 02:00:00',
'2024-03-31 02:00:00', '2024-10-27 02:00:00',
'2025-03-30 02:00:00', '2025-10-26 02:00:00',
'2026-03-29 02:00:00', '2026-10-25 02:00:00',
'2027-03-28 02:00:00', '2027-10-31 02:00:00',
'2028-03-26 02:00:00', '2028-10-29 02:00:00',
'2029-03-25 02:00:00', '2029-10-28 02:00:00',
'2030-03-31 02:00:00', '2030-10-27 02:00:00',
'2031-03-30 02:00:00', '2031-10-26 02:00:00',
'2032-03-28 02:00:00', '2032-10-31 02:00:00',
'2033-03-27 02:00:00', '2033-10-30 02:00:00',
'2034-03-26 02:00:00', '2034-10-29 02:00:00',
'2035-03-25 02:00:00', '2035-10-28 02:00:00',
'2036-03-30 02:00:00', '2036-10-26 02:00:00',
'2037-03-29 02:00:00', '2037-10-25 02:00:00',
'2038-03-28 02:00:00', '2038-10-31 02:00:00',
'2039-03-27 02:00:00', '2039-10-30 02:00:00',
'2040-03-25 02:00:00', '2040-10-28 02:00:00'
])
DST_REVERSE_DUBLIN = FastArray([
'1972-03-19 01:00:00', '1972-10-29 01:00:00',
'1973-03-18 01:00:00', '1973-10-28 01:00:00',
'1974-03-17 01:00:00', '1974-10-27 01:00:00',
'1975-03-16 01:00:00', '1975-10-26 01:00:00',
'1976-03-21 01:00:00', '1976-10-24 01:00:00',
'1977-03-20 01:00:00', '1977-10-23 01:00:00',
'1978-03-19 01:00:00', '1978-10-29 01:00:00',
'1979-03-18 01:00:00', '1979-10-28 01:00:00',
'1980-03-16 01:00:00', '1980-10-26 01:00:00',
'1981-03-29 01:00:00', '1981-10-25 01:00:00',
'1982-03-28 01:00:00', '1982-10-24 01:00:00',
'1983-03-27 01:00:00', '1983-10-23 01:00:00',
'1984-03-25 01:00:00', '1984-10-28 01:00:00',
'1985-03-31 01:00:00', '1985-10-27 01:00:00',
'1986-03-30 01:00:00', '1986-10-26 01:00:00',
'1987-03-29 01:00:00', '1987-10-25 01:00:00',
'1988-03-27 01:00:00', '1988-10-23 01:00:00',
'1989-03-26 01:00:00', '1989-10-29 01:00:00',
'1990-03-25 01:00:00', '1990-10-28 01:00:00',
'1991-03-31 01:00:00', '1991-10-27 01:00:00',
'1992-03-29 01:00:00', '1992-10-25 01:00:00',
'1993-03-28 01:00:00', '1993-10-24 01:00:00',
'1994-03-27 01:00:00', '1994-10-23 01:00:00',
'1995-03-26 01:00:00', '1995-10-22 01:00:00',
'1996-03-31 01:00:00', '1996-10-27 01:00:00',
'1997-03-30 01:00:00', '1997-10-26 01:00:00',
'1998-03-29 01:00:00', '1998-10-25 01:00:00',
'1999-03-28 01:00:00', '1999-10-31 01:00:00',
'2000-03-26 01:00:00', '2000-10-29 01:00:00',
'2001-03-25 01:00:00', '2001-10-28 01:00:00',
'2002-03-31 01:00:00', '2002-10-27 01:00:00',
'2003-03-30 01:00:00', '2003-10-26 01:00:00',
'2004-03-28 01:00:00', '2004-10-31 01:00:00',
'2005-03-27 01:00:00', '2005-10-30 01:00:00',
'2006-03-26 01:00:00', '2006-10-29 01:00:00',
'2007-03-25 01:00:00', '2007-10-28 01:00:00',
'2008-03-30 01:00:00', '2008-10-26 01:00:00',
'2009-03-29 01:00:00', '2009-10-25 01:00:00',
'2010-03-28 01:00:00', '2010-10-31 01:00:00',
'2011-03-27 01:00:00', '2011-10-30 01:00:00',
'2012-03-25 01:00:00', '2012-10-28 01:00:00',
'2013-03-31 01:00:00', '2013-10-27 01:00:00',
'2014-03-30 01:00:00', '2014-10-26 01:00:00',
'2015-03-29 01:00:00', '2015-10-25 01:00:00',
'2016-03-27 01:00:00', '2016-10-30 01:00:00',
'2017-03-26 01:00:00', '2017-10-29 01:00:00',
'2018-03-25 01:00:00', '2018-10-28 01:00:00',
'2019-03-31 01:00:00', '2019-10-27 01:00:00',
'2020-03-29 01:00:00', '2020-10-25 01:00:00',
'2021-03-28 01:00:00', '2021-10-31 01:00:00',
'2022-03-27 01:00:00', '2022-10-30 01:00:00',
'2023-03-26 01:00:00', '2023-10-29 01:00:00',
'2024-03-31 01:00:00', '2024-10-27 01:00:00',
'2025-03-30 01:00:00', '2025-10-26 01:00:00',
'2026-03-29 01:00:00', '2026-10-25 01:00:00',
'2027-03-28 01:00:00', '2027-10-31 01:00:00',
'2028-03-26 01:00:00', '2028-10-29 01:00:00',
'2029-03-25 01:00:00', '2029-10-28 01:00:00',
'2030-03-31 01:00:00', '2030-10-27 01:00:00',
'2031-03-30 01:00:00', '2031-10-26 01:00:00',
'2032-03-28 01:00:00', '2032-10-31 01:00:00',
'2033-03-27 01:00:00', '2033-10-30 01:00:00',
'2034-03-26 01:00:00', '2034-10-29 01:00:00',
'2035-03-25 01:00:00', '2035-10-28 01:00:00',
'2036-03-30 01:00:00', '2036-10-26 01:00:00',
'2037-03-29 01:00:00', '2037-10-25 01:00:00',
'2038-03-28 01:00:00', '2038-10-31 01:00:00',
'2039-03-27 01:00:00', '2039-10-30 01:00:00',
'2040-03-25 01:00:00', '2040-10-28 01:00:00'
])
NYC_OFFSET_DST = 4
NYC_OFFSET = 5
DUBLIN_OFFSET_DST = -1
DUBLIN_OFFSET = 0
class TimeZone:
"""
Stores daylight savings cutoff information so UTC times can be translated to zone-specific times.
Every `DateTimeNano` object holds a `TimeZone` object.
All timezone-related conversions / fixups will be handled by the `TimeZone` class.
Parameters
----------
from_tz : str, defaults to None
shorthand string for the timezone the input times originate from (e.g. 'NYC', 'DUBLIN', 'GMT')
to_tz : str, defaults to 'NYC'
shorthand string for the timezone the times will be displayed in
Attributes
----------
_from_tz : str
shorthand timezone string from the constructor - the timezone that the time originates from
_dst_cutoffs : numpy.ndarray
lookup array for converting times from constructor to UTC nano in GMT time
_to_tz : str
shorthand timezone string from the constructor - the timezone that the time will be displayed in
_timezone_str
Python-friendly timezone string used for displaying individual times.
NOTE: This is actually a property, not a regular attribute.
_dst_reverse : numpy.ndarray
lookup array for DateTimeNano to display time in the correct timezone, accounting for daylight savings.
_offset
offset from GMT for display (non daylight savings)
_fix_offset
the offset from the timezone of origin
Notes
-----
'UTC' is not a timezone, but accepted as an alias for GMT
"""
valid_timezones = ('NYC', 'DUBLIN', 'GMT', 'UTC')
timezone_long_strings = {
'NYC' : 'America/New_York',
'DUBLIN' : 'Europe/Dublin',
'GMT' : 'GMT',
'UTC' : 'GMT'
}
tz_error_msg = f"If constructing from strings specify a timezone in from_tz keyword. Valid options: {valid_timezones}. Example: dtn = DateTimeNano(['2018-12-13 10:30:00'], from_tz='NYC')"
#------------------------------------------------------------
def __init__(self, from_tz: str = None, to_tz: str = 'NYC'):
if from_tz is None:
raise ValueError(self.tz_error_msg)
# might not need these, hang on to them for now
self._from_tz = from_tz
self._to_tz = to_tz
# get appropriate daylight savings dictionaries
self._dst_cutoffs, self._fix_offset = self._init_from_tz(from_tz)
self._dst_reverse, self._offset = self._init_to_tz(to_tz)
#------------------------------------------------------------
@classmethod
def _init_from_tz(cls, from_tz):
# TODO: as we add more timezone support, put into a dictionary
if from_tz == 'NYC':
_dst_cutoffs = DST_CUTOFFS_NYC
_fix_offset = NYC_OFFSET
elif from_tz == 'DUBLIN':
_dst_cutoffs = DST_CUTOFFS_DUBLIN
_fix_offset = DUBLIN_OFFSET
elif from_tz in ('GMT', 'UTC'):
_dst_cutoffs = None
_fix_offset = 0
else:
raise ValueError(f"{from_tz} is not a valid entry for from_tz keyword. Valid options: {cls.valid_timezones}.")
# fix_offset is different than display offset
# fix_offset is only used in initial conversion to UTC
return _dst_cutoffs, _fix_offset
#------------------------------------------------------------
@classmethod
def _init_to_tz(cls, to_tz):
'''
Return the daylight savings lookup array and display offset for correctly showing the datetime
in the timezone given by the to_tz keyword in the constructor.
'''
# TODO: as we add more timezone support, put into a dictionary
# probably don't need _timezone_str
if to_tz == 'NYC':
_dst_reverse = DST_REVERSE_NYC
_timezone_offset = NYC_OFFSET
elif to_tz == 'DUBLIN':
_dst_reverse = DST_REVERSE_DUBLIN
_timezone_offset = DUBLIN_OFFSET
elif to_tz in ('GMT', 'UTC'):
_dst_reverse = None
_timezone_offset = 0
else:
raise ValueError(f"{to_tz} is not a valid entry for from_tz keyword. Valid options: {cls.valid_timezones}.")
return _dst_reverse, _timezone_offset
#------------------------------------------------------------
@property
def _timezone_str(self):
return self.timezone_long_strings[self._to_tz]
#------------------------------------------------------------
def _set_timezone(self, tz):
'''
See DateTimeNano.set_timezone()
'''
self._dst_reverse, self._offset = self._init_to_tz(tz)
self._to_tz = tz
#------------------------------------------------------------
def _mask_dst(self, arr, cutoffs=None):
'''
:param arr: int64 UTC nanoseconds
:param cutoffs: an array containing daylight savings time starts/ends at midnight
possibly a reverse array for GMT that compensates for New York timezone (see DST_REVERSE_NYC)
'''
if cutoffs is None:
cutoffs = self._dst_cutoffs
if cutoffs is None:
return zeros(len(arr), dtype=np.bool_)
#is_dst = (FastArray(np.searchsorted(DST_CUTOFFS, arr)) & 1).astype(np.bool)
#is_dst = (rc.BinsToCutsBSearch(arr, cutoffs, 0) & 1).astype(np.bool)
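# Illustration (hypothetical values): cutoffs alternates DST starts and ends, e.g.
# [start_2018, end_2018, start_2019, end_2019, ...]. searchsorted returns an odd
# insertion index for any time falling between a start and the following end, so
# (index & 1) flags exactly the times that are inside daylight savings.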
is_dst = (searchsorted(cutoffs, arr) & 1).astype(np.bool_)
return is_dst
#------------------------------------------------------------
def _is_dst(self, arr):
return self._mask_dst(arr, self._dst_reverse)
#------------------------------------------------------------
def _tz_offset(self, arr):
if self._dst_reverse is None:
result = zeros(len(arr), dtype=np.int32)
else:
is_dst = self._mask_dst(arr, self._dst_reverse)
reg_offset = -1 * self._offset
dst_offset = reg_offset + 1
result = where(is_dst, dst_offset, reg_offset)
return result
def __repr__(self):
return f"{type(self).__qualname__}(from_tz='{self._from_tz}', to_tz='{self._to_tz}')"
def __eq__(self, other: 'TimeZone'):
return \
self.__class__ == other.__class__ and \
self._from_tz == other._from_tz and \
self._to_tz == other._to_tz
#------------------------------------------------------------
def copy(self):
"""A shallow copy of the TimeZone - all attributes are scalars or references
to constants.
"""
new_tz = TimeZone(from_tz=self._from_tz, to_tz=self._to_tz)
# other attributes may have been changed
new_tz._dst_cutoffs = self._dst_cutoffs
new_tz._fix_offset = self._fix_offset
new_tz._dst_reverse = self._dst_reverse
new_tz._offset = self._offset
return new_tz
#------------------------------------------------------------
def fix_dst(self, arr, cutoffs=None):
'''
Called by DateTimeNano routines that need to adjust time for timezone.
Also called by DateTimeNanoScalar
Parameters:
-----------
arr : underlying array of int64, UTC nanoseconds OR a scalar np.int64
cutoffs : lookup array for daylight savings time cutoffs for the active timezone
Notes:
------
There is a difference in daylight savings fixup for the Dublin timezone. The Python
datetime.astimezone() routine works differently than utcfromtimestamp(). Python's datetime
may set a 'fold' attribute to disambiguate wall-clock times that occur twice during the
repeated daylight savings hour.
>>> import datetime
>>> from dateutil import tz
>>> zone = tz.gettz('Europe/Dublin')
>>> pdt0 = datetime.datetime(2018, 10, 28, 1, 59, 0, tzinfo=zone)
>>> pdt1 = datetime.datetime(2018, 10, 28, 2, 59, 0, tzinfo=zone)
>>> dtn = DateTimeNano(['2018-10-28 01:59', '2018-10-28 02:59'], from_tz='DUBLIN', to_tz='DUBLIN')
>>> utc = datetime.timezone.utc
>>> pdt0.astimezone(utc)
datetime.datetime(2018, 10, 28, 0, 59, tzinfo=datetime.timezone.utc)
>>> pdt1.astimezone(utc)
datetime.datetime(2018, 10, 28, 1, 59, tzinfo=datetime.timezone.utc)
>>> dtn.astimezone('GMT')
DateTimeNano([20181028 00:59:00.000000000, 20181028 02:59:00.000000000])
'''
if cutoffs is None:
cutoffs = self._dst_reverse
if cutoffs is None:
return arr
# get whether or not daylight savings
is_dst = self._mask_dst(arr, cutoffs=cutoffs)
arr = arr - (NANOS_PER_HOUR * self._offset)
# scalar check
if isinstance(is_dst, np.bool_):
if is_dst:
arr += NANOS_PER_HOUR
else:
arr[is_dst] += NANOS_PER_HOUR
return arr
#------------------------------------------------------------
def to_utc(self, dtn, inv_mask=None):
'''
Called in the DateTimeNano constructor. If necessary, integer arrays of nanoseconds
are converted from their timezone of origin to UTC nanoseconds in GMT.
Restores any invalids (0) from the original array.
This differs from fix_dst() because it adds the offset to the array.
'''
if self._from_tz not in ('GMT', 'UTC'):
#print('was not gmt or utc', self._from_tz)
# create an invalid mask before adjusting
if inv_mask is None:
inv_mask = dtn == 0
# adjust the times so they are in UTC nanoseconds
is_dst = self._mask_dst(dtn, cutoffs=self._dst_cutoffs)
# future optimization: offset might be zero - don't bother with the first addition
dtn = dtn + (NANOS_PER_HOUR * self._fix_offset)
dtn[is_dst] -= NANOS_PER_HOUR
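# Illustration (assumed values): a NYC time in mid-June is inside DST, so it gains the
# fixed 5-hour offset and then loses 1 hour (EDT is UTC-4); a mid-January time keeps
# the full 5-hour EST adjustment.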
# restore invalid times
putmask(dtn, inv_mask, 0)
return dtn
#========================================================
class Calendar():
'''
*** not implemented
Holds information regarding holidays, trade days, etc. depending on market/country.
Every TimeZone object holds a Calendar object.
'''
def __init__(self):
raise NotImplementedError
## flip timestring DST arrays to UTC nano ints
DST_CUTOFFS_NYC = rc.DateTimeStringToNanos(DST_CUTOFFS_NYC)
DST_REVERSE_NYC = rc.DateTimeStringToNanos(DST_REVERSE_NYC)
DST_CUTOFFS_DUBLIN = rc.DateTimeStringToNanos(DST_CUTOFFS_DUBLIN)
DST_REVERSE_DUBLIN = rc.DateTimeStringToNanos(DST_REVERSE_DUBLIN)
TypeRegister.TimeZone = TimeZone
TypeRegister.Calendar = Calendar
```
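The DST tables and `TimeZone` plumbing above are exercised through `DateTimeNano`'s `from_tz`/`to_tz` keywords. A minimal sketch of the round trip, using only calls already shown in the docstrings above (`DateTimeNano(...)` and `astimezone`):
```python
from riptable import DateTimeNano

# One winter and one summer timestamp entered as NYC wall-clock times.
dtn = DateTimeNano(['2018-01-15 12:00:00', '2018-06-15 12:00:00'],
                   from_tz='NYC', to_tz='NYC')

# to_utc() added the fixed 5-hour NYC offset and pulled the June entry back
# 1 hour (EDT is UTC-4), so viewing the same nanoseconds in GMT shows
# 17:00:00 and 16:00:00 respectively.
print(dtn.astimezone('GMT'))
```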
#### File: riptable/tests/runall.py
```python
import unittest
import sys, os
def get_all_tests():
"""
:return:
"""
tests = (fname[:-3] for fname in os.listdir(os.path.dirname(__file__) or '.')
#if (fname.startswith('test_datetime.py') or fname.startswith('test_categorical.py')) and fname.endswith('.py'))
if fname.startswith('test_') and fname.endswith('.py'))
return sorted(tests)
def run_all( argv, verbosity = 3 ):
if len( argv ) > 1:
try:
verbosity = int(argv[1])
except ValueError:
raise SystemExit( "Usage: %s [ verbosity_int (def=%d) ]" % ( argv[ 0 ], verbosity ) )
pkg_name = os.path.basename(os.path.dirname(__file__))
failures = []
for test in get_all_tests():
print("==> Running tests: %s <==\n" % test, file=sys.stderr)
module_name = 'riptable.{pkg_name}.{mod_name}'.format(pkg_name=pkg_name, mod_name=test)
module = __import__(module_name, fromlist=[''])
utest = unittest.main(module=module, exit=False, argv=[module_name], verbosity=verbosity).result
if not utest.wasSuccessful():
failures.append(module_name)
if failures:
raise SystemExit('Failure: {}'.format(', '.join(failures)))
if __name__ == "__main__":
run_all( sys.argv )
```
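For reference, the runner can be driven from a short script; a hedged sketch, assuming the tests are importable as `riptable.tests.runall`:
```python
import sys
from riptable.tests.runall import run_all  # assumed import path

# The single optional argument is the verbosity level; a non-integer argument
# raises SystemExit with the usage string.
run_all([sys.argv[0], '2'])
```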
#### File: riptable/tests/test_accumtable.py
```python
import unittest
import riptable as rt
class AccumTable_Test(unittest.TestCase):
def test_accum_cols(self):
num_rows = 10
data = rt.Dataset(
{
'Symb': rt.Cat(['A', 'B'] * int(num_rows / 2)),
'Count': rt.full(num_rows, 1.0),
'PlusMinus': [1.0, -1.0]
* int(num_rows / 2), # Added to handle edge case of zero footer
}
)
accum = rt.accum_cols(
data.Symb, [data.Count, data.PlusMinus], ['Count', 'PlusMinus']
)
accum_expected = rt.Dataset(
{'Symb': ['A', 'B'], 'Count': [5.0, 5.0], 'PlusMinus': [5.0, -5.0]}
)
accum_expected.footer_set_values(
'Total', {'Symb': 'Total', 'Count': 10.0, 'PlusMinus': 0.0}
)
self.assertTrue((accum == accum_expected).all(axis=None))
def test_accum_cols_multikey(self):
num_rows = 12
data = rt.Dataset(
{
'Symb': rt.Cat(['A', 'B'] * int(num_rows / 2)),
'Exch': rt.Cat(['X', 'Y', 'Y', 'X'] * int(num_rows / 4)),
'Count': rt.full(num_rows, 1.0),
'PlusMinus': [1.0, -1.0] * int(num_rows / 2),
}
)
data.MultiKeyCat = rt.Cat([data.Symb, data.Exch])
accum = rt.accum_cols(
data.MultiKeyCat, [data.Count, data.PlusMinus], ['Count', 'PlusMinus']
)
accum_expected = rt.Dataset(
{
'Symb': ['A', 'B', 'A', 'B'],
'Exch': ['X', 'Y', 'Y', 'X'],
'Count': [3.0, 3.0, 3.0, 3.0],
'PlusMinus': [3.0, -3.0, 3.0, -3.0],
}
)
accum_expected.footer_set_values(
'Total', {'Exch': 'Total', 'Count': 12.0, 'PlusMinus': 0.0}
)
self.assertTrue((accum == accum_expected).all(axis=None))
# When a raw FA is passed as the pointers instead of Categorical
def test_accum_cols_noncat(self):
num_rows = 10
pointer = rt.FA([0, 1] * int(num_rows / 2))
count = rt.full(num_rows, 1.0)
accum = rt.accum_cols(pointer, count)
accum_expected = rt.Dataset({'YLabel': [0, 1], 'col0': [5.0, 5.0]})
accum_expected.footer_set_values('Total', {'YLabel': 'Total', 'col0': 10.0})
self.assertTrue((accum == accum_expected).all(axis=None))
# Test basic accum_ratiop
def test_accum_ratiop(self):
num_rows = 12
data = rt.Dataset(
{
'Symb': rt.Cat(['A', 'A', 'A', 'B'] * int(num_rows / 4)),
'Exch': rt.Cat(['Z', 'Z', 'X', 'X'] * int(num_rows / 4)),
'Count': rt.full(num_rows, 1.0),
}
)
# Invalid input
with self.assertRaises(
ValueError, msg=f'Failed to raise an error when passing invalid norm_by arg'
):
rt.accum_ratiop(data.Symb, data.Exch, data.Count, norm_by='z')
# Ratio within total
accum = rt.accum_ratiop(data.Symb, data.Exch, data.Count, norm_by='T')
accum_expected = rt.Dataset(
{
'Symb': ['A', 'B'],
'X': [25.0, 25.0],
'Z': [50.0, 0.0],
'TotalRatio': [75.0, 25.0],
'Total': [9.0, 3.0],
}
)
accum_expected.footer_set_values(
'TotalRatio',
{'Symb': 'TotalRatio', 'X': 50.0, 'Z': 50.0, 'TotalRatio': 100.0},
)
accum_expected.footer_set_values(
'Total', {'Symb': 'Total', 'X': 6.0, 'Z': 6.0, 'Total': 12.0}
)
self.assertTrue((accum == accum_expected).all(axis=None))
# Ratio within columns
accum = rt.accum_ratiop(data.Symb, data.Exch, data.Count, norm_by='c')
accum_expected = rt.Dataset(
{
'Symb': ['A', 'B'],
'X': [50.0, 50.0],
'Z': [100.0, 0.0],
'TotalRatio': [75.0, 25.0],
'Total': [9.0, 3.0],
}
)
accum_expected.footer_set_values(
'TotalRatio',
{'Symb': 'TotalRatio', 'X': 100.0, 'Z': 100.0, 'TotalRatio': 100.0},
)
accum_expected.footer_set_values(
'Total', {'Symb': 'Total', 'X': 6.0, 'Z': 6.0, 'Total': 12.0}
)
self.assertTrue((accum == accum_expected).all(axis=None))
if __name__ == "__main__":
tester = unittest.main()
```
#### File: riptable/tests/test_conversion_utils.py
```python
import unittest
import numpy
from riptable import Dataset, Categorical
from riptable.rt_datetime import DateTimeNano
from riptable.Utils.conversion_utils import (
dataset_as_matrix,
numpy2d_to_dict,
dset_dict_to_list,
append_dataset_dict,
possibly_convert_to_nanotime,
numpy_array_to_dict,
)
class Conversion_Utility_Test(unittest.TestCase):
def test_as_matrix(self):
error_tol = 0.00001
ds = Dataset({'A': [1.2, 3.1, 9.6], 'B': [-1.6, 2.7, 4.6]})
X, _ = dataset_as_matrix(ds)
self.assertIsInstance(X, numpy.ndarray)
self.assertEqual(X.shape[0], ds.shape[0])
self.assertEqual(X.shape[1], ds.shape[1]) # we may break this later
self.assertTrue((numpy.abs(ds.A._np - X[:, 0]) < error_tol).all())
self.assertTrue((numpy.abs(ds.B._np - X[:, 1]) < error_tol).all())
def test_as_matrix_metadata(self):
error_tol = 0.00001
ds = Dataset(
{
'A': ['EXCH1', 'EXCH2', 'EXCH1', 'EXCH3', 'EXCH3'],
'B': [-1.6, 2.7, 4.6, 5.7, 8.9],
'C': Categorical([0, 0, 1, 0, 2], ['CPTYA', 'CPTYB', 'CPTYC']),
}
)
X, X_data = dataset_as_matrix(ds)
self.assertIsInstance(X, numpy.ndarray)
self.assertEqual(X.shape[0], ds.shape[0])
self.assertEqual(X.shape[1], ds.shape[1]) # we may break this later
self.assertEqual(X_data['A']['dtype'], ds.A.dtype)
self.assertEqual(X_data['B']['dtype'], ds.B.dtype)
self.assertEqual(X_data['C']['dtype'], ds.C.dtype)
self.assertEqual(X_data['A']['is_categorical'], False)
self.assertEqual(X_data['B']['is_categorical'], False)
self.assertEqual(X_data['C']['is_categorical'], True)
self.assertTrue(
(numpy.abs(X[:, 0] - numpy.array([0., 1., 0., 2., 2.])) < error_tol).all(),
msg=f"got {X[:, 0]}"
)
self.assertTrue(
(numpy.abs(X[:, 2] - numpy.array([0, 0, 1, 0, 2])) < error_tol).all(),
msg=f"got {X[:, 2]}"
)
self.assertTrue(
(X_data['A']['category_values'][numpy.array([0, 1, 0, 2, 2])] == ds.A).all(),
msg=f"X_data {X_data['A']['category_values'][numpy.array([0, 1, 0, 2, 2])]}\nds.A {ds.A}"
)
def test_as_matrix_int(self):
error_tol = 0.00001
ds = Dataset(
{
_k: list(range(_i * 10, (_i + 1) * 10))
for _i, _k in enumerate('ABCDEFGHIJKLMNOP')
}
)
X, _ = dataset_as_matrix(ds)
self.assertIsInstance(X, numpy.ndarray)
self.assertEqual(X.shape[0], ds.shape[0])
self.assertEqual(X.shape[1], ds.shape[1]) # we may break this later
self.assertTrue((numpy.abs(ds.A._np - X[:, 0]) < error_tol).all())
self.assertTrue((numpy.abs(ds.B._np - X[:, 1]) < error_tol).all())
def test_numpy_array_to_dict(self):
arr = numpy.arange(12).reshape((3, 4)).transpose()
cols = ['A', 'C', 'B']
dd = numpy_array_to_dict(arr, cols)
self.assertEqual(list(dd), cols)
self.assertTrue((dd['A'] == numpy.arange(0, 4)).all())
self.assertTrue((dd['C'] == numpy.arange(4, 8)).all())
self.assertTrue((dd['B'] == numpy.arange(8, 12)).all())
arr = numpy.array(
[(1.0, 'Q'), (-3.0, 'Z')], dtype=[('x', numpy.float64), ('y', 'S1')]
)
dd = numpy_array_to_dict(arr)
self.assertEqual(list(dd), ['x', 'y'])
self.assertTrue(
(dd['x'] == numpy.array([1.0, -3.0], dtype=numpy.float64)).all()
)
self.assertTrue((dd['y'] == numpy.array(['Q', 'Z'], dtype='S1')).all())
# TODO: Remove this? -CLH
def test_numpy2d_to_dict(self):
arr = numpy.arange(12).reshape((3, 4)).transpose()
cols = ['A', 'C', 'B']
dd = numpy2d_to_dict(arr, cols)
self.assertEqual(list(dd), cols)
self.assertTrue((dd['A'] == numpy.arange(0, 4)).all())
self.assertTrue((dd['C'] == numpy.arange(4, 8)).all())
self.assertTrue((dd['B'] == numpy.arange(8, 12)).all())
def test_dset_dict_to_list(self):
ds = Dataset(
{
_k: list(range(_i * 10, (_i + 1) * 10))
for _i, _k in enumerate('abcdefghijklmnop')
}
)
ds0 = ds[:3].copy()
ds1 = ds[6:9].copy()
ds2 = ds[11:15].copy()
dd = {'one': ds0, 'two': ds1, 'μεαν': ds2}
with self.assertRaises(ValueError):
_ = dset_dict_to_list(dd, 'keyfield')
dd = {'one': ds0, 'two': ds1, 3: ds2}
with self.assertRaises(ValueError):
_ = dset_dict_to_list(dd, 'keyfield')
dd = {'one': ds0, 'two': ds1, 'three': ds2}
with self.assertRaises(ValueError):
_ = dset_dict_to_list(dd, 'a')
lst1 = dset_dict_to_list(dd, 'keyfield')
self.assertEqual(id(ds0), id(lst1[0]))
self.assertEqual(id(ds1), id(lst1[1]))
self.assertEqual(id(ds2), id(lst1[2]))
self.assertEqual(list(ds0.keys()), ['a', 'b', 'c', 'keyfield'])
self.assertTrue((ds0.a == list(range(10))).all())
self.assertTrue((ds0.keyfield == 'one').all())
lst2 = dset_dict_to_list(dd, 'a', allow_overwrite=True)
self.assertEqual(id(ds0), id(lst1[0]))
self.assertEqual(list(ds0.keys()), ['a', 'b', 'c', 'keyfield'])
self.assertTrue((ds0.a == 'one').all())
self.assertTrue((ds0.b == list(range(10, 20))).all())
self.assertTrue((ds0.keyfield == 'one').all())
def test_append_dataset_dict(self):
ds = Dataset(
{
_k: list(range(_i * 10, (_i + 1) * 10))
for _i, _k in enumerate('abcdefghijklmnop')
}
)
ds0 = ds[:3].copy()
ds1 = ds[6:9].copy()
ds2 = ds[11:15].copy()
dd = {'one': ds0, 'two': ds1, 'three': ds2}
ds = append_dataset_dict(dd, 'keyfield')
ucols = set()
for _d in dd.values():
ucols.update(_d)
self.assertEqual(set(ds.keys()), ucols)
self.assertEqual(ds.get_nrows(), sum(_d.get_nrows() for _d in dd.values()))
keyfield = []
for _d in dd.values():
keyfield.extend(_d.keyfield)
self.assertTrue((ds.keyfield == keyfield).all())
self.assertTrue((ds.a[:10] == range(10)).all())
self.assertTrue((ds.g[10:20] == range(60, 70)).all())
self.assertTrue((ds.l[20:30] == range(110, 120)).all())
def test_possibly_convert_to_nanotime(self):
ns1 = numpy.int64(1528295695919153408)
arr1 = numpy.array([ns1 + 12 * _i for _i in range(25)])
nt1, okay1 = possibly_convert_to_nanotime(arr1)
self.assertTrue(okay1)
self.assertIsInstance(nt1, DateTimeNano)
arr2 = arr1.astype(numpy.uint64)
nt2, okay2 = possibly_convert_to_nanotime(arr2)
self.assertFalse(okay2)
self.assertNotIsInstance(nt2, DateTimeNano)
self.assertTrue((nt2 == arr2).all())
ns3 = numpy.int64(1070376029353467904)
arr3 = numpy.array([ns3 + 12 * _i for _i in range(25)])
nt3, okay3 = possibly_convert_to_nanotime(arr3)
self.assertFalse(okay3)
self.assertNotIsInstance(nt3, DateTimeNano)
self.assertTrue((nt3 == arr3).all())
if __name__ == "__main__":
tester = unittest.main()
```
#### File: riptable/tests/test_dataset_slicing.py
```python
import numpy as np
import random as rnd
import unittest
from riptable import Dataset, Cat
from riptable.rt_enum import INVALID_DICT
master_type_dict = {
'bool': np.array([1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0], dtype=np.bool_),
'int8': np.array(
[
26,
1,
-9,
INVALID_DICT[np.dtype('int8').num],
13,
INVALID_DICT[np.dtype('int8').num],
26,
5,
26,
-4,
27,
28,
30,
-32,
15,
],
dtype=np.int8,
),
'uint8': np.array(
[
45,
57,
60,
49,
19,
29,
18,
1,
62,
INVALID_DICT[np.dtype('uint8').num],
55,
47,
31,
INVALID_DICT[np.dtype('uint8').num],
27,
],
dtype=np.uint8,
),
'int16': np.array(
[
-601,
-332,
162,
375,
160,
-357,
-218,
-673,
INVALID_DICT[np.dtype('int16').num],
378,
-175,
-529,
INVALID_DICT[np.dtype('int16').num],
-796,
-365,
],
dtype=np.int16,
),
'uint16': np.array(
[
1438,
1723,
433,
1990,
INVALID_DICT[np.dtype('uint16').num],
1528,
1124,
42,
1316,
1003,
1874,
INVALID_DICT[np.dtype('uint16').num],
1533,
1443,
1170,
],
dtype=np.uint16,
),
'int32': np.array(
[
1896652134,
-1424042309,
INVALID_DICT[np.dtype('int32').num],
503239478,
1067866129,
-1974125613,
-1608929297,
-301645171,
1402604369,
INVALID_DICT[np.dtype('int32').num],
1080040975,
-289078200,
-823277029,
-1383139138,
978724859,
],
dtype=np.int32,
),
'uint32': np.array(
[
337083591,
688548370,
INVALID_DICT[np.dtype('uint32').num],
580206095,
328423284,
211281118,
912676658,
132565336,
399918307,
425384120,
723039073,
252319702,
750186713,
197297577,
INVALID_DICT[np.dtype('uint32').num],
],
dtype=np.uint32,
),
'int64': np.array(
[
INVALID_DICT[np.dtype('int64').num],
-423272446,
-235992796,
-1442995093,
INVALID_DICT[np.dtype('int64').num],
109846344,
-1458628816,
232007889,
-1671608168,
1752532663,
-1545252943,
544588670,
-1385051680,
-137319813,
-195616592,
],
dtype=np.int64,
),
'uint64': np.array(
[
765232401,
398653552,
203749209,
288238744,
INVALID_DICT[np.dtype('uint64').num],
271583764,
985270266,
391578626,
196046134,
916025221,
694962930,
34303390,
647346354,
INVALID_DICT[np.dtype('uint64').num],
334977533,
],
dtype=np.uint64,
),
'float32': np.array(
[
np.nan,
0.6201803850883267,
0.05285394972525459,
0.1435023986327576,
np.nan,
0.32308353808130397,
0.1861463881422203,
0.6366386808076959,
0.7703864299590418,
0.8155206130668257,
0.9588669164271945,
0.2832984888482334,
0.02662158289064087,
0.2591740277624228,
0.28945199094333374,
]
).astype(np.float32),
'float64': np.array(
[
0.264105510380617,
np.nan,
0.9094594817708785,
0.13757414135018453,
0.9997438463622871,
0.1642171078246103,
0.4883940875811662,
0.2819313242616074,
0.7868397473215173,
0.8963052108412053,
0.03571507605557389,
0.6423436033517553,
0.04402603090628798,
0.5619514123321582,
np.nan,
]
).astype(np.float64),
'bytes': np.array(
[
INVALID_DICT[np.dtype('bytes').num],
b'12398dfkw',
b'dlkv;lk3-2',
b'111dkjfj3',
b'e0383hjfns',
b'qwernvldkj',
b'abefgkejf',
b'as777nrn',
b'23dhsjkifuywfwefj',
INVALID_DICT[np.dtype('bytes').num],
b'zkdfjlw',
b'a',
b';][{}[\\|||+=_-',
b'qwernvldkj',
b'abefgkejf',
],
dtype=np.bytes_,
),
'unicode': np.array(
[
'asdf233rf',
'12398dfkw',
'dlkv;lk3-2',
'111dkjfj3',
'e0383hjfns',
INVALID_DICT[np.dtype('str_').num],
'abefgkejf',
'as777nrn',
'23dhsjkifuywfwefj',
'rrrrn2fhfewl',
'zkdfjlw',
'a',
';][{}[\\|||+=_-',
'qwernvldkj',
INVALID_DICT[np.dtype('str_').num],
],
dtype=np.str_,
),
}
simple_keys = dict(
zip(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm'],
list(master_type_dict.values()),
)
)
ds = Dataset(simple_keys)
num_cols = len(simple_keys)
num_rows = len(simple_keys['a'])
dict_key_list = list(simple_keys.keys())
dict_col_names = np.array(dict_key_list)
# --------------------SLICE DATA---------------------------------------------
# ---------------------------------------------------------------------------
single_slices = {
":2": slice(None, 2, None),
"-2:": slice(-2, None, None),
"2:5": slice(2, 5, None),
"5:": slice(5, None, None),
":": slice(None, None, None),
}
# ---------------------------------------------------------------------------
row_bool_arrays = {
"python_bool": [
True,
False,
True,
False,
False,
True,
True,
True,
False,
True,
False,
False,
True,
False,
True,
],
"numpy_bool": np.array(
[
True,
False,
True,
False,
False,
True,
True,
True,
False,
True,
False,
False,
True,
False,
True,
]
),
}
# ---------------------------------------------------------------------------
col_bool_arrays = {
"python_bool": [
True,
False,
True,
False,
False,
True,
True,
True,
False,
True,
False,
False,
True,
],
"numpy_bool": np.array(
[
True,
False,
True,
False,
False,
True,
True,
True,
False,
True,
False,
False,
True,
]
),
}
# ---------------------------------------------------------------------------
col_string_lists = {
"col_names_size"
+ str(sample_size): [
dict_key_list[i] for i in rnd.sample(range(1, num_cols), sample_size)
]
for sample_size in range(1, num_cols)
}
# ---------------------------------------------------------------------------
row_idx_arrays = {
"int_idx_size"
+ str(idx_size): np.random.randint(low=0, high=num_rows, size=idx_size)
for idx_size in range(1, num_rows)
}
# ---------------------------------------------------------------------------
col_idx_arrays = {
"int_idx_size"
+ str(idx_size): np.random.randint(low=0, high=num_cols, size=idx_size)
for idx_size in range(1, num_cols)
}
# change to True for printouts of slice input
ShowSliceInfo = False
class Dataset_Slice_Accuracy(unittest.TestCase):
# ------------------GENERAL COMPARE FUNCTIONS-------------------------------
# --------------------------------------------------------------------------
def try_conversions(self, ds_item, dict_item):
# nan != nan, and strings get converted from unicode to bytes
# make sure these aren't the same values in a different form
nans = False
try:
if np.isnan(ds_item) and np.isnan(dict_item):
nans = True
except TypeError:
pass
if nans:
pass
elif isinstance(ds_item, bytes):
ds_item_str = ds_item.decode()
if ds_item_str == dict_item:
pass
else:
if ShowSliceInfo is True:
print("Items did not match!")
print("incorrect:", ds_item, "correct:", dict_item)
return 1
return 0
# --------------------------------------------------------------------------
def match_lists(self, ds_list, dict_list):
# compares items in lists
for idx, ds_item in enumerate(ds_list):
dict_item = dict_list[idx]
if ds_item != dict_item:
# further test for nans and string type conversions
try_convert = self.try_conversions(ds_item, dict_item)
self.assertEqual(try_convert, 0)
# -------------------------ROW ONLY-----------------------------------------
# --------------------------------------------------------------------------
def rsingle(self, slice_dict):
# checks list of lists
for input_str, input in slice_dict.items():
ds1 = ds[input, :]
ds_list = [getattr(ds1, col) for col in ds1]
dict_list = [val[input] for val in simple_keys.values()]
if ShowSliceInfo:
print("Checking ds[" + str(input_str) + "]")
for idx, ds_section in enumerate(ds_list):
dict_section = dict_list[idx]
self.match_lists(ds_section, dict_section)
def row_single(self):
# [int]
dict_size = len(simple_keys['a'])
for input in range(dict_size):
# TODO: fix bug in ds._getitem for how single ints are handled when the slice is [-1]
ds1 = ds[input, :]
ds_list = [getattr(ds1, col)[0] for col in ds1]
dict_list = [val[input] for val in simple_keys.values()]
if ShowSliceInfo:
print("Checking ds[" + str(input) + "]")
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def test_row_masks(self):
# [int:int]
# [[bool, bool, bool]]
# [[int, int, int]]
errors = 0
for row_param in [single_slices, row_bool_arrays, row_idx_arrays]:
self.rsingle(row_param)
# ----------------------------COLUMN ONLY-----------------------------------
# --------------------------------------------------------------------------
def col_string(self):
# ['col']
for input, dict_list in simple_keys.items():
if ShowSliceInfo:
print("Checking ds['" + str(input) + "']")
ds_list = ds[input]
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def col_string_list(self):
# [['col1', 'col2', 'col3']]
slice_dict = col_string_lists
for input_str, input in slice_dict.items():
ds1 = ds[input]
ds_list = [getattr(ds1, col) for col in ds1]
dict_list = [simple_keys[i] for i in input]
if ShowSliceInfo:
print("Checking ds[" + str(input_str) + "]")
for idx, ds_section in enumerate(ds_list):
dict_section = dict_list[idx]
self.match_lists(ds_section, dict_section)
# --------------------COMBINED SLICING--------------------------------------
# --------------------------------------------------------------------------
def rsingle_cmulti(self, slice_dict):
# [int, slice/bool/idx]
num_rows = len(simple_keys['a'])
for col_slice_str, col_slice in slice_dict.items():
dict_sliced_col_names = dict_col_names[col_slice]
for row_num in range(num_rows):
try:
ds1 = ds[row_num, col_slice]
except IndexError as e:
self.assertEqual(e.args[0], 'Cannot index cols with duplicates.')
continue
if ShowSliceInfo:
print(f"Checking ds[{row_num}, {col_slice_str}]")
dict_list = [
simple_keys[dict_name][row_num]
for dict_name in dict_sliced_col_names
]
ds_list = [
getattr(ds1, dict_name)[0] for dict_name in dict_sliced_col_names
]
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def rmulti_cmulti(self, row_multi_arrays, col_multi_arrays):
# [slice/bool/idx, slice/bool/idx]
for col_slice_str, col_slice in col_multi_arrays.items():
dict_sliced_col_names = dict_col_names[col_slice]
for row_slice_str, row_slice in row_multi_arrays.items():
try:
ds1 = ds[row_slice, col_slice]
except IndexError as e:
self.assertEqual(e.args[0], 'Cannot index cols with duplicates.')
continue
if ShowSliceInfo:
print(f"Checking ds[{row_slice_str}, {col_slice_str}]")
for dict_name in dict_sliced_col_names:
dict_list = simple_keys[dict_name][row_slice]
ds_list = getattr(ds1, dict_name)
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def rsingle_csingle(self):
# [int, int]
errors = 0
for col_idx, (col_name, dict_list) in enumerate(simple_keys.items()):
if ShowSliceInfo:
print("Checking rows in column", col_name)
num_rows = len(dict_list)
for row_idx in range(num_rows):
ds1 = ds[row_idx, col_idx]
ds_value = getattr(ds1, col_name)[0]
dict_value = dict_list[row_idx]
if ds_value != dict_value:
try_convert = self.try_conversions(ds_value, dict_value)
self.assertEqual(try_convert, 0)
# --------------------------------------------------------------------------
def rsingle_cstringlist(self):
# [int, ['col1', 'col2', 'col3']]
for col_str, col_stringlist in col_string_lists.items():
for row_num in range(num_rows):
ds1 = ds[row_num, col_stringlist]
if ShowSliceInfo:
print("Checking ds[" + str(row_num) + "," + col_str + "]")
dict_list = [
simple_keys[dict_name][row_num] for dict_name in col_stringlist
]
ds_list = [getattr(ds1, dict_name)[0] for dict_name in col_stringlist]
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def rmulti_cstringlist(self, row_multi_arrays):
# [slice/bool/idx, ['col1', 'col2', 'col3']]
for col_str, col_stringlist in col_string_lists.items():
for row_slice_str, row_slice in row_multi_arrays.items():
if ShowSliceInfo:
print("Checking ds[" + row_slice_str + "," + col_str + "]")
ds1 = ds[row_slice, col_stringlist]
for dict_name in col_stringlist:
dict_list = simple_keys[dict_name][row_slice]
ds_list = getattr(ds1, dict_name)
self.match_lists(ds_list, dict_list)
# --------------------------------------------------------------------------
def test_rsingle_cmask_combos(self):
# [int, int:int]
# [int, [bool, bool, bool]]
# [int, [int, int, int]]
for col_param in [single_slices, col_bool_arrays, col_idx_arrays]:
self.rsingle_cmulti(col_param)
# --------------------------------------------------------------------------
def test_rmask_cstringlist_combos(self):
# [int:int, ['col1', 'col2', 'col3']]
# [[bool, bool, bool], ['col1', 'col2', 'col3']]
# [[int, int, int], ['col1', 'col2', 'col3']]
for row_param in [single_slices, row_bool_arrays, row_idx_arrays]:
self.rmulti_cstringlist(row_param)
# --------------------------------------------------------------------------
def test_all_mask_combos(self):
# [int:int, int:int]
# [int:int, [bool, bool, bool]]
# [int:int, [int, int, int]]
# [[bool, bool, bool], int:int]
# [[bool, bool, bool], [bool, bool, bool]]
# [[bool, bool, bool], [int, int, int]]
# [[int, int, int], int:int]
# [[int, int, int], [bool, bool, bool]]
# [[int, int, int], [int, int, int]]
for row_param in [single_slices, row_bool_arrays, row_idx_arrays]:
for col_param in [single_slices, col_bool_arrays, col_idx_arrays]:
self.rmulti_cmulti(row_param, col_param)
# --------------------------------------------------------------------------
def test_slice_accuracy(self):
self.row_single()
self.col_string()
self.col_string_list()
def test_add_dataset(self):
arrsize = 200
numrows = 7
ds = Dataset({'time': np.arange(arrsize * 1.0)})
ds.data = np.random.randint(numrows, size=arrsize)
ds.data2 = np.random.randint(numrows, size=arrsize)
symbols = [
'AAPL',
'AMZN',
'FB',
'GOOG',
'IBM',
'6',
'7',
'8',
'9',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'17',
'18',
]
symbol2 = ['A', 'X', 'P', 'C', 'D', 'E', 'F', 'G', 'G', 'I', 'J', 'K']
ds.symbol2 = Cat(1 + np.arange(arrsize) % len(symbol2), symbol2)
ds.symbol = Cat(1 + np.arange(arrsize) % len(symbols), symbols)
x = ds.copy()
del x.symbol
del x.data
del x.time
x.label_set_names('data2')
# now x has two columns, and one is labelled so adding an entire dataset should just add x.symbol2
ds.junk = x
if __name__ == '__main__':
tester = unittest.main()
```
#### File: riptable/tests/test_groupby_functions.py
```python
import riptable as rt
import random as rand
import pandas as pd
import unittest
functions_str = [
'count',
'sum',
'mean',
'median',
'min',
'max',
# 'prod',
'var',
# 'quantile',
'cumsum',
'cumprod',
# 'cummax',
# 'cummin'
'first',
'last',
# 'mode'
]
import numpy as np
type_list = [
# np.bool, ## not a numeric type
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
# np.float16, ## not supported
np.float32,
np.float64,
# np.complex64, ## not supported
# np.complex128 ## not supported
]
def safe_equal(ary1, ary2):
assert len(ary1) == len(ary2)
def isNaN(num):
return num != num
for a, b in zip(ary1, ary2):
if not (a == b or (isNaN(a) and isNaN(b))):
return False
return True
def min(a, b):
return a if a < b else b
class GroupbyFunctions_Test(unittest.TestCase):
def groupby_func(self, df, fn):
return getattr(df, functions_str[fn])
def test_single_col_groupby_tests(self):
Values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Keys = ['a', 'b', 'c', 'a', 'b', 'c', 'd', 'e', 'f']
for type_ in type_list:
data = {'Vs': rt.FastArray(Values, dtype=type_), 'Ks': Keys}
pd_data = pd.DataFrame(data)
sfw_data = rt.Dataset(data)
key = 'Ks'
val = 'Vs'
pd_gb = pd_data.groupby(key)
sfw_gb = sfw_data.groupby(key)
for name in functions_str:
pd_func = getattr(pd_gb, name)
sfw_func = getattr(sfw_gb, name)
pd_out = pd_func()
sfw_out = sfw_func()
pd_col = pd_out[val]._values
if name == 'count':
sfw_col = sfw_out['Count']
else:
sfw_col = sfw_out[val]
is_integer_subtype = np.issubdtype(type_, np.integer)
is_median = name != 'median'
if not safe_equal(pd_col, sfw_col) and (
not is_integer_subtype and not is_median
):
print('data_type_t = ', type_)
print('function =', name)
print('pandas output =', pd_col)
print('sfw output =', sfw_col)
# TODO: move these prints into the assertion's error message
self.assertTrue(False)
# TODO pytest parameterize type_list
def test_multi_col_groupby_tests(self, numb_keys_and_values=5, numb_rows=20):
col_val_names = ['alpha', 'beta', 'gamma', 'sigma', 'zeta']
col_key_names = ['lions', 'tigers', 'bears', 'oh', 'my']
MAX_LENGTH = min(len(col_val_names), len(col_key_names))
assert numb_keys_and_values <= MAX_LENGTH
for type_ in type_list:
vals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
keys = '<KEY>'.split(' ')
vs = []
ks = []
for i in range(0, numb_keys_and_values):
vs.append(
[vals[rand.randint(0, len(vals) - 1)] for i in range(0, numb_rows)]
)
ks.append(
[keys[rand.randint(0, len(keys) - 1)] for i in range(0, numb_rows)]
)
data = {}
for i in range(0, numb_keys_and_values):
data[col_val_names[i]] = rt.FastArray(vs[i], dtype=type_)
data[col_key_names[i]] = rt.FastArray(vs[i], dtype=type_)
pd_data = pd.DataFrame(data)
sfw_data = rt.Dataset(data)
key = col_key_names[0:numb_keys_and_values]
val = col_val_names[0:numb_keys_and_values]
pd_gb = pd_data.groupby(key)
sfw_gb = sfw_data.groupby(key)
for name in functions_str:
pd_out = getattr(pd_gb, name)()
sfw_out = getattr(sfw_gb, name)()
if name == 'count':
# only compare one column for count
pd_col = pd_out['alpha']
sfw_col = sfw_out.Count
if not safe_equal(pd_col, sfw_col):
print('function =', name)
print('pandas output =', pd_col)
print('sfw output =', sfw_col)
self.assertTrue(False)
else:
for val in col_val_names:
# extract array from pandas series
pd_col = pd_out[val]._values
sfw_col = sfw_out[val]
is_integer_subtype = np.issubdtype(type_, np.integer)
is_median = name != 'median'
if not safe_equal(pd_col, sfw_col) and (
not is_integer_subtype and not is_median
):
print('function =', name)
print('pandas output =', pd_col)
assert False
if __name__ == "__main__":
tester = unittest.main()
```
#### File: riptable/tests/test_lexsort.py
```python
import unittest
from riptable import *
def arr_eq(a, b):
return bool(np.all(a == b))
def arr_all(a):
return bool(np.all(a))
class Lexsort_Test(unittest.TestCase):
'''
TODO: add more tests for different types
also include string types
'''
# TODO pytest parameterize arr_end
def test_numeric_multikey(self):
arr_len = 1_000_000
d = {
'int8': np.random.randint(0, 120, arr_len, dtype=np.int8),
'int16': np.random.randint(0, 32000, arr_len, dtype=np.int16),
'int32': np.random.randint(0, arr_len, arr_len, dtype=np.int32),
'int64': np.random.randint(0, arr_len, arr_len, dtype=np.int64),
'float32': np.random.rand(arr_len).astype(np.float32),
'float64': np.random.rand(arr_len),
'bytes': np.random.choice(
["AAPL\u2080", "AMZN\u2082", "IBM\u2081"], arr_len
),
}
for arr_end in [100, 1000, 10_000, 100_000, 1_000_000]:
arr = list(a[:arr_end] for a in d.values())
np_result = np.lexsort(arr)
sfw_result = lexsort(arr)
diff = np_result - sfw_result
self.assertEqual(
0,
sum(diff),
msg=f"Lexsort results for length {arr_end} did not match numpy. Total off by {sum(diff)}",
)
def test_low_unique(self):
# toggle threading
for i in range(2):
if i == 0:
FA._TOFF()
arr_len = 300_000
ds = Dataset(
{
'flts': np.random.choice([1.11, 2.22, 3.33], arr_len),
'bytes': full(arr_len, 'a'),
'ints': np.random.randint(4, 7, arr_len),
'strs': np.random.choice(['700', '800', '900'], arr_len).view(FA),
}
)
# regular
rt_lex = lexsort(list(ds.values()))
np_lex = np.lexsort(list(ds.values()))
self.assertEqual(sum(rt_lex - np_lex), 0)
# record array
rec_forward = ds.as_recordarray()
rt_lex_f = lexsort([rec_forward])
np_lex_f = np.lexsort([rec_forward])
self.assertEqual(sum(rt_lex_f - np_lex_f), 0)
# record array
ds2 = ds[list(ds)[::-1]]
rec_backward = ds2.as_recordarray()
rt_lex_b = lexsort([rec_backward])
np_lex_b = np.lexsort([rec_backward])
self.assertEqual(sum(rt_lex_b - np_lex_b), 0)
FA._TON()
# self.assertEqual( sum(rt_lex - np_lex), 0 )
def test_record_array(self):
# toggle threading
for i in range(2):
if i == 0:
FA._TOFF()
arr_len = 300_000
ds = Dataset(
{
'uint64': np.random.randint(0, 1000, arr_len, dtype=np.uint64),
'uint32': np.random.randint(0, 1000, arr_len, dtype=np.uint32),
'uint16': np.random.randint(0, 1000, arr_len, dtype=np.uint16),
'uint8': np.random.randint(0, 200, arr_len, dtype=np.uint8),
}
)
# if the arrays are in this order (large itemsize -> small, record array results will compare correctly)
rec = ds.as_recordarray()
rt_lex = lexsort([rec])
np_lex = np.lexsort([rec])
self.assertEqual(sum(rt_lex - np_lex), 0)
FA._TON()
def test_gb_lex(self):
length = 30
int_dt = [
np.int8,
np.uint8,
np.int16,
np.uint16,
np.int32,
np.uint32,
np.int64,
np.uint64,
]
flt_dt = [np.float32, np.float64]
str_dt = ['S', 'U']
vals = [1, 2, 3]
for idt in int_dt:
arr = np.random.choice(vals, length).astype(idt)
arr[0] = vals[0]
arr[1] = vals[1]
arr[2] = vals[2]
gbh = groupbyhash(arr)
gblex = groupbylex(arr)
self.assertTrue(
bool(np.all(gbh['iKey'] == gblex['iKey'])), msg=f'failed on {arr.dtype}'
)
vals = [1.1, 2.2, 3.3]
for fdt in flt_dt:
arr = np.random.choice(vals, length).astype(fdt)
arr[0] = vals[0]
arr[1] = vals[1]
arr[2] = vals[2]
gbh = groupbyhash(arr)
gblex = groupbylex(arr)
self.assertTrue(
bool(np.all(gbh['iKey'] == gblex['iKey'])), msg=f'failed on {arr.dtype}'
)
vals = ['a', 'b', 'c']
for sdt in str_dt:
arr = np.random.choice(vals, length).astype(sdt)
arr[0] = vals[0]
arr[1] = vals[1]
arr[2] = vals[2]
gbh = groupbyhash(arr)
gblex = groupbylex(arr)
self.assertTrue(
bool(np.all(gbh['iKey'] == gblex['iKey'])), msg=f'failed on {arr.dtype}'
)
def test_igroup_ifirst_ncount(self):
vals = [1, 2, 3]
vals2 = [1.1, 2.2, 3.3]
vals3 = [b'a', b'b', b'c']
vals4 = ['a', 'b', 'c']
for vals in [vals, vals2, vals3, vals4]:
arr = np.random.choice(vals, 30)
for i in range(len(vals)):
arr[i] = vals[i]
ds = Dataset({'keycol': arr, 'data': np.random.rand(30)})
gbh = ds.gb('keycol')
gbh.grouping.pack_by_group()
gblex = groupbylex(arr)
self.assertTrue(
bool(np.all(gblex['iGroup'] == gbh.grouping.iGroup)),
msg=f'failed on {arr.dtype}',
)
self.assertTrue(
bool(np.all(gblex['iFirstGroup'] == gbh.grouping.iFirstGroup)),
msg=f'failed on {arr.dtype}',
)
self.assertTrue(
bool(np.all(gblex['nCountGroup'] == gbh.grouping.nCountGroup)),
msg=f'failed on {arr.dtype}',
)
def test_gb_lex_multikey(self):
vals_numeric = np.random.randint(0, 3, 100_000)
vals_str = np.random.choice(['a', 'b', 'c'], 100_000)
vals_numeric[:3] = 0
vals_numeric[3:6] = 1
vals_numeric[6:9] = 2
vals_str[[0, 3, 6]] = 'a'
vals_str[[1, 4, 7]] = 'b'
vals_str[[2, 5, 8]] = 'c'
gbh = groupbyhash([vals_numeric, vals_str], pack=True)
gblex = groupbylex([vals_numeric, vals_str], rec=False)
self.assertTrue(
bool(np.all(gblex['iKey'] == gbh['iKey'])),
msg=f'failed on int string multikey',
)
self.assertTrue(
bool(np.all(gblex['iFirstKey'] == gbh['iFirstKey'])),
msg=f'failed on int string multikey',
)
self.assertTrue(
bool(np.all(gblex['unique_count'] == gbh['unique_count'])),
msg=f'failed on int string multikey',
)
self.assertTrue(
bool(np.all(gblex['iGroup'] == gbh['iGroup'])),
msg=f'failed on int string multikey',
)
self.assertTrue(
bool(np.all(gblex['iFirstGroup'] == gbh['iFirstGroup'])),
msg=f'failed on int string multikey',
)
self.assertTrue(
bool(np.all(gblex['nCountGroup'] == gbh['nCountGroup'])),
msg=f'failed on int string multikey',
)
def test_rt_np_igroup(self):
vals_numeric = np.random.randint(0, 5, 100_000)
gbh = groupbyhash(vals_numeric)
gblex = groupbylex(vals_numeric)
nplex = np.lexsort([vals_numeric])
self.assertTrue(bool(np.all(gblex['iGroup'] == nplex)))
def test_lex_nans(self):
arr = np.random.choice([np.nan, 1.11, 2.22, 3.33], 50)
arr[0] = 1.11
arr[1] = 2.22
arr[2] = 3.33
arr[3] = np.nan
gbh = groupbyhash(arr, pack=True)
gblex = groupbylex(arr)
self.assertTrue(
bool(np.all(gblex['iKey'] == gbh['iKey'])),
msg=f'failed on int single float with nans',
)
self.assertTrue(
bool(np.all(gblex['iFirstKey'] == gbh['iFirstKey'])),
msg=f'failed on int single float with nans',
)
self.assertTrue(
bool(np.all(gblex['unique_count'] == gbh['unique_count'])),
msg=f'failed on int single float with nans',
)
self.assertTrue(
bool(np.all(gblex['iGroup'] == gbh['iGroup'])),
msg=f'failed on int single float with nans',
)
self.assertTrue(
bool(np.all(gblex['iFirstGroup'] == gbh['iFirstGroup'])),
msg=f'failed on int single float with nans',
)
self.assertTrue(
bool(np.all(gblex['nCountGroup'] == gbh['nCountGroup'])),
msg=f'failed on int single float with nans',
)
def test_all_unique(self):
arr = np.random.choice(100_000, 50_000, replace=False)
int_dt = [np.int32, np.uint32, np.int64, np.uint64]
flt_dt = [np.float32, np.float64]
str_dt = ['S', 'U']
for dt in int_dt + flt_dt + str_dt:
a = arr.astype(dt)
sortidx = np.lexsort([a])
gbh = groupbyhash(arr, pack=True)
gblex = groupbylex(a)
self.assertTrue(
bool(np.all(gblex['iFirstKey'] == sortidx)),
msg=f'failed on int all unique with dtype {a.dtype}',
)
self.assertTrue(
bool(np.all(gblex['unique_count'] == gbh['unique_count'])),
msg=f'failed on int all unique with dtype {a.dtype}',
)
self.assertTrue(
bool(np.all(gblex['iGroup'] == sortidx)),
msg=f'failed on int all unique with dtype {a.dtype}',
)
arr.astype('S')
def test_lex_hash_categorical(self):
arr = np.random.choice(['a', 'b', 'c'], 20)
c_lex = Categorical(arr, lex=True)
c_hash = Categorical(arr, lex=False)
self.assertTrue(arr_eq(c_lex._fa, c_hash._fa))
self.assertTrue(arr_eq(c_lex.expand_array, c_hash.expand_array))
self.assertEqual(c_lex.base_index, c_hash.base_index)
c_lex_zero = Categorical(arr, lex=True, base_index=0)
c_hash_zero = Categorical(arr, lex=False, base_index=0)
self.assertTrue(arr_eq(c_lex_zero._fa, c_hash_zero._fa))
self.assertTrue(arr_eq(c_lex_zero.expand_array, c_hash_zero.expand_array))
self.assertEqual(c_lex_zero.base_index, c_hash_zero.base_index)
self.assertTrue(arr_eq(c_lex_zero.expand_array, c_lex.expand_array))
def test_lex_categorical_error(self):
with self.assertRaises(TypeError):
c = Categorical([1, 2, 3], {1: 'a', 2: 'b', 3: 'c'}, lex=True)
with self.assertRaises(TypeError):
c = Categorical(['a', 'a', 'b', 'c', 'a'], ['a', 'b', 'c'], lex=True)
def test_lex_filter(self):
arr = np.random.choice(['a', 'b', 'c'], 20)
f = logical(arange(20) % 2)
c_lex = Categorical(arr, filter=f, lex=True)
c_hash = Categorical(arr, filter=f, lex=False)
# ikeys will be different because combine filter uses first occurrence numbering
# self.assertTrue(arr_eq(c_lex._fa,c_hash._fa))
self.assertTrue(arr_eq(c_lex.expand_array, c_hash.expand_array))
arr = FA(['a', 'a', 'b', 'c', 'a'])
f = FA([True, True, False, True, True])
c_lex = Categorical(arr, filter=f, lex=True)
c_hash = Categorical(arr, filter=f, lex=False)
self.assertEqual(c_lex.unique_count, c_hash.unique_count)
# self.assertTrue(arr_eq(c_lex._fa,c_hash._fa))
self.assertTrue(arr_eq(c_lex.expand_array, c_hash.expand_array))
def test_reverse_shuffle(self):
arr_len = 300_000
values = FA(np.random.randint(1, 7, arr_len))
sorted_idx = lexsort(values)
reverse_sort = rc.ReverseShuffle(sorted_idx)
sorted_vals = values[sorted_idx]
unsorted_vals = sorted_vals[reverse_sort]
self.assertTrue(arr_eq(unsorted_vals, values))
if __name__ == "__main__":
tester = unittest.main()
```
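The hash and lexicographical grouping paths compared throughout these tests return dictionaries with the same fields; a minimal sketch mirroring the setup in `test_gb_lex`, assuming the same `riptable` functions used above:
```python
import numpy as np
from riptable import FA, groupbyhash, groupbylex

# Pin the first occurrences in sorted order so hash (first-occurrence) numbering
# and lex (sorted) numbering agree, as test_gb_lex does.
arr = FA(np.random.choice([10, 20, 30], 1000))
arr[0], arr[1], arr[2] = 10, 20, 30

gbh = groupbyhash(arr)
gblex = groupbylex(arr)
assert bool(np.all(gbh['iKey'] == gblex['iKey']))
assert bool(np.all(gblex['iGroup'] == np.lexsort([arr])))
```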
#### File: riptable/tests/test_restore_subclass.py
```python
import unittest
from riptable import *
from riptable.rt_datetime import NANOS_PER_DAY
def arr_eq(a, b):
return arr_all(a == b)
def arr_all(a):
return bool(np.all(a))
class RestoreSubclass_Test(unittest.TestCase):
def test_tile(self):
dtn = DateTimeNano.random(5)
dtn2 = tile(dtn, 2)
nptile = tile(dtn._np, 2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(dtn2._fa == nptile)))
ts = TimeSpan(np.random.randint(0, NANOS_PER_DAY, 5, dtype=np.int64))
ts2 = tile(ts, 2)
nptile = tile(ts._np, 2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(ts2._fa == nptile)))
d = Date(np.random.randint(15_000, 20_000, 5))
d2 = tile(d, 2)
nptile = tile(d._np, 2)
self.assertTrue(isinstance(d2, Date))
self.assertTrue(arr_eq(nptile, d2._fa))
ds = DateSpan(np.random.randint(0, 365, 5))
ds2 = tile(ds, 2)
nptile = tile(ds._np, 2)
self.assertTrue(isinstance(ds2, DateSpan))
self.assertTrue(arr_eq(nptile, ds2._fa))
c = Categorical(['a', 'a', 'b', 'c', 'a'])
c2 = tile(c, 2)
nptile = tile(c._np, 2)
self.assertTrue(isinstance(c2, Categorical))
self.assertTrue(arr_eq(nptile, c2._fa))
def test_repeat(self):
dtn = DateTimeNano.random(5)
dtn2 = repeat(dtn, 2)
nprep = repeat(dtn._np, 2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(dtn2._fa == nprep)))
dtn2 = dtn.repeat(2)
nprep = dtn._np.repeat(2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(dtn2._fa == nprep)))
ts = TimeSpan(np.random.randint(0, NANOS_PER_DAY, 5, dtype=np.int64))
ts2 = repeat(ts, 2)
nprep = repeat(ts._np, 2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(ts2._fa == nprep)))
ts2 = ts.repeat(2)
nprep = ts._np.repeat(2)
self.assertTrue(isinstance(dtn2, DateTimeNano))
self.assertTrue(bool(np.all(ts2._fa == nprep)))
d = Date(np.random.randint(15_000, 20_000, 5))
d2 = repeat(d, 2)
nprep = repeat(d._np, 2)
self.assertTrue(isinstance(d2, Date))
self.assertTrue(arr_eq(nprep, d2._fa))
d2 = d.repeat(2)
nprep = d._np.repeat(2)
self.assertTrue(isinstance(d2, Date))
self.assertTrue(arr_eq(nprep, d2._fa))
ds = DateSpan(np.random.randint(0, 365, 5))
ds2 = repeat(ds, 2)
nprep = repeat(ds._np, 2)
self.assertTrue(isinstance(ds2, DateSpan))
self.assertTrue(arr_eq(nprep, ds2._fa))
ds2 = ds.repeat(2)
nprep = ds._np.repeat(2)
self.assertTrue(isinstance(ds2, DateSpan))
self.assertTrue(arr_eq(nprep, ds2._fa))
c = Categorical(['a', 'a', 'b', 'c', 'a'])
c2 = repeat(c, 2)
nprep = repeat(c._np, 2)
self.assertTrue(isinstance(c2, Categorical))
self.assertTrue(arr_eq(nprep, c2._fa))
c2 = c.repeat(2)
nprep = c._np.repeat(2)
self.assertTrue(isinstance(c2, Categorical))
self.assertTrue(arr_eq(nprep, c2._fa))
def test_gbkeys(self):
length = 20
ds = Dataset(
{
'CAT': Categorical(np.random.choice(['a', 'b', 'c'], length)),
'DTN': DateTimeNano.random(length),
'DATE': Date(np.random.randint(15000, 20000, length)),
'TSPAN': TimeSpan(
np.random.randint(
0, 1_000_000_000 * 60 * 60 * 24, length, dtype=np.int64
)
),
'DSPAN': DateSpan(np.random.randint(0, 365, length)),
}
)
for k, v in ds.items():
result = ds.gb(k).count()
if k != 'CAT':
self.assertEqual(type(result[k]), type(v))
self.assertTrue(arr_eq([k], result.label_get_names()))
def test_mbget(self):
length = 20
ds = Dataset(
{
'CAT': Categorical(np.random.choice(['a', 'b', 'c'], length)),
'DTN': DateTimeNano.random(length),
'DATE': Date(np.random.randint(15000, 20000, length)),
'TSPAN': TimeSpan(
np.random.randint(
0, 1_000_000_000 * 60 * 60 * 24, length, dtype=np.int64
)
),
'DSPAN': DateSpan(np.random.randint(0, 365, length)),
}
)
for k, v in ds.items():
result = mbget(v, [1, 2, 3])
self.assertEqual(type(result), type(v))
def test_cat_grouping(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'])
ds = Dataset({'catcol': c, 'data': FA([1, 1, 2, 3, 1])})
ds2 = ds.drop_duplicates('data')
c2 = ds2.catcol
self.assertTrue(isinstance(c2, Categorical))
self.assertTrue(arr_eq(['a', 'b', 'c'], c2))
self.assertTrue(arr_eq([1, 2, 3], c2.grouping.ikey))
self.assertTrue(c2.grouping.isdirty)
self.assertTrue(arr_eq(c2[[0]], ['a']))
if __name__ == '__main__':
tester = unittest.main()
```
#### File: riptable/tests/test_timewindow.py
```python
import unittest
from riptable import *
class TimeWindow_Test(unittest.TestCase):
def test_time_window(self):
a = arange(100)
r = rc.TimeWindow(uint64(a), int64(a), 0, 0)
self.assertEqual(r[99], 99, msg=f"Wrong result produced for timewindow {r}")
r = rc.TimeWindow(single(a), int64(a), 0, 0)
self.assertEqual(r[99], 99.0, msg=f"Wrong result produced for timewindow {r}")
r = rc.TimeWindow(int64(a), int64(a), 0, 0)
self.assertEqual(r[99], 99, msg=f"Wrong result produced for timewindow {r}")
r = rc.TimeWindow(int64(a), int64(a), 0, 3)
self.assertEqual(
r[99], 99 + 98 + 97 + 96, msg=f"Wrong result produced for timewindow {r}"
)
if __name__ == "__main__":
tester = unittest.main()
```
#### File: test_tooling_integration/test_display/vnu_checker.py
```python
import os
import subprocess
from typing import List, Optional
# The HTML rendering is expected to be a snippet of an HTML document as opposed to a standalone webpage.
# The following will ignore validation errors that are meant for a standalone HTML document.
_WHITELIST_ERRORS: List[str] = [
# DOCTYPE tag is expected at the top of the HTML document; ignore in snippet.
r'Expected "<!DOCTYPE html>"',
# Title element defines the documents title shown in the browser; ignore in snippet.
r'Element "head" is missing a required instance of child element "title"',
r"Non-space character in page trailer",
# Some CSS background-color hex values are reported as invalid.
r'not a "background-color" value',
# The following are due to multiple HTML <html> elements in a single snippet.
# Valid HTML has one per document, but we render two:
# 1) Riptable object and styles
# 2) Metadata about the Riptable object that was rendered such as shape and bytes
r'Stray start tag "html"',
r'Stray start tag "p"',
r"fatal: Cannot recover after last error",
]
class VNUChecker:
_CN = "VNUChecker"
_JAVA = r"java"
_test_display_path = os.path.join(
os.getcwd(),
r"Python",
r"core",
r"riptable",
r"test_tooling_integration",
r"test_display",
)
_JAR_PATH = os.path.join(_test_display_path, r"vnu_jar", r"vnu.jar")
_BASE_PATH = os.path.join(_test_display_path, r"test_display", r"html_output")
def __init__(
self,
java: Optional[str] = None,
jar_path: Optional[str] = None,
base_path: Optional[str] = None,
errors_only: bool = False,
ascii_quotes: bool = False,
):
self._java = java
self._jar_path = jar_path
self.base_path = base_path
self._errors_only = errors_only
self._ascii_quotes = ascii_quotes
if self._java is None:
self._java = VNUChecker._JAVA
if self._jar_path is None:
self._jar_path = VNUChecker._JAR_PATH
if self.base_path is None:
self.base_path = VNUChecker._BASE_PATH
self._args = self._build_args()
def __str__(self):
return " ".join(self._args) # The command line representation.
def __repr__(self):
return f"{VNUChecker._CN}(java={self._java}, jar_path={self._jar_path}, dir_path={self.base_path}, errors_only={self._errors_only}, ascii_quotes={self._ascii_quotes})"
def _is_whitelist_error(
self, error_text: str, extra_whitelists: Optional[List[str]] = None
) -> bool:
"""Returns ``False`` if ``error_text`` is a whitelisted error, otherwise ``True``."""
if extra_whitelists is None:
extra_whitelists = []
whitelist_errors = _WHITELIST_ERRORS.copy() + extra_whitelists
for we in whitelist_errors:
if error_text.find(we) != -1: # found whitelisted error
return False
return True
def _build_args(self) -> List[str]:
"""Returns a list of program arguments that are used to kick of the VNU Checker."""
cmd: List[str] = [self._java, "-jar", self._jar_path]
if self._ascii_quotes:
cmd.append("--asciiquotes")
if self._errors_only:
cmd.append("--errors-only")
cmd.append(self.base_path)
return cmd
def _run(self, args: Optional[List[str]] = None) -> List[str]:
"""Runs the VNU Checker and returns a list of errors that are not whitelisted.
Uses the default program arguments if none are specified.
"""
if args is None:
args = self._args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
errors = stderr.decode("utf-8").splitlines()
return list(filter(self._is_whitelist_error, errors))
def validate(self, dir_path: Optional[str] = None) -> List[str]:
"""``validate`` will run the VNU Checker and return a list of errors. An empty list signals no errors.
``validate`` takes an optional directory path to run the VNU Checker, otherwise the default one is used.
"""
if dir_path is None:
if self.base_path is None:
raise ValueError(f"{self._CN}.validate: need a directory to validate")
dir_path = self.base_path
if not os.listdir(dir_path):
raise ValueError(
f"{self._CN}.validate: {dir_path} is empty; no files to validate"
)
return self._run()
```
#### File: riptable/Utils/rt_metadata.py
```python
__all__ = ['MetaData', 'meta_from_version', 'META_VERSION']
import json
from ..rt_enum import TypeId, TypeRegister, DisplayLength, CategoryMode
# global meta version, saved starting 12/17/2018 - anything prior will default to 0
META_VERSION = 0
version_dict = {
# this dictionary will be checked whenever a riptable class is being rebuilt during an SDS load
# top level keys are the version number
# next keys are the class's type id - see TypeId in enum
0: {
TypeId.Categorical: {
# vars for container loader
'name': 'Categorical',
'typeid': TypeId.Categorical,
'version': 0,
# vars for additional arrays
'colnames': [],
'ncols': 0,
# vars to rebuild the same categorical
'instance_vars': {
'mode': CategoryMode.StringArray,
'base_index': 1,
'ordered': False,
'sorted': False,
},
},
TypeId.Dataset: None,
TypeId.Struct: None,
TypeId.DateTimeNano: {
'name': 'DateTimeNano',
'typeid': TypeId.DateTimeNano,
'ncols': 0,
'version': 0,
'instance_vars': {
'_display_length': DisplayLength.Long,
'_timezone_str': 'America/New York',
'_to_tz': 'NYC',
},
},
TypeId.TimeSpan: {
'name': 'TimeSpan',
'typeid': TypeId.TimeSpan,
'ncols': 0,
'version': 0,
'instance_vars': {'_display_length': DisplayLength.Long},
},
}
}
def meta_from_version(cls, vnum):
'''
Returns a dictionary of meta data defaults.
'''
id = getattr(TypeId, cls.__name__)
return version_dict[vnum][id]
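# Illustrative usage (assuming the Categorical class registered in TypeRegister):
#   meta_from_version(Categorical, 0) looks up TypeId.Categorical in version_dict[0]
#   and returns the Categorical defaults defined above.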
class MetaData:
default_dict = {'name': "", 'typeid': TypeId.Default}
def __init__(self, metadict={}):
self._dict = self.default_dict.copy()
if isinstance(metadict, MetaData):
self._dict = metadict._dict
else:
if isinstance(metadict, (bytes, str)):
metadict = json.loads(metadict)
for k, v in metadict.items():
self._dict[k] = v
@property
def string(self):
return json.dumps(self._dict)
@property
def dict(self):
return self._dict
@property
def name(self):
return self['name']
@property
def typeid(self):
return self['typeid']
@property
def itemclass(self):
"""Starting 4/29/2019 item classes will be saved as strings in json meta data in `classname`.
For backwards compatibility, will also check `typeid`. The TypeId class is an enum of ItemClass -> typeid.
Both will lookup the items class in the TypeRegister, which holds classname -> itemclass.
"""
try:
classname = self['classname']
except KeyError:
classname = TypeId(self['typeid']).name
return getattr(TypeRegister, classname)
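# Sketch of the lookup: meta saved as {'classname': 'Categorical', ...} resolves directly to
# TypeRegister.Categorical, while older meta holding only {'typeid': TypeId.Categorical}
# recovers the name via TypeId(self['typeid']).name before the TypeRegister lookup.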
# pass these to dict
# ------------------------------------------------------------
def __getitem__(self, idx):
return self._dict[idx]
def __setitem__(self, idx, value):
self._dict[idx] = value
def get(self, key, default):
return self._dict.get(key, default)
def setdefault(self, k, v):
self._dict.setdefault(k, v)
def __repr__(self):
return self._dict.__repr__()
def __str__(self):
return self._dict.__str__()
# ------------------------------------------------------------
``` |
{
"source": "975935259/deep-code-search",
"score": 3
} |
#### File: deep-code-search/keras/models.py
```python
import os
#from tensorflow.keras.engine import Input
from tensorflow.keras.layers import Input, Concatenate, Dot, Embedding, Dropout, Lambda, Activation, LSTM, Dense
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
import numpy as np
import logging
logger = logging.getLogger(__name__)
class JointEmbeddingModel:
def __init__(self, config):
self.model_params = config.get('model_params', dict())
self.data_params = config.get('data_params',dict())
self.methname = Input(shape=(self.data_params['methname_len'],), dtype='int32', name='i_methname')
self.apiseq= Input(shape=(self.data_params['apiseq_len'],),dtype='int32',name='i_apiseq')
self.tokens=Input(shape=(self.data_params['tokens_len'],),dtype='int32',name='i_tokens')
self.desc_good = Input(shape=(self.data_params['desc_len'],), dtype='int32', name='i_desc_good')
self.desc_bad = Input(shape=(self.data_params['desc_len'],), dtype='int32', name='i_desc_bad')
# initialize a bunch of variables that will be set later
self._code_repr_model=None
self._desc_repr_model=None
self._sim_model = None
self._training_model = None
#self.prediction_model = None
def build(self):
'''
1. Build Code Representation Model
'''
logger.debug('Building Code Representation Model')
methname = Input(shape=(self.data_params['methname_len'],), dtype='int32', name='methname')
apiseq= Input(shape=(self.data_params['apiseq_len'],),dtype='int32',name='apiseq')
tokens=Input(shape=(self.data_params['tokens_len'],),dtype='int32',name='tokens')
## method name representation ##
#1.embedding
init_emb_weights = np.load(self.model_params['init_embed_weights_methname']) if self.model_params['init_embed_weights_methname'] is not None else None
if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
embedding = Embedding(input_dim=self.data_params['n_words'],
output_dim=self.model_params.get('n_embed_dims', 100),
weights=init_emb_weights,
mask_zero=False,#Whether 0 in the input is a special "padding" value that should be masked out.
#If set True, all subsequent layers in the model must support masking, otherwise an exception will be raised.
name='embedding_methname')
methname_embedding = embedding(methname)
dropout = Dropout(0.25,name='dropout_methname_embed')
methname_dropout = dropout(methname_embedding)
#2.rnn
f_rnn = LSTM(self.model_params.get('n_lstm_dims', 128), recurrent_dropout=0.2,
return_sequences=True, name='lstm_methname_f')
b_rnn = LSTM(self.model_params.get('n_lstm_dims', 128), return_sequences=True,
recurrent_dropout=0.2, name='lstm_methname_b',go_backwards=True)
methname_f_rnn = f_rnn(methname_dropout)
methname_b_rnn = b_rnn(methname_dropout)
dropout = Dropout(0.25,name='dropout_methname_rnn')
methname_f_dropout = dropout(methname_f_rnn)
methname_b_dropout = dropout(methname_b_rnn)
#3.maxpooling
maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]),name='maxpool_methname')
methname_pool = Concatenate(name='concat_methname_lstms')([maxpool(methname_f_dropout), maxpool(methname_b_dropout)])
activation = Activation('tanh',name='active_methname')
methname_repr = activation(methname_pool)
## API Sequence Representation ##
#1.embedding
embedding = Embedding(input_dim=self.data_params['n_words'],
output_dim=self.model_params.get('n_embed_dims', 100),
#weights=weights,
mask_zero=False,#Whether 0 in the input is a special "padding" value that should be masked out.
#If set True, all subsequent layers must support masking, otherwise an exception will be raised.
name='embedding_apiseq')
apiseq_embedding = embedding(apiseq)
dropout = Dropout(0.25,name='dropout_apiseq_embed')
apiseq_dropout = dropout(apiseq_embedding)
#2.rnn
f_rnn = LSTM(self.model_params.get('n_lstm_dims', 100), return_sequences=True, recurrent_dropout=0.2,
name='lstm_apiseq_f')
b_rnn = LSTM(self.model_params.get('n_lstm_dims', 100), return_sequences=True, recurrent_dropout=0.2,
name='lstm_apiseq_b', go_backwards=True)
apiseq_f_rnn = f_rnn(apiseq_dropout)
apiseq_b_rnn = b_rnn(apiseq_dropout)
dropout = Dropout(0.25,name='dropout_apiseq_rnn')
apiseq_f_dropout = dropout(apiseq_f_rnn)
apiseq_b_dropout = dropout(apiseq_b_rnn)
#3.maxpooling
maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]),name='maxpool_apiseq')
apiseq_pool = Concatenate(name='concat_apiseq_lstms')([maxpool(apiseq_f_dropout), maxpool(apiseq_b_dropout)])
activation = Activation('tanh',name='active_apiseq')
apiseq_repr = activation(apiseq_pool)
## Tokens Representation ##
#1.embedding
init_emb_weights = np.load(self.model_params['init_embed_weights_tokens']) if self.model_params['init_embed_weights_tokens'] is not None else None
if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
embedding = Embedding(input_dim=self.data_params['n_words'],
output_dim=self.model_params.get('n_embed_dims', 100),
weights=init_emb_weights,
#mask_zero=True,#Whether 0 in the input is a special "padding" value that should be masked out.
#If set True, all subsequent layers must support masking, otherwise an exception will be raised.
name='embedding_tokens')
tokens_embedding = embedding(tokens)
dropout = Dropout(0.25,name='dropout_tokens_embed')
tokens_dropout= dropout(tokens_embedding)
#4.maxpooling
maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]),name='maxpool_tokens')
tokens_pool = maxpool(tokens_dropout)
activation = Activation('tanh',name='active_tokens')
tokens_repr= activation(tokens_pool)
## concatenate the representation of code ##
merged_methname_api=Concatenate(name='merge_methname_api')([methname_repr,apiseq_repr])
merged_code_repr=Concatenate(name='merge_coderepr')([merged_methname_api,tokens_repr])
code_repr=Dense(self.model_params.get('n_hidden',400),activation='tanh',name='dense_coderepr')(merged_code_repr)
self._code_repr_model=Model(inputs=[methname,apiseq,tokens],outputs=[code_repr],name='code_repr_model')
'''
2. Build Desc Representation Model
'''
## Desc Representation ##
logger.debug('Building Desc Representation Model')
desc = Input(shape=(self.data_params['desc_len'],), dtype='int32', name='desc')
#1.embedding
init_emb_weights = np.load(self.model_params['init_embed_weights_desc']) if self.model_params['init_embed_weights_desc'] is not None else None
if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
embedding = Embedding(input_dim=self.data_params['n_words'],
output_dim=self.model_params.get('n_embed_dims', 100),
weights=init_emb_weights,
mask_zero=True,#Whether 0 in the input is a special "padding" value that should be masked out.
#If set True, all subsequent layers must support masking, otherwise an exception will be raised.
name='embedding_desc')
desc_embedding = embedding(desc)
dropout = Dropout(0.25,name='dropout_desc_embed')
desc_dropout = dropout(desc_embedding)
#2. rnn
f_rnn = LSTM(self.model_params.get('n_lstm_dims', 100), return_sequences=True, recurrent_dropout=0.2,
name='lstm_desc_f')
b_rnn = LSTM(self.model_params.get('n_lstm_dims', 100), return_sequences=True, recurrent_dropout=0.2,
name='lstm_desc_b', go_backwards=True)
desc_f_rnn = f_rnn(desc_dropout)
desc_b_rnn = b_rnn(desc_dropout)
dropout = Dropout(0.25,name='dropout_desc_rnn')
desc_f_dropout = dropout(desc_f_rnn)
desc_b_dropout = dropout(desc_b_rnn)
#3. maxpooling
maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]),name='maxpool_desc')
desc_pool = Concatenate(name='concat_desc_rnns')([maxpool(desc_f_dropout), maxpool(desc_b_dropout)])
activation = Activation('tanh',name='active_desc')
desc_repr = activation(desc_pool)
self._desc_repr_model=Model(inputs=[desc],outputs=[desc_repr],name='desc_repr_model')
"""
3: calculate the cosine similarity between code and desc
"""
logger.debug('Building similarity model')
code_repr=self._code_repr_model([methname,apiseq,tokens])
desc_repr=self._desc_repr_model([desc])
cos_sim=Dot(axes=1, normalize=True, name='cos_sim')([code_repr, desc_repr])
sim_model = Model(inputs=[methname,apiseq,tokens,desc], outputs=[cos_sim],name='sim_model')
self._sim_model=sim_model #for model evaluation
'''
4:Build training model
'''
good_sim = sim_model([self.methname,self.apiseq,self.tokens, self.desc_good])# similarity of good output
bad_sim = sim_model([self.methname,self.apiseq,self.tokens, self.desc_bad])#similarity of bad output
loss = Lambda(lambda x: K.maximum(1e-6, self.model_params['margin'] - x[0] + x[1]),
output_shape=lambda x: x[0], name='loss')([good_sim, bad_sim])
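# The Lambda above is a margin ranking (hinge) loss: max(1e-6, margin - sim(code, good_desc) + sim(code, bad_desc)).
# Worked example with illustrative numbers (margin comes from config; 0.05 is assumed here):
#   good_sim = 0.9, bad_sim = 0.3  ->  max(1e-6, 0.05 - 0.9 + 0.3) = 1e-6 (the pair is already well separated)
#   good_sim = 0.3, bad_sim = 0.9  ->  max(1e-6, 0.05 - 0.3 + 0.9) = 0.65 (the pair is penalized)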
logger.debug('Building training model')
self._training_model=Model(inputs=[self.methname,self.apiseq,self.tokens,self.desc_good,self.desc_bad],
outputs=[loss],name='training_model')
def compile(self, optimizer, **kwargs):
logger.info('compiling models')
self._code_repr_model.compile(loss='cosine_proximity', optimizer=optimizer, **kwargs)
self._desc_repr_model.compile(loss='cosine_proximity', optimizer=optimizer, **kwargs)
self._training_model.compile(loss=lambda y_true, y_pred: y_pred+y_true-y_true, optimizer=optimizer, **kwargs)
# The +y_true-y_true term avoids an unused-input warning; it could be simply +y_true, since y_true is always 0 in the training set.
self._sim_model.compile(loss='binary_crossentropy', optimizer=optimizer, **kwargs)
def summary(self, export_path):
print('Summary of the code representation model')
self._code_repr_model.summary()
#plot_model(self._code_repr_model, show_shapes=True, to_file= export_path+'code_repr_model.png')
print('Summary of the desc representation model')
self._desc_repr_model.summary()
#plot_model(self._desc_repr_model, show_shapes=True, to_file=export_path+'desc_repr_model.png')
print ("Summary of the similarity model")
self._sim_model.summary()
#plot_model(self._sim_model, show_shapes=True, to_file= export_path+'sim_model.png')
print ('Summary of the training model')
self._training_model.summary()
#plot_model(self._training_model, show_shapes=True, to_file=export_path+'training_model.png')
def fit(self, x, **kwargs):
assert self._training_model is not None, 'Must compile the model before fitting data'
y = np.zeros(shape=x[0].shape[:1],dtype=np.float32)
return self._training_model.fit(x, y, **kwargs)
def repr_code(self, x, **kwargs):
return self._code_repr_model.predict(x, **kwargs)
def repr_desc(self, x, **kwargs):
return self._desc_repr_model.predict(x, **kwargs)
def predict(self, x, **kwargs):
return self._sim_model.predict(x, **kwargs)
def save(self, code_model_file, desc_model_file, **kwargs):
assert self._code_repr_model is not None, 'Must compile the model before saving weights'
self._code_repr_model.save_weights(code_model_file, **kwargs)
assert self._desc_repr_model is not None, 'Must compile the model before saving weights'
self._desc_repr_model.save_weights(desc_model_file, **kwargs)
def load(self, code_model_file, desc_model_file, **kwargs):
assert self._code_repr_model is not None, 'Must compile the model before loading weights'
self._code_repr_model.load_weights(code_model_file, **kwargs)
assert self._desc_repr_model is not None, 'Must compile the model before loading weights'
self._desc_repr_model.load_weights(desc_model_file, **kwargs)
``` |
{
"source": "97chenxa/Multiview2Novelview",
"score": 2
} |
#### File: 97chenxa/Multiview2Novelview/trainer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from util import log
from pprint import pprint
from input_ops import create_input_ops
from model import Model
import os
import time
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
class Trainer(object):
def __init__(self,
config,
dataset,
dataset_test):
self.config = config
hyper_parameter_str = 'bs_{}_lr_flow_{}_pixel_{}_d_{}'.format(
config.batch_size,
config.learning_rate_f,
config.learning_rate_p,
config.learning_rate_d,
)
self.train_dir = './train_dir/%s-%s-%s-num_input-%s-%s' % (
config.dataset,
config.prefix,
hyper_parameter_str,
str(config.num_input),
time.strftime("%Y%m%d-%H%M%S")
)
if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)
log.infov("Train Dir: %s", self.train_dir)
# --- input ops ---
self.batch_size = config.batch_size
_, self.batch_train = create_input_ops(
dataset, self.batch_size, is_training=True)
_, self.batch_test = create_input_ops(
dataset_test, self.batch_size, is_training=False)
# --- create model ---
self.model = Model(config)
# --- optimizer ---
self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
self.learning_rate_p = config.learning_rate_p
self.learning_rate_f = config.learning_rate_f
self.learning_rate_d = config.learning_rate_d
self.check_op = tf.no_op()
# --- checkpoint and monitoring ---
all_vars = tf.trainable_variables()
f_var = [v for v in all_vars if 'Flow' in v.op.name or 'flow' in v.op.name]
log.warn("********* f_var ********** ")
slim.model_analyzer.analyze_vars(f_var, print_info=True)
p_var = [v for v in all_vars if 'Pixel' in v.op.name or 'pixel' in v.op.name]
log.warn("********* p_var ********** ")
slim.model_analyzer.analyze_vars(p_var, print_info=True)
d_var = [v for v in all_vars if v.op.name.startswith('Discriminator')]
log.warn("********* d_var ********** ")
slim.model_analyzer.analyze_vars(d_var, print_info=True)
# the whole model without the discriminator
g_var = p_var + f_var
self.f_optimizer = tf.train.AdamOptimizer(
self.learning_rate_f,
).minimize(self.model.flow_loss,
var_list=f_var, name='optimizer_flow_loss')
self.p_optimizer = tf.train.AdamOptimizer(
self.learning_rate_p,
).minimize(self.model.pixel_loss, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss')
self.p_optimizer_gan = tf.train.AdamOptimizer(
self.learning_rate_p,
beta1=0.5
).minimize(self.model.pixel_loss_gan, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss_gan')
self.d_optimizer = tf.train.AdamOptimizer(
self.learning_rate_d,
beta1=0.5
).minimize(self.model.d_loss, global_step=self.global_step,
var_list=d_var, name='optimizer_discriminator_loss')
self.train_summary_op = tf.summary.merge_all(key='train')
self.test_summary_op = tf.summary.merge_all(key='test')
self.saver = tf.train.Saver(max_to_keep=100)
self.pretrain_saver = tf.train.Saver(var_list=all_vars, max_to_keep=1)
self.pretrain_saver_p = tf.train.Saver(var_list=p_var, max_to_keep=1)
self.pretrain_saver_f = tf.train.Saver(var_list=f_var, max_to_keep=1)
self.pretrain_saver_g = tf.train.Saver(var_list=g_var, max_to_keep=1)
self.pretrain_saver_d = tf.train.Saver(var_list=d_var, max_to_keep=1)
self.summary_writer = tf.summary.FileWriter(self.train_dir)
self.max_steps = self.config.max_steps
self.ckpt_save_step = self.config.ckpt_save_step
self.log_step = self.config.log_step
self.test_sample_step = self.config.test_sample_step
self.write_summary_step = self.config.write_summary_step
self.gan_start_step = self.config.gan_start_step
self.checkpoint_secs = 600 # 10 min
self.supervisor = tf.train.Supervisor(
logdir=self.train_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
save_summaries_secs=300,
save_model_secs=self.checkpoint_secs,
global_step=self.global_step,
)
session_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
device_count={'GPU': 1},
)
self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)
self.ckpt_path = config.checkpoint
if self.ckpt_path is not None:
log.info("Checkpoint path: %s", self.ckpt_path)
self.pretrain_saver.restore(self.session, self.ckpt_path, )
log.info("Loaded the pretrain parameters from the provided checkpoint path")
self.ckpt_path_f = config.checkpoint_f
if self.ckpt_path_f is not None:
log.info("Checkpoint path: %s", self.ckpt_path_f)
self.pretrain_saver_f.restore(self.session, self.ckpt_path_f)
log.info("Loaded the pretrain Flow module from the provided checkpoint path")
self.ckpt_path_p = config.checkpoint_p
if self.ckpt_path_p is not None:
log.info("Checkpoint path: %s", self.ckpt_path_p)
self.pretrain_saver_p.restore(self.session, self.ckpt_path_p)
log.info("Loaded the pretrain Pixel module from the provided checkpoint path")
self.ckpt_path_g = config.checkpoint_g
if self.ckpt_path_g is not None:
log.info("Checkpoint path: %s", self.ckpt_path_g)
self.pretrain_saver_g.restore(self.session, self.ckpt_path_g)
log.info("Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path")
self.ckpt_path_d = config.checkpoint_d
if self.ckpt_path_d is not None:
log.info("Checkpoint path: %s", self.ckpt_path_d)
self.pretrain_saver_d.restore(self.session, self.ckpt_path_d)
log.info("Loaded the pretrain Discriminator module from the provided checkpoint path")
def train(self):
log.infov("Training Starts!")
pprint(self.batch_train)
max_steps = self.max_steps
ckpt_save_step = self.ckpt_save_step
log_step = self.log_step
test_sample_step = self.test_sample_step
write_summary_step = self.write_summary_step
gan_start_step = self.gan_start_step
for s in xrange(max_steps):
# periodic inference
if s % test_sample_step == 0:
step, test_summary, p_loss, f_loss, loss, output, step_time = \
self.run_test(self.batch_test, step=s, is_train=False)
self.log_step_message(step, p_loss, f_loss, loss, step_time, is_train=False)
self.summary_writer.add_summary(test_summary, global_step=step)
step, train_summary, p_loss, f_loss, loss, output, step_time = \
self.run_single_step(self.batch_train, step=s,
opt_gan=s > gan_start_step, is_train=True)
if s % log_step == 0:
self.log_step_message(step, p_loss, f_loss, loss, step_time)
if s % write_summary_step == 0:
self.summary_writer.add_summary(train_summary, global_step=step)
if s % ckpt_save_step == 0:
log.infov("Saved checkpoint at %d", s)
save_path = self.saver.save(
self.session, os.path.join(self.train_dir, 'model'),
global_step=step)
def run_single_step(self, batch, step=None, opt_gan=False, is_train=True):
_start_time = time.time()
batch_chunk = self.session.run(batch)
fetch = [self.global_step, self.train_summary_op, self.model.output,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.check_op]
# fetch optimizers
if not opt_gan:
# optimize only l1 losses
fetch += [self.p_optimizer, self.f_optimizer]
else:
if step % (self.config.update_rate+1) > 0:
# train the generator
fetch += [self.p_optimizer_gan, self.f_optimizer]
else:
# train the discriminator
fetch += [self.d_optimizer]
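# Scheduling sketch (assuming the default update_rate of 1): steps with step % 2 == 1 update the
# generator (pixel + flow losses), steps with step % 2 == 0 update the discriminator; a larger
# update_rate trains the generator more often per discriminator update.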
fetch_values = self.session.run(
fetch,
feed_dict=self.model.get_feed_dict(batch_chunk, step=step)
)
[step, summary, output, p_loss, f_loss, loss] = fetch_values[:6]
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def run_test(self, batch, step, is_train=False):
_start_time = time.time()
batch_chunk = self.session.run(batch)
step, summary, p_loss, f_loss, loss, output = self.session.run(
[self.global_step, self.test_summary_op,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.model.output],
feed_dict=self.model.get_feed_dict(batch_chunk, step=step, is_training=False)
)
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def log_step_message(self, step, p_loss, f_loss, loss, step_time, is_train=True):
if step_time == 0: step_time = 0.001
log_fn = (is_train and log.info or log.infov)
log_fn((" [{split_mode:5s} step {step:4d}] " +
"Loss: {loss:.5f} " +
"Pixel loss: {p_loss:.5f} " +
"Flow loss: {f_loss:.5f} " +
"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec) "
).format(split_mode=(is_train and 'train' or 'val'),
step=step,
loss=loss,
p_loss=p_loss,
f_loss=f_loss,
sec_per_batch=step_time,
instance_per_sec=self.batch_size / step_time
)
)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8,
help='the mini-batch size')
parser.add_argument('--prefix', type=str, default='default',
help='a nickname for the training')
parser.add_argument('--dataset', type=str, default='car', choices=[
'car', 'chair', 'kitti', 'synthia'],
help='you can add your own dataset here')
parser.add_argument('--num_input', type=int, default=2,
help='the number of source images')
parser.add_argument('--checkpoint', type=str, default=None,
help='load all the parameters including the flow and '
'pixel modules and the discriminator')
parser.add_argument('--checkpoint_p', type=str, default=None,
help='load the parameters of the pixel module')
parser.add_argument('--checkpoint_f', type=str, default=None,
help='load the parameters of the flow module')
parser.add_argument('--checkpoint_g', type=str, default=None,
help='load the parameters of both the flow and pixel module')
parser.add_argument('--checkpoint_d', type=str, default=None,
help='load the parameters of the discriminator')
# Log
parser.add_argument('--log_step', type=int, default=10,
help='the frequency of outputting log info')
parser.add_argument('--ckpt_save_step', type=int, default=5000,
help='the frequency of saving a checkpoint')
parser.add_argument('--test_sample_step', type=int, default=100,
help='the frequency of performing testing inference during training')
parser.add_argument('--write_summary_step', type=int, default=100,
help='the frequency of writing TensorBoard summaries')
# Learning
parser.add_argument('--max_steps', type=int, default=10000000,
help='the max training iterations')
parser.add_argument('--learning_rate_p', type=float, default=5e-5,
help='the learning rate of the pixel module')
parser.add_argument('--learning_rate_f', type=float, default=1e-4,
help='the learning rate of the flow module')
parser.add_argument('--learning_rate_d', type=float, default=1e-4,
help='the learning rate of the discriminator')
parser.add_argument('--local_confidence_weight', type=int, default=1e-2,
help='the weight of the confidence prediction objective')
# Architecture
parser.add_argument('--num_res_block_pixel', type=int, default=0,
help='the number of residual block in the bottleneck of the pixel module')
parser.add_argument('--num_res_block_flow', type=int, default=4,
help='the number of residual block in the bottleneck of the flow module')
parser.add_argument('--num_dis_conv_layer', type=int, default=5,
help='the number of convolutional layers of the discriminator')
parser.add_argument('--num_conv_layer', type=int, default=5,
help='the number of convolutional layers of '
'the encoder of both the flow and pixel modules')
parser.add_argument('--num_convlstm_block', type=int, default=2,
help='the number of residual ConvLSTM block of the pixel module')
parser.add_argument('--num_convlstm_scale', type=int, default=3,
help='how many innermost layers of the pixel module '
'have a residual ConvLSTM connection')
parser.add_argument('--norm_type', type=str, default='None',
choices=['batch', 'instance', 'None'],
help='the type of normalization')
# GAN
parser.add_argument('--gan_type', type=str, default='ls', choices=['ls', 'normal'],
help='the type of GAN losses such as LS-GAN, WGAN, etc')
parser.add_argument('--gan_start_step', type=int, default=5e5,
help='start to optimize the GAN loss when the model is stable')
parser.add_argument('--update_rate', type=int, default=1,
help='update G more frequently than D')
# Multi-scale prediction: this is not reported in the paper
# The main idea is to improve the flow module by training it to start from
# predicting coarser flow fields (similar to the progressive growing of GANs
# proposed by Karras et al., ICLR 2018)
parser.add_argument('--num_scale', type=int, default=1,
help='the number of multi-scale flow prediction '
'(1 means without multi-scale prediction)')
parser.add_argument('--moving_weight', type=str, default='uniform',
choices=['uniform', 'shift', 'step'],
help='gradually learn each scale from coarse to fine')
config = parser.parse_args()
if config.dataset == 'car':
import datasets.shapenet_car as dataset
elif config.dataset == 'chair':
import datasets.shapenet_chair as dataset
elif config.dataset == 'kitti':
import datasets.kitti as dataset
elif config.dataset == 'synthia':
import datasets.synthia as dataset
else:
raise ValueError(config.dataset)
if 'car' in config.dataset or 'chair' in config.dataset:
config.dataset_type = 'object'
else:
config.dataset_type = 'scene'
dataset_train, dataset_test = \
dataset.create_default_splits(config.num_input)
image, pose = dataset_train.get_data(dataset_train.ids[0])
config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(pose.shape)])
trainer = Trainer(config, dataset_train, dataset_test)
log.warning("dataset: %s", config.dataset)
trainer.train()
if __name__ == '__main__':
main()
``` |
{
"source": "97e57e/linkmoa",
"score": 2
} |
#### File: linkmoa/freeboard/models.py
```python
from django.db import models
from datetime import datetime
# Create your models here.
class Post(models.Model):
user_id = models.IntegerField()
owner = models.CharField(max_length=20, default="???")
title = models.CharField(max_length=200)
pub_date = models.DateTimeField('date_published')
body = models.TextField()
views = models.IntegerField(default=0)
def __str__(self):
return self.title
def increaseViews(self):
self.views +=1
self.save()
class Comment(models.Model):
post = models.ForeignKey('freeboard.Post', on_delete=models.CASCADE, related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
def __str__(self):
return self.text
```
#### File: linkmoa/linkmoa/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.forms.models import model_to_dict
from django.dispatch import receiver
from datetime import datetime
import urllib.request
from django import template
from tagging.fields import TagField
register = template.Library()
# Create your models here.
class Memo(models.Model):
user_id = models.IntegerField()
owner = models.CharField(max_length=20, default="???")
directory = models.CharField(max_length=20, default="recently")
shared = models.BooleanField(default=False)
download = models.IntegerField(default=0)
keyword = models.CharField(max_length=30)
urls = models.TextField(default=None)
memo = models.TextField(default="")
pub_date = models.DateTimeField('date_published', default=datetime.now)  # pass the callable so the default is evaluated on each save
tag = TagField()
def updateMemo(self, u_id, u_owner, u_directory, u_shared, u_download, u_keyword, u_urls, u_memo, u_tag):
self.user_id = u_id
self.owner = u_owner
self.directory = u_directory
self.shared = u_shared
self.download = u_download
self.keyword = u_keyword
self.urls = u_urls
self.memo = u_memo
self.tag = u_tag
self.save()
def __str__(self):
return self.keyword
def split(urls):
urlList = urls.split('\n')
return urlList
def increaseDL(self):
self.download+=1
self.save()
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
numofDir = models.IntegerField(default=0)
selectedMemo = models.IntegerField(default=0)
currentdir = models.CharField(max_length=20, default='recently')
dir1 = models.CharField(max_length=30, blank=True)
dir2 = models.CharField(max_length=30, blank=True)
dir3 = models.CharField(max_length=30, blank=True)
dir4 = models.CharField(max_length=30, blank=True)
dir5 = models.CharField(max_length=30, blank=True)
dir6 = models.CharField(max_length=30, blank=True)
dir7 = models.CharField(max_length=30, blank=True)
dir8 = models.CharField(max_length=30, blank=True)
dir9 = models.CharField(max_length=30, blank=True)
dir10 = models.CharField(max_length=30, blank=True)
def setSelectedMemo(self, id):
self.selectedMemo=id
self.save()
def increase(self):
self.numofDir+=1
self.save()
def decrease(self):
self.numofDir-=1
self.save()
def get_fields_name(model):
names=[]
for key in model_to_dict(model).values():
if type(key) == str and key !='':
names.append(key)
names.pop(0)
return names
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
```
#### File: linkmoa/templatetags/quicksave.py
```python
from django.template import Library
from .. import models
register = Library()
@register.simple_tag
def setSelectedMemo(profile, id):
setattr(profile, 'selectedMemo', id)
print('setSelected : ', id)
profile.save()
```
#### File: linkmoa/linkmoa/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.utils import timezone
from django.http import HttpResponse
from django.views.generic import ListView, DetailView, TemplateView
from tagging.models import Tag, TaggedItem
from tagging.views import TaggedObjectList
from django.core.paginator import Paginator
from linkmoa import urlScrap
from linkmoa import dirManagement
from accounts import views
from .models import Memo
from .models import Profile
# Create your views here.
def board(request):
user=request.user
sort = request.GET.get('sort','')
if sort == 'likes':
memos = Memo.objects.filter(shared=True).order_by('-download')
elif sort == 'mymemo':
memos = Memo.objects.filter(shared=True, user_id=user.id).order_by('-id')
else:
memos = Memo.objects.filter(shared=True).order_by('-id')
board_paginator = Paginator(memos, 20)
page = request.GET.get('page')
board_posts = board_paginator.get_page(page)
print(page)
return render(request,'board.html',{'board_posts' : board_posts})
def search(request):
user=request.user
############## Pagination handling ##############
try:
# On the very first search, read the value from the searchBox input
keyword = request.POST['searchBox']
page = request.GET.get('page')
print('page 1')
except Exception as e:
# When paging from the search_board view, read the keyword from the hidden value and take the requested page number
keyword = request.GET['hidden-value']
page = request.GET['pagenum']
sort = request.GET.get('sort','')
############## Search Logic #################
if keyword == '':  # handle empty input
return redirect('board')
if keyword[0] == '#':  # tag search
try:
search_tag = keyword.replace("#","")
tag=Tag.objects.get(name=search_tag)
searched_memos = TaggedItem.objects.get_intersection_by_model(Memo, tag).filter(shared=True)
except Tag.DoesNotExist:
print('DoesNotExist')
return render(request, 'search_board.html')
else:  # plain keyword search
if sort == 'likes':
searched_memos = Memo.objects.filter(keyword= keyword, shared=True).order_by('-download')
elif sort == 'mymemo':
searched_memos = Memo.objects.filter(keyword= keyword, shared=True, user_id=user.id).order_by('-id')
else:
searched_memos = Memo.objects.filter(keyword= keyword, shared=True).order_by('-id')
search_paginator = Paginator(searched_memos, 4)
search_posts = search_paginator.get_page(page)
# Pass the keyword received from the template along with the queryset; the keyword is used by the pagination part of the template
return render(request,'search_board.html', {'search_posts' : search_posts, 'keyword' : keyword})
def tag_board(request, tag):
tag=Tag.objects.get(name=tag)
tagged_memos = TaggedItem.objects.get_intersection_by_model(Memo, tag).filter(shared=True)
tag_paginator = Paginator(tagged_memos, 2)
page = request.GET.get('page')
tag_posts = tag_paginator.get_page(page)
return render(request, 'tag_board.html',{'tag_posts' : tag_posts})
def index(request):
user=request.user
print('Request user : ' + user.username)
memos = Memo.objects.filter(user_id=user.id).order_by('-id')
current = memos.filter(directory=user.profile.currentdir)
paginator = Paginator(current, 20)
page = request.GET.get('page')
posts = paginator.get_page(page)
return render(request,'index.html',{'memos' : memos, 'current' : current, 'userid' : user.id, 'posts' : posts, 'user_currentdir_name' : user.profile.currentdir})
def make_memo(request):
user=request.user
memo = Memo()
print(user.username + ' make new memo!')
splited = request.POST['url'].split('\n')
filteredUrl = urlScrap.scrapUrl(splited, request.POST['key'])
if len(filteredUrl) > 1:
memo.updateMemo(user.id, user.username, "recently", False, 0, request.POST['key'], filteredUrl, "", "")
return redirect('index')
def make_memo_direct(request):
user=request.user
memo = Memo()
unvalid = request.GET.get('editUrl').split('\n')
print(unvalid)
valid = ""
for url in unvalid:
if url[0:7] == 'http://' or url[0:8] == 'https://':
valid = valid + url + "\n"
print(valid)
memo.updateMemo(user.id, user.username, user.profile.currentdir, memo.shared, memo.download, request.GET.get('editKey'), valid, request.GET.get('editMemo'), request.GET.get('editTag').replace("#",","))
print(user.username + ' make direct memo')
return redirect('index')
def mkdir(request):
user=request.user
dname = request.POST['dirname']
if user.profile.numofDir == 10:
print('The maximum number of directories is 10.')
else:
a = dirManagement.makeDirectory(user,dname)
if a == 0:
print('A directory with the same name cannot be created.')
return redirect('index')
def changedir(request, cddir):
user=request.user
user.profile.currentdir=cddir
user.profile.save()
return redirect('index')
def changedirname(request, dirname):
user=request.user
newname = request.GET.get('changename')
if user.profile.currentdir == dirname:
user.profile.currentdir = newname
user.profile.save()
dirMemo = Memo.objects.filter(directory=dirname)
dirManagement.changedirname(user, dirname, newname, dirMemo)
return redirect('index')
def deletedir(request, dirname):
user=request.user
dname = dirname
memos = Memo.objects.filter(user_id=user.id, directory=dirname)
memos.delete()
dirManagement.deleteDirectory(user, dname)
return redirect('index')
def delete_memo(request, memo_id):
user=request.user
memo = Memo.objects.get(id=memo_id)
memo.delete()
return redirect('index')
def share_memo(request, memo_id):
memo = Memo.objects.get(id=memo_id)
memo.shared = True
memo.save()
return redirect('index')
def edit_memo(request, memo_id):
user=request.user
memo = Memo.objects.get(id=memo_id)
memo.updateMemo(user.id, user.username, memo.directory, memo.shared, memo.download, request.GET.get('editKey'), request.GET.get('editUrl'), request.GET.get('editMemo'), request.GET.get('editTag').replace("#",","))
return redirect('index')
def undo_share(request, memo_id):
memo = Memo.objects.get(id=memo_id)
memo.shared = False
memo.download = 0
memo.save()
return redirect('/')
def download_memo(request, memo_id):
user=request.user
newMemo = Memo()
oldMemo = Memo.objects.get(id=memo_id)
oldMemo.increaseDL()
newMemo.updateMemo(user.id, user.username, 'recently', False, 0, oldMemo.keyword, oldMemo.urls, "","")
return redirect('index')
def movedir(request, memo_id, dirname):
user = request.user
memo = Memo.objects.get(id=memo_id)
setattr(memo, 'directory', dirname)
memo.save()
return redirect('index')
#Deprecated function
# def appear_memo(request, memo_id):
# memo = Memo.objects.get(id=memo_id)
# memo.display='visible'
# memo.save()
# return redirect('index')
# def disappear_memo(request, memo_id):
# memo = Memo.objects.get(id=memo_id)
# memo.display='invisible'
# memo.save()
# return redirect('index')
``` |
{
"source": "97harsh/drug-lit-contradictory-claims",
"score": 3
} |
#### File: tests/models/test_train_model.py
```python
import os
import shutil
import unittest
import numpy as np
import tensorflow as tf
from contradictory_claims.models.train_model import build_model, load_model, regular_encode, save_model
from transformers import AutoModel, AutoTokenizer, TFAutoModel
class TestTrainModel(unittest.TestCase):
"""Test for training the model for contradictory-claims."""
def setUp(self) -> None:
"""Set up for the tests--load tokenizer."""
self.test_tokenizer = AutoTokenizer.from_pretrained("allenai/biomed_roberta_base")
self.model = AutoModel.from_pretrained("allenai/biomed_roberta_base")
self.model.resize_token_embeddings(len(self.test_tokenizer))
self.out_dir = 'tests/models/test_output'
def test_regular_encode(self):
"""Test that encoding is done properly."""
test_input = ["this is a test", "so is this"]
len_encoding = 20
encoded_input = regular_encode(test_input, self.test_tokenizer, len_encoding)
expected_encoded_input = np.array([[0, 9226, 16, 10, 1296, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 2527, 16, 42, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
self.assertTrue((encoded_input == expected_encoded_input).all())
def test_build_save_load_model(self):
"""Test that full model is built properly."""
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
os.makedirs("biomed_roberta_base")
self.model.save_pretrained("biomed_roberta_base")
with strategy.scope():
model = TFAutoModel.from_pretrained("biomed_roberta_base", from_pt=True)
model = build_model(model)
shutil.rmtree("biomed_roberta_base")
# Note: this changed recently and I don't know why... Maybe different TF version?
# self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.training.Model'>")
self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.functional.Functional'>")
save_model(model, timed_dir_name=False, transformer_dir=self.out_dir)
self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'sigmoid.pickle')))
self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'config.json')))
self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'tf_model.h5')))
pickle_path = os.path.join(self.out_dir, 'sigmoid.pickle')
model = load_model(pickle_path=pickle_path, transformer_dir=self.out_dir)
# Same comment here applies
# self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.training.Model'>")
self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.functional.Functional'>")
@unittest.skip("Yeah I don't know how to reasonably test this sorry")
def test_train_model(self):
"""Test that the model can be trained."""
# What's a good way to test this?
# TODO: Implement something
pass
def tearDown(self):
"""Clean-up after all tests have run."""
if os.path.isdir(self.out_dir):
shutil.rmtree(self.out_dir)
``` |
{
"source": "97harsh/task-geo",
"score": 2
} |
#### File: data_sources/mobility/__init__.py
```python
from task_geo.data_sources.mobility.mobility_connector import mobility_connector
from task_geo.data_sources.mobility.mobility_formatter import mobility_formatter
def mobility():
"""Retrieve the mobility reports from Google.
Arguments:
None
Returns:
pandas.DataFrame
Example:
>>> from task_geo.data_sources import get_data_source
>>> mobility = get_data_source('mobility')
>>> mobility()
"""
raw = mobility_connector()
return mobility_formatter(raw)
```
#### File: data_sources/mobility/mobility_formatter.py
```python
import pandas as pd
def mobility_formatter(raw):
# Put column names in lowercase alphanumerical
column_names = {
'country_region_code': 'country_iso',
'country_region': 'country',
'sub_region_1': 'region',
'sub_region_2': 'sub_region',
'date': 'date',
'retail_and_recreation_percent_change_from_baseline': 'retail_recreation',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery_pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit_stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential',
}
raw = raw.rename(columns=column_names)
numeric_columns = [
'retail_recreation', 'grocery_pharmacy', 'parks',
'transit_stations', 'workplaces', 'residential'
]
raw[numeric_columns] = raw[numeric_columns].astype(float)
raw['date'] = pd.to_datetime(raw.date)
column_order = [
'country_iso', 'country', 'region', 'date', 'retail_recreation', 'grocery_pharmacy',
'parks', 'transit_stations', 'workplaces', 'residential'
]
return raw[column_order]
```
#### File: data_sources/noaa/__main__.py
```python
import argparse
from ftp_connector import download_noaa_files, process_noaa_data
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--download',
action='store_true', help="Wheter download or not the files.")
parser.add_argument(
'-o', '--output', required=True,
help='Destination file to store the processed dataset.')
parser.add_argument(
'-c', '--countries', required=True,
nargs='?', help='FIPS Country codes to select data for.')
return parser
def main():
parser = get_argparser()
args = parser.parse_args()
if args.download:
download_noaa_files()
else:
dataset = process_noaa_data(args.countries)
dataset.to_csv(args.output, index=False, header=True)
if __name__ == '__main__':
main()
``` |
{
"source": "97I8TOE47K/kinematics-equation-solver",
"score": 4
} |
#### File: 97I8TOE47K/kinematics-equation-solver/main.py
```python
import math
#Kinematics Equation solver (for NeoTech)
#Make more user friendly by making variables function-wise (should I?)
print ("Welcome to this Kinematics Equation solver!")
print("Please fill the parameters. You can fill the unknown one(s) with 0.")
'''
v = u + at
v^2 = u^2 + 2as
s = ut + (1/2)at^2
'''
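# Worked example (values chosen only for illustration): with u = 0, a = 2 and t = 3,
# the first equation gives v = 0 + 2*3 = 6, the third gives s = 0*3 + 0.5*2*3**2 = 9,
# and the second is consistent: v**2 = 36 = 0**2 + 2*2*9.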
print("\nWarning: This program currently *does not* support units of measurement while solving equations\n")
u = float(input("Please enter the initial velocity: "))
v = float(input("Please enter the final velocity: "))
a = float(input("Please enter the acceleration: "))
t = float(input("Please enter the time: "))
s = float(input("Please enter the displacement: "))
def equation1():
if v != 0:
options = float(input("1. Initial velocity (u) \n2. Acceleration (a)\n3. Time (t)\nWhat do you want to solve for? "))
if options == 1:
ans = v - (a*t)
print("The initial velocity is ",ans)
elif options == 2:
ans = (v-u)/t
print("The acceleration is ",ans)
elif options == 3:
ans = (v-u)/a
print("The time is ",ans)
else:
print("Invalid input")
else:
print("Invalid input")
def equation2():
if v**2 != 0:
options = float(input("1. Initial velocity (u) \n2. Acceleration (a)\n3. Displacement (s)\nWhat do you want to solve for? "))
if options == 1:
ans = math.sqrt(v**2 - (2*a*s))
print("The initial velocity is ",ans)
elif options == 2:
ans = ((v**2 - u**2))/(2*s)
print("The acceleration is ",ans)
elif options == 3:
ans = ((v**2 - u**2))/(2*a)
print("The displacement is ",ans)
else:
print("Invalid input")
def equation3():
if s != 0:
options = float(input("1. Initial velocity (u) \n2. Acceleration (a)\n3. Time (t)\nWhat do you want to solve for? "))
if options == 1:
ans = (s-((1/2)*a*(t**2)))/t
print("The initial velocity is ",ans)
elif options == 2:
ans = ((2*(s-(u*t))))/(t**2)
print("The acceleration is ",ans)
elif options == 3:
ans = ((s-(1/2)*a*(t**2)))/u
print("The time is ",ans)
else:
print("Invalid input")
choice = float(input("\n1. v=u+at\n2. v^2=u^2 + 2as\n3. S=ut+1/2 at^2 \nPlease enter the equation you want to solve for: "))
if choice == 1:
equation1()
elif choice == 2:
equation2()
elif choice == 3:
equation3()
``` |
{
"source": "97littleleaf11/mypyc-benchmarks",
"score": 3
} |
#### File: mypyc-benchmarks/microbenchmarks/builtins.py
```python
from benchmarking import benchmark
@benchmark
def min_max_pair() -> None:
a = []
for i in range(20):
a.append(i * 12753 % (2**15 - 1))
expected_min = min(a)
expected_max = max(a)
n = 0
for i in range(100 * 1000):
n = 1000000000
m = 0
for j in a:
n = min(n, j)
m = max(m, j)
assert n == expected_min
assert m == expected_max
@benchmark
def min_max_sequence() -> None:
a = []
for i in range(1000):
a.append([i * 2])
a.append([i, i + 2])
a.append([i] * 15)
n = 0
for i in range(100):
for s in a:
x = min(s)
n += x
x = max(s)
n += x
assert n == 399800000, n
@benchmark
def map_builtin() -> None:
a = []
for j in range(100):
for i in range(10):
a.append([i * 2])
a.append([i, i + 2])
a.append([i] * 6)
n = 0
for i in range(100):
k = 0
for lst in a:
x = list(map(inc, lst))
if k == 0:
y = "".join(map(str, lst))
n += len(y)
n += x[-1]
k += 1
if k == 3:
k = 0
assert n == 2450000, n
def inc(x: int) -> int:
return x + 1
```
#### File: mypyc-benchmarks/microbenchmarks/bytes.py
```python
from benchmarking import benchmark
@benchmark
def bytes_concat() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b' %d str' % i)
n = 0
for i in range(1000):
for s in a:
b = b'foo' + s
if b == s:
n += 1
b += b'bar'
if b != s:
n += 1
assert n == 2000000, n
@benchmark
def bytes_methods() -> None:
"""Use a mix of bytes methods (but not split/join)."""
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b' %d str' % i)
n = 0
for i in range(100):
for s in a:
if s.startswith(b'foo'):
n += 1
if s.endswith(b'r'):
n += 1
if s.replace(b'-', b'/') != s:
n += 1
if s.strip() != s:
n += 1
if s.rstrip() != s:
n += 1
if s.lower() == s:
n += 1
assert n == 400000, n
@benchmark
def bytes_format() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b'%d str' % i)
n = 0
for i in range(100):
for s in a:
n += len(b"foobar %s stuff" % s)
ss = b"foobar %s stuff" % s
n += len(b"%s-%s" % (s, ss))
assert n == 10434000, n
@benchmark
def bytes_slicing() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b'%d str' % i)
n = 0
for i in range(1000):
for s in a:
n += len(s[2:-2])
if s[:3] == b'Foo':
n += 1
if s[-2:] == b'00':
n += 1
assert n == 9789000, n
@benchmark
def bytes_split_and_join() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b'%d-ab-asdfsdf-asdf' % i)
a.append(b'yeah')
n = 0
for i in range(100):
for s in a:
items = s.split(b'-')
if b'-'.join(items) == s:
n += 1
assert n == 300000, n
@benchmark
def bytes_searching() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b'%d-ab-asdfsdf-asdf' % i)
a.append(b'yeah')
n = 0
for i in range(100):
for s in a:
if b'i' in s:
n += 1
if s.find(b'asd') >= 0:
n += 1
n += s.index(b'a')
assert n == 1089000, n
@benchmark
def bytes_call() -> None:
a = []
for i in range(100):
a.append([65, 55])
a.append([0, 1, 2, 3])
a.append([100])
n = 0
for i in range(10 * 1000):
for s in a:
b = bytes(s)
n += len(b)
assert n == 7000000, n
@benchmark
def bytes_indexing() -> None:
a = []
for i in range(1000):
a.append(b'Foobar-%d' % i)
a.append(b'%d-ab-asdfsdf-asdf' % i)
a.append(b'yeah')
n = 0
for i in range(100):
for s in a:
for j in range(len(s)):
if s[j] == 97:
n += 1
assert n == 500000, n
```
#### File: mypyc-benchmarks/microbenchmarks/singledispatch.py
```python
from functools import singledispatch
from benchmarking import benchmark
NUM_ITER = 500
class Tree:
pass
class Leaf(Tree):
pass
class Node(Tree):
def __init__(self, value: int, left: Tree, right: Tree) -> None:
self.value = value
self.left = left
self.right = right
@singledispatch
def calc_sum(x: Tree) -> int:
raise TypeError("invalid type for x")
@calc_sum.register(Leaf)
def sum_leaf(x: Leaf) -> int:
return 0
@calc_sum.register(Node)
def sum_node(x: Node) -> int:
return x.value + calc_sum(x.left) + calc_sum(x.right)
def build(n: int) -> Tree:
if n == 0:
return Leaf()
return Node(n, build(n - 1), build(n - 1))
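# For reference: build(n) produces 2**(n - k) nodes holding value k, so the tree total is
# sum(k * 2**(n - k) for k in 1..n) = 2**(n + 1) - n - 2, which is 2036 for n == 10 and
# matches the assertion in the benchmark below.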
@benchmark
def sum_tree_singledispatch():
# making the tree too big causes building the tree to take too long or just crash python entirely
tree = build(10)
n: int = 0
for i in range(NUM_ITER):
n += calc_sum(tree)
# calc_sum(tree) should be 2036
assert n == NUM_ITER * 2036, n
```
#### File: mypyc-benchmarks/reporting/collect_baseline.py
```python
from typing import Tuple
from datetime import datetime
import argparse
from reporting.common import get_csv_path
from reporting.gitutil import get_current_commit
from reporting.collect import write_csv_line, run_bench
def parse_args() -> Tuple[str, str]:
parser = argparse.ArgumentParser(
description="""Run an interpreted benchmark, and append result to the
file <data_repo>/data/<benchmark>-cpython.csv.""")
parser.add_argument(
"benchmark",
help="""benchmark name, such as 'richards' (use 'runbench.py --list' to show valid
values)""")
parser.add_argument(
"data_repo",
help="target data repository where output will be written (this will be modified!)")
args = parser.parse_args()
return args.benchmark, args.data_repo
def main() -> None:
benchmark, data_repo = parse_args()
now = datetime.utcnow()
benchmark_commit = get_current_commit(".")
runtime, stddev = run_bench(benchmark, None, compiled=False)
fnam = get_csv_path(data_repo, benchmark, cpython=True)
write_csv_line(fnam, benchmark, now, runtime, stddev, "", benchmark_commit)
if __name__ == "__main__":
main()
```
#### File: mypyc-benchmarks/reporting/markdown.py
```python
from reporting.common import BENCHMARKS_DIR
def mypy_commit_link(commit: str) -> str:
url = 'https://github.com/python/mypy/commit/%s' % commit
return '[%s](%s)' % (commit[:12], url)
def benchmark_link(benchmark: str, link_name: str = '') -> str:
link_name = link_name or benchmark
return '[%s](%s/%s.md)' % (link_name, BENCHMARKS_DIR, benchmark)
def bold(s: str) -> str:
if not s:
return s
return '**%s**' % s
``` |
{
"source": "97tuna/Flask_RoomHairCare",
"score": 2
} |
#### File: 97tuna/Flask_RoomHairCare/baseFunction.py
```python
import firebase_admin
from firebase_admin import credentials
from firebase_admin import storage
from firebase_admin import db
import os
import cv2
import json
from datetime import datetime, timedelta
import pytz
from detects import detectFunction
# Firebase 세팅 정보
cred = credentials.Certificate("./roomdentist-firebase-adminsdk-43dh8-b473fe93b2.json")
firebase_admin.initialize_app(cred, {
'storageBucket': f"roomdentist.appspot.com",
'databaseURL': 'https://roomdentist-default-rtdb.firebaseio.com/'
})
# 버킷은 바이너리 객체의 상위 컨테이너이다. 버킷은 Storage에서 데이터를 보관하는 기본 컨테이너이다.
bucket = storage.bucket() # 기본 버킷 사용
print(" * Firebase Storage Setting Success")
dir = db.reference().child("users")
print(" * Firebase Realtime Database Setting Success")
# Auth Functions
def downloadImage(uid, imageNum, isCavity):
source_blob_name = f"users/{uid}/{todayDate()}/{imageNum}.png" # 유저의 uid에서 오늘 날짜의 이미지 번호를 소스로 지정
destination_file_name = f"./images/{uid}/{todayDate()}/{imageNum}.png" # 로컬 폴더의 저장 위치 지정
createFolder(f"./images/{uid}/{todayDate()}/results")
blob = bucket.blob(source_blob_name) # blob형태로 다운
blob.download_to_filename(destination_file_name) # 다운받은 파일을 지정한 로컬 폴더에 저장
imagePath = f"./images/{uid}/{todayDate()}" # openCV로 다운받은 이미지 불러오기
dentalResults = detectFunction(imagePath, uid, imageNum) # detect 함수로 이미지와 imageNum 전달 후 결과 이미지 return
uploadDatabase(uid, imageNum, dentalResults)
return uploadImage(uid, imageNum)
def uploadImage(uid, imageNum):
source_blob_name = f"users/{uid}/{todayDate()}/results/{imageNum}.png" # Remote 폴더, 유저의 uid에서 오늘 날짜의 이미지 번호를 소스로 지정
source_file_name = f"./images/{uid}/{todayDate()}/results/{imageNum}.png" # 로컬 폴더의 저장 위치
blob = bucket.blob(source_blob_name)
blob.upload_from_filename(source_file_name)
return True # 성공하면 True return
def uploadDatabase(uid, imageNum, dentalResults):
# dir.child(uid).child("results").child(f"{todayDate()}").child("0").update(dentalResults) # 사진 번호가 0부터 시작하기 때문에 오류 방지용
dir.child(uid).child("results").child(f"{todayDate()}").child(f"{imageNum - 1}").update(dentalResults)
# Charts Functions, 오늘부터 -7일까지의 충치 개수 및 아말감 개수 조회해서 차트로 전송
def makeChartsinDatabase(uid):
date = []
cavityValue = []
for i in range(6, -1, -1): # 오늘부터 -7일까지
data = dir.child(uid).child("results").child(f"{todayDate() - timedelta(days = i)}")
calDate = f"{todayDate() - timedelta(days = i)}"
calDate = f"{calDate[-2:]}일"
date.append(calDate)
maxCavityCount = 0
results = data.get()
if results == None:
cavityValue.append("0")
continue
for result in results:
maxCavityCount = max(result["Cavity"], maxCavityCount)
cavityValue.append(f"{maxCavityCount}")
data = {"date" : date, "cavityValue": cavityValue}
return json.dumps(data, ensure_ascii=False)
# 필요 Utils
def todayDate():
dt_now = datetime.now(pytz.timezone('Asia/Seoul'))
return dt_now.date()# - timedelta(days = 4)
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
```
#### File: 97tuna/Flask_RoomHairCare/detects.py
```python
import argparse
import os
import sys
from pathlib import Path
import datetime, pytz
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
strip_optimizer, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import load_classifier, select_device, time_sync
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
@torch.no_grad()
def detectFunction(imageSource, uid, imageNum):
weights = "./hairModel.pt"
hide_conf = False
project = f"{imageSource}/results"
hide_labels = False
device = select_device('')
half = False
classify = False
imgsz = 640
visualize = False
augment = False
conf_thres = 0.7
iou_thres = 0.45
classes = None
agnostic_nms = False
max_det = 1000
# name = name
exist_ok = False
save_txt = False
save_crop = False
nosave = False
line_thickness = 3
view_img = False
dictValue = {'Cavity': 0, 'Gold': 0, 'Amalgam': 0, 'isCavity': "True"}
    source = f"{imageSource}/{imageNum}.png"
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    save_dir = increment_path(Path(project), exist_ok=exist_ok)  # increment run
    # (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
    set_logging()
    device = select_device(device)
    half &= device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    w = str(weights[0] if isinstance(weights, list) else weights)
    classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '']
    check_suffix(w, suffixes)  # check weights have acceptable suffix
    pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes)  # backend booleans
    model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
    stride = int(model.stride.max())  # model stride
    names = model.module.names if hasattr(model, 'module') else model.names  # get class names
    if half:
        model.half()  # to FP16
    if classify:  # second-stage classifier
        modelc = load_classifier(name='resnet50', n=2)  # initialize
        modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model'])
        modelc.to(device).eval()
    imgsz = check_img_size(imgsz, s=stride)  # check image size
    dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
    bs = 1  # batch_size
    vid_path, vid_writer = [None] * bs, [None] * bs
    dt, seen = [0.0, 0.0, 0.0], 0
    for path, img, im0s, vid_cap in dataset:
        t1 = time_sync()
        if onnx:
            img = img.astype('float32')
        else:
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
        img = img / 255.0  # 0 - 255 to 0.0 - 1.0
        if len(img.shape) == 3:
            img = img[None]  # expand for batch dim
        t2 = time_sync()
        dt[0] += t2 - t1
        visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
        pred = model(img, augment=augment, visualize=visualize)[0]
        t3 = time_sync()
        dt[1] += t3 - t2
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
        dt[2] += time_sync() - t3
        for i, det in enumerate(pred):  # per image
            p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
            p = Path(p)  # to Path
            # save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            # s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    x = int((det[:, -1] == c).sum())
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                    dictValue[f"{names[int(c)]}"] = x
                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    if save_img or save_crop or view_img:  # Add bbox to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]}')
                        annotator.box_label(xyxy, label, color=colors(c, True))
                        if save_crop:
                            save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
            im0 = annotator.result()
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(f"{project}/{imageNum}.png", im0)
    print(dictValue)
    return dictValue  # return the per-class counts accumulated above
def todayDate():
    dt_now = datetime.datetime.now(pytz.timezone('Asia/Seoul'))
    return dt_now.date()
```
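As a usage note, here is a minimal sketch of how `baseFunction.py` drives `detectFunction`, assuming the source image has already been downloaded into the per-user folder; the uid, date and image number below are placeholders:

```python
# Hypothetical driver for detectFunction; uid, date and image number are placeholders.
from detects import detectFunction

uid = "example-firebase-uid"
imageNum = 1
imagePath = f"./images/{uid}/2024-01-01"   # folder expected to contain f"{imageNum}.png"

dentalResults = detectFunction(imagePath, uid, imageNum)
# The function returns the accumulated per-class counts, e.g.
# {'Cavity': 2, 'Gold': 0, 'Amalgam': 1, 'isCavity': "True"}
print(dentalResults)
```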
#### File: 97tuna/Flask_RoomHairCare/firebase.py
```python
from flask import Flask, json, request, jsonify
from flask_restful import Resource, Api
from flask_cors import CORS
from OpenSSL import SSL
import datetime as dt
import logging, ssl
import baseFunction
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
CORS(app)
api = Api(app)
# The iOS app POSTs the user uid and image number to the server
@app.route('/Auth', methods = ['POST'])
def Auth():
    jsonData = request.get_json()
    uid = jsonData["uid"]  # user uid
    imageNum = int(jsonData["numbers"])  # image number
    isCavity = jsonData["isCavity"]  # flag that selects the cavity model or the periodontal model
    datetime = baseFunction.todayDate()  # upload date
    checkResults = baseFunction.downloadImage(uid, imageNum, isCavity)  # download the image from Firebase Storage
    if checkResults:
        return {"status": "OK", "datetime": f"{datetime}"}
    else:
        return {"status": "Fail", "datetime": f"{datetime}"}
# The iOS app POSTs the user uid to the server to request chart data
@app.route('/Charts', methods = ['POST'])
def Charts():
    jsonData = request.get_json()
    uid = jsonData["uid"]  # user uid
    datetime = baseFunction.todayDate()  # request date (currently unused)
    return baseFunction.makeChartsinDatabase(uid)
if __name__ == '__main__':
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    cert = '/workspace/Cert/cert.pem'
    pkey = '/workspace/Cert/privkey.pem'
    chainkey = '/workspace/Cert/fullca.pem'
    context.load_verify_locations('/workspace/Cert/fullca.pem')
    context.load_cert_chain(cert, pkey)
    app.run(debug=False, host='192.168.10.6', port=6000, ssl_context=context)
```
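Finally, a hedged sketch of the request the iOS client is expected to send to `/Auth`; the host, port, uid and image number are placeholders, and certificate verification is disabled only because the example assumes a self-signed certificate:

```python
# Hypothetical client call to the /Auth endpoint using the requests library.
import requests

payload = {
    "uid": "example-firebase-uid",  # placeholder uid
    "numbers": "1",                 # image number; cast to int on the server
    "isCavity": "True",             # flag that selects which model to run
}
resp = requests.post("https://192.168.10.6:6000/Auth", json=payload, verify=False)
print(resp.json())  # e.g. {"status": "OK", "datetime": "2024-01-01"}
```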