content (stringlengths, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (stringlengths, 7-104) | path (stringlengths, 4-230) | size (int64, 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a set of objects to represent different stages of a connection
to a Bokeh server.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from enum import Enum, auto
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'CONNECTED_BEFORE_ACK',
'CONNECTED_AFTER_ACK',
'DISCONNECTED',
'ErrorReason',
'NOT_YET_CONNECTED',
'WAITING_FOR_REPLY',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ErrorReason(Enum):
NO_ERROR = auto()
HTTP_ERROR = auto()
NETWORK_ERROR = auto()
class NOT_YET_CONNECTED(object):
''' The ``ClientConnection`` is not yet connected.
'''
async def run(self, connection):
return await connection._connect_async()
class CONNECTED_BEFORE_ACK(object):
''' The ``ClientConnection`` connected to a Bokeh server, but has not yet
received an ACK from it.
'''
async def run(self, connection):
return await connection._wait_for_ack()
class CONNECTED_AFTER_ACK(object):
''' The ``ClientConnection`` connected to a Bokeh server, and has
received an ACK from it.
'''
async def run(self, connection):
return await connection._handle_messages()
class DISCONNECTED(object):
''' The ``ClientConnection`` was connected to a Bokeh server, but is
now disconnected.
'''
def __init__(self, reason=ErrorReason.NO_ERROR, error_code=None, error_detail=""):
        ''' Constructs a DISCONNECTED state with the given reason (an
        ``ErrorReason`` enum value), error code, and additional detail
        provided as a string.
'''
self._error_code = error_code
self._error_detail = error_detail
self._error_reason = reason
@property
def error_reason(self):
''' The reason for the error encoded as an enumeration value.
'''
return self._error_reason
@property
def error_code(self):
''' Holds the error code, if any. None otherwise.
'''
return self._error_code
@property
def error_detail(self):
''' Holds the error message, if any. Empty string otherwise.
'''
return self._error_detail
async def run(self, connection):
return None
class WAITING_FOR_REPLY(object):
''' The ``ClientConnection`` has sent a message to the Bokeh Server which
should generate a paired reply, and is waiting for the reply.
'''
def __init__(self, reqid):
self._reqid = reqid
self._reply = None
@property
def reply(self):
''' The reply from the server. (``None`` until the reply arrives) '''
return self._reply
@property
def reqid(self):
''' The request ID of the originating message. '''
return self._reqid
async def run(self, connection):
message = await connection._pop_message()
if message is None:
return await connection._transition_to_disconnected()
elif 'reqid' in message.header and message.header['reqid'] == self.reqid:
self._reply = message
return await connection._transition(CONNECTED_AFTER_ACK())
else:
return await connection._next()
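# A minimal sketch of how these states are meant to be driven (assuming the
# ``ClientConnection`` transition helpers referenced above): the connection
# awaits ``state.run(connection)`` and installs the next state through
# ``_transition(...)``, giving the happy-path flow
#   NOT_YET_CONNECTED -> CONNECTED_BEFORE_ACK -> CONNECTED_AFTER_ACK
#   -> WAITING_FOR_REPLY (per request/reply pair) -> DISCONNECTED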
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.496732 | 86 | 0.476211 | ["BSD-3-Clause"] | lvcarlosja/bokeh | bokeh/client/states.py | 4,666 | Python |
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
import focal_loss
eps = 2e-5
use_global_stats = True
workspace = 512
res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': (3, 8, 36, 3), '200': (3, 24, 36, 3)}
units = res_deps['101']
filter_list = [256, 512, 1024, 2048]
def residual_unit(data, num_filter, stride, dim_match, name):
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
sum = mx.sym.ElementWiseSum(*[conv3, shortcut], name=name + '_plus')
return sum
def get_resnet_conv(data):
# res1
data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='bn_data')
conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn0')
relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0')
pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0')
# res2
unit = residual_unit(data=pool0, num_filter=filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1')
for i in range(2, units[0] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[0], stride=(1, 1), dim_match=True, name='stage1_unit%s' % i)
# res3
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1')
for i in range(2, units[1] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(1, 1), dim_match=True, name='stage2_unit%s' % i)
# res4
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1')
for i in range(2, units[2] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(1, 1), dim_match=True, name='stage3_unit%s' % i)
return unit
def get_resnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
if config.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, \
num_reg_classes=num_classes, roi_per_img=config.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, \
bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, \
normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, \
data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS_OHEM)
elif config.TRAIN.ENABLE_FOCALLOSS:
#cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
        cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma=2, alpha=0.25)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
else:
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_resnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
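# A minimal inspection sketch, assuming the ``rcnn.config`` defaults imported
# above are in place; the output names are inferred from the symbol names used
# in this file.
#   sym = get_resnet_test(num_classes=21, num_anchors=9)
#   print(sym.list_outputs())
#   # roughly: ['rois_output', 'cls_prob_reshape_output', 'bbox_pred_reshape_output']
#   print(len(sym.list_arguments()))  # weights/biases to bind or load from a checkpoint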
| 59 | 133 | 0.696163 | ["Apache-2.0"] | angelfish91/Faster-RCNN-MXnet011 | rcnn/symbol/symbol_resnet_modify.py | 13,629 | Python |
# -*- coding: utf-8 -*-
"""Subspace Outlier Detection (SOD)
"""
# Author: Yahya Almardeny <[email protected]>
# License: BSD 2 clause
import numpy as np
import numba as nb
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from ..utils.utility import check_parameter
from .base import BaseDetector
@nb.njit(parallel=True)
def _snn_imp(ind, ref_set_):
"""Internal function for fast snn calculation
Parameters
----------
ind : int
Indices return by kNN.
    ref_set_ : int
        The number of shared nearest neighbors used to create the
        reference set. Note that ref_set_ must be smaller than the number
        of neighbors returned by the kNN query.
"""
n = ind.shape[0]
_count = np.zeros(shape=(n, ref_set_), dtype=np.uint32)
for i in nb.prange(n):
temp = np.empty(n, dtype=np.uint32)
test_element_set = set(ind[i])
for j in nb.prange(n):
temp[j] = len(set(ind[j]).intersection(test_element_set))
temp[i] = np.iinfo(np.uint32).max
_count[i] = np.argsort(temp)[::-1][1:ref_set_ + 1]
return _count
class SOD(BaseDetector):
"""Subspace outlier detection (SOD) schema aims to detect outlier in
varying subspaces of a high dimensional feature space. For each data
object, SOD explores the axis-parallel subspace spanned by the data
object's neighbors and determines how much the object deviates from the
neighbors in this subspace.
See :cite:`kriegel2009outlier` for details.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for k neighbors queries.
ref_set: int, optional (default=10)
specifies the number of shared nearest neighbors to create the
reference set. Note that ref_set must be smaller than n_neighbors.
alpha: float in (0., 1.), optional (default=0.8)
specifies the lower limit for selecting subspace.
0.8 is set as default as suggested in the original paper.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e.
the proportion of outliers in the data set. Used when fitting to
define the threshold on the decision function.
Attributes
----------
decision_scores_ : numpy array of shape (n_samples,)
The outlier scores of the training data.
The higher, the more abnormal. Outliers tend to have higher
scores. This value is available once the detector is
fitted.
threshold_ : float
The threshold is based on ``contamination``. It is the
``n_samples * contamination`` most abnormal samples in
``decision_scores_``. The threshold is calculated for generating
binary outlier labels.
labels_ : int, either 0 or 1
The binary labels of the training data. 0 stands for inliers
and 1 for outliers/anomalies. It is generated by applying
``threshold_`` on ``decision_scores_``.
"""
def __init__(self, contamination=0.1, n_neighbors=20, ref_set=10,
alpha=0.8):
super(SOD, self).__init__(contamination=contamination)
if isinstance(n_neighbors, int):
check_parameter(n_neighbors, low=1, param_name='n_neighbors')
else:
raise ValueError(
"n_neighbors should be int. Got %s" % type(n_neighbors))
if isinstance(ref_set, int):
check_parameter(ref_set, low=1, high=n_neighbors,
param_name='ref_set')
else:
raise ValueError("ref_set should be int. Got %s" % type(ref_set))
if isinstance(alpha, float):
check_parameter(alpha, low=0.0, high=1.0, param_name='alpha')
else:
raise ValueError("alpha should be float. Got %s" % type(alpha))
self.n_neighbors_ = n_neighbors
self.ref_set_ = ref_set
self.alpha_ = alpha
self.decision_scores_ = None
def fit(self, X, y=None):
"""Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
# validate inputs X and y (optional)
X = check_array(X)
self._set_n_classes(y)
self.decision_scores_ = self.decision_function(X)
self._process_decision_scores()
return self
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
return self._sod(X)
def _snn(self, X):
"""This function is called internally to calculate the shared nearest
neighbors (SNN). SNN is reported to be more robust than k nearest
neighbors.
Returns
-------
snn_indices : numpy array of shape (n_shared_nearest_neighbors,)
The indices of top k shared nearest neighbors for each observation.
"""
knn = NearestNeighbors(n_neighbors=self.n_neighbors_)
knn.fit(X)
# Get the knn index
ind = knn.kneighbors(return_distance=False)
return _snn_imp(ind, self.ref_set_)
def _sod(self, X):
"""This function is called internally to perform subspace outlier
detection algorithm.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
ref_inds = self._snn(X)
anomaly_scores = np.zeros(shape=(X.shape[0],))
for i in range(X.shape[0]):
obs = X[i]
            ref = X[ref_inds[i]]
means = np.mean(ref, axis=0) # mean of each column
# average squared distance of the reference to the mean
var_total = np.sum(np.sum(np.square(ref - means))) / self.ref_set_
var_expect = self.alpha_ * var_total / X.shape[1]
var_actual = np.var(ref, axis=0) # variance of each attribute
var_inds = [1 if (j < var_expect) else 0 for j in var_actual]
rel_dim = np.sum(var_inds)
if rel_dim != 0:
anomaly_scores[i] = np.sqrt(
np.dot(var_inds, np.square(obs - means)) / rel_dim)
return anomaly_scores
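# A minimal usage sketch, assuming a small in-memory dataset; the attributes
# below are the ones documented on the class.
#   import numpy as np
#   X = np.random.randn(200, 10)
#   clf = SOD(contamination=0.1, n_neighbors=20, ref_set=10, alpha=0.8)
#   clf.fit(X)
#   scores = clf.decision_scores_   # higher means more anomalous
#   labels = clf.labels_            # 0 = inlier, 1 = outlier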
| 35.164179 | 79 | 0.621817 | ["BSD-2-Clause"] | BillyGareth/pyod | pyod/models/sod.py | 7,068 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from unittestzero import Assert
from pages.dashboard import DashboardPage
class TestProductFilter(object):
@pytest.mark.nondestructive
def test_feedback_can_be_filtered_by_all_products_and_versions(self, mozwebqa):
"""Tests product filtering in dashboard
1. Verify that at least one product exists
2. Verify that filtering by product returns results
3. Verify that versions show up when you choose a product
4. Verify that the state of the filters are correct after being applied
5. Verify product and version values in the URL
NB: We don't cycle through all product/version
combinations--only the first two of each.
"""
dashboard_pg = DashboardPage(mozwebqa)
dashboard_pg.go_to_dashboard_page()
total_messages = dashboard_pg.total_message_count
products = dashboard_pg.product_filter.products
Assert.greater(len(products), 0)
for product in products[:2]:
if not product:
# If it's the "unknown" product, just skip it.
continue
dashboard_pg.product_filter.select_product(product)
Assert.greater(total_messages, dashboard_pg.total_message_count)
versions = dashboard_pg.product_filter.versions
Assert.greater(len(versions), 0)
for version in versions[:2]:
if not version:
# If it's the "unknown" version, just skip it.
continue
dashboard_pg.product_filter.select_version(version)
Assert.greater(total_messages, dashboard_pg.total_message_count)
Assert.equal(dashboard_pg.product_filter.selected_product, product)
Assert.equal(dashboard_pg.product_filter.selected_version, version)
Assert.equal(dashboard_pg.product_from_url, product)
Assert.equal(dashboard_pg.version_from_url, version)
Assert.greater(len(dashboard_pg.messages), 0)
dashboard_pg.product_filter.unselect_version(version)
dashboard_pg.product_filter.unselect_product(product)
| 39.295082 | 83 | 0.673759 | ["BSD-3-Clause"] | ANKIT-KS/fjord | smoketests/tests/dashboard/test_product_filter.py | 2,397 | Python |
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class thepetexpress_spider(BaseSpider):
name = 'thepetexpress.co.uk'
allowed_domains = ['thepetexpress.co.uk', 'www.thepetexpress.co.uk']
start_urls = ('http://www.thepetexpress.co.uk/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select(u'//nav[@class="cat"]/ul/li/ul/li/a/@href').extract()
for url in categories:
url = urljoin_rfc(get_base_url(response), url + u'?sort=titledesc')
yield Request(url)
# pagination
next_page = hxs.select(u'//a[@class="nxt"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
# products
products = hxs.select(u'//div[@class="products"]//a/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('url', response.url)
loader.add_xpath('name', u'//div[@id="product"]/h1/text()')
loader.add_xpath('price', u'//p[@class="price"]/span[@class="our_price"]/text()')
if loader.get_output_value('price'):
            yield loader.load_item()
| 35.526316 | 89 | 0.668148 | ["Apache-2.0"] | 0--key/lib | portfolio/Python/scrapy/seapets/thepetexpress.py | 2,025 | Python |
from CtCI_Custom_Classes.stack import Stack
class SetOfStacks:
def __init__(self, capacity):
self.capacity = capacity
self.stacks = []
def get_last_stack(self):
if not self.stacks:
return None
return self.stacks[-1]
def is_empty(self):
last = self.get_last_stack()
return not last or last.is_empty()
def pop(self):
last = self.get_last_stack()
if not last:
return None
v = last.pop()
if last.get_size() == 0:
del self.stacks[-1]
return v
def push(self, data):
last = self.get_last_stack()
if last and not last.is_full():
last.push(data)
else:
stack = Stack(self.capacity)
stack.push(data)
self.stacks.append(stack)
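# A minimal usage sketch, assuming ``Stack`` exposes the push/pop/is_full/
# is_empty/get_size methods used above.
#   stacks = SetOfStacks(capacity=3)
#   for v in range(7):
#       stacks.push(v)
#   len(stacks.stacks)  # 3 internal stacks: [0, 1, 2], [3, 4, 5], [6]
#   stacks.pop()        # returns 6 and discards the now-empty third stack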
| 23.914286 | 43 | 0.551971 | ["MIT"] | enyquist/Cracking_the_Coding_Interview | CtCI_custom_classes/overflow_stack.py | 837 | Python |
# coding: utf-8
"""
Ory Kratos
Welcome to the ORY Kratos HTTP API documentation! # noqa: E501
The version of the OpenAPI document: latest
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ory_kratos_client
from ory_kratos_client.models.request_method_config import RequestMethodConfig # noqa: E501
from ory_kratos_client.rest import ApiException
class TestRequestMethodConfig(unittest.TestCase):
"""RequestMethodConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test RequestMethodConfig
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = ory_kratos_client.models.request_method_config.RequestMethodConfig() # noqa: E501
if include_optional :
return RequestMethodConfig(
action = '0',
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
method = '0'
)
else :
return RequestMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
method = '0',
)
def testRequestMethodConfig(self):
"""Test RequestMethodConfig"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 32.627907 | 100 | 0.497149 | ["Apache-2.0"] | Marcuzz/sdk | clients/kratos/python/test/test_request_method_config.py | 2,806 | Python |
# -*- coding: utf-8 -*-
"""
mslib.mscolab._tests.test_utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tests for mscolab/utils
This file is part of mss.
:copyright: Copyright 2019 Shivashis Padhi
:copyright: Copyright 2019-2020 by the mss team, see AUTHORS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mslib.mscolab.server import db, APP, initialize_managers
from mslib.mscolab.models import User
from mslib.mscolab.utils import get_recent_pid
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.mscolab import handle_db_seed
class Test_Utils(object):
def setup(self):
handle_db_seed()
self.app = APP
self.app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
self.app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
self.app.config['UPLOAD_FOLDER'] = mscolab_settings.UPLOAD_FOLDER
self.app, _, cm, fm = initialize_managers(self.app)
self.fm = fm
self.cm = cm
db.init_app(self.app)
with self.app.app_context():
self.user = User.query.filter_by(id=8).first()
def test_get_recent_pid(self):
with self.app.app_context():
p_id = get_recent_pid(self.fm, self.user)
assert p_id == 4
def teardown(self):
pass
| 33.509091 | 87 | 0.686923 | ["Apache-2.0"] | gisi90/MSS | mslib/mscolab/_tests/test_utils.py | 1,843 | Python |
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
def cat_entropy(logits):
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
y = tf.gather(tf.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
i += 1
def avg_norm(t):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = tf.nn.moments(q, axes=[0, 1])
_, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary)
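# Minimal hand-checked sketches of two helpers above (values assume gamma=0.99
# and a 1000-step linear schedule).
#   discount_with_dones([1.0, 0.0, 1.0], [0, 0, 1], 0.99)
#   # -> [1.9801, 0.99, 1.0]; the done flag stops returns leaking across episodes
#   sched = Scheduler(v=7e-4, nvalues=1000, schedule='linear')
#   sched.value()  # 7e-4 on the first call, decaying linearly toward 0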
| 32.616725 | 107 | 0.587651 | ["MIT"] | zeuseyera/baselines-kr | baselines/a2c/utils.py | 9,361 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2beta1_horizontal_pod_autoscaler import V2beta1HorizontalPodAutoscaler
class TestV2beta1HorizontalPodAutoscaler(unittest.TestCase):
""" V2beta1HorizontalPodAutoscaler unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta1HorizontalPodAutoscaler(self):
"""
Test V2beta1HorizontalPodAutoscaler
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v2beta1_horizontal_pod_autoscaler.V2beta1HorizontalPodAutoscaler()
pass
if __name__ == '__main__':
unittest.main()
| 23.888889 | 108 | 0.739535 | ["Apache-2.0"] | TokkoLabs/client-python | kubernetes/test/test_v2beta1_horizontal_pod_autoscaler.py | 1,075 | Python |
# Generated by Django 3.0.8 on 2021-07-07 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('keyword_relation', '0010_auto_20210322_2049'),
]
operations = [
migrations.CreateModel(
name='Keyword_Grouping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword', models.CharField(max_length=512)),
('group', models.IntegerField(default=-1)),
],
),
]
| 27.090909 | 114 | 0.588926 | ["MIT"] | rohanjsuresh/extracted_keyword_validation | keyword_relation/migrations/0011_keyword_grouping.py | 596 | Python |
import tensorflow as tf
class FrozenBatchNorm2D(tf.keras.layers.Layer):
def __init__(self, eps=1e-5, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.weight = self.add_weight(name='weight', shape=[input_shape[-1]],
initializer='zeros', trainable=False)
self.bias = self.add_weight(name='bias', shape=[input_shape[-1]],
initializer='zeros', trainable=False)
self.running_mean = self.add_weight(name='running_mean',
shape=[input_shape[-1]],
initializer='zeros',
trainable=False)
self.running_var = self.add_weight(name='running_var',
shape=[input_shape[-1]],
initializer='ones',
trainable=False)
def call(self, x):
scale = self.weight * tf.math.rsqrt(self.running_var + self.eps)
shift = self.bias - self.running_mean * scale
return x * scale + shift
def compute_output_shape(self, input_shape):
return input_shape
class Linear(tf.keras.layers.Layer):
'''
Use this custom layer instead of tf.keras.layers.Dense
to allow loading converted PyTorch Dense weights
that have shape (output_dim, input_dim)
'''
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
def build(self, input_shape):
self.kernel = self.add_weight(name='kernel',
shape=[self.output_dim, input_shape[-1]],
initializer='zeros', trainable=True)
self.bias = self.add_weight(name='bias',
shape=[self.output_dim],
initializer='zeros', trainable=True)
def call(self, x):
return tf.matmul(x, self.kernel, transpose_b=True) + self.bias
def compute_output_shape(self, input_shape):
return input_shape.as_list()[:-1] + [self.output_dim]
class FixedEmbedding(tf.keras.layers.Layer):
def __init__(self, embed_shape, **kwargs):
super().__init__(**kwargs)
self.embed_shape = embed_shape
def build(self, input_shape):
self.w = self.add_weight(name='kernel', shape=self.embed_shape,
initializer='zeros', trainable=True)
def call(self, x=None):
return self.w
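# A minimal usage sketch, assuming eager TensorFlow 2.x and small dummy
# tensors; weights stay at their zero initializers until converted PyTorch
# parameters are loaded.
#   dense = Linear(output_dim=4)
#   y = dense(tf.ones((2, 10)))        # shape (2, 4); kernel stored as (4, 10)
#   bn = FrozenBatchNorm2D()
#   z = bn(tf.ones((1, 8, 8, 64)))     # per-channel affine with frozen statistics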
| 38.911765 | 79 | 0.544974 | ["MIT"] | Leonardo-Blanger/detr_tensorflow | detr_tensorflow/models/custom_layers.py | 2,646 | Python |
import ClientSide2 #custom package
import numpy as np
import argparse
import json
import os
import ClassifierFunctions2 as cf
import random
import logging
from matplotlib import pyplot as plt
from builtins import input
from Notation import SpaceGroupsDict as spgs
SpGr = spgs.spacegroups()
from itertools import combinations,chain
# Initialize essential global variables
#URL = "" #you'll need me to send you the link
FAMILIES = ["triclinic","monoclinic","orthorhombic","tetragonal",
"trigonal","hexagonal","cubic"]
DEFAULT_SESSION = os.path.join ("Sessions","session.json")
DEFAULT_USER = "user_profile.json"
SERVER_INFO = "server_gen2.json"
# list of three, one per level
prediction_per_level = [1, 1, 2]
num_peaks = [1, 5]
DEFAULT_FILTER_SETTINGS = { "max_numpeaks": 75,
"dspace_range" : [0.5,6],
"peak_threshold": 0.7,
"filter_size" : 15,
"passes" : 2
}
def build_parser():
parser = argparse.ArgumentParser()
# This will be implemented as rollout broadens
parser.add_argument('--apikey', type=str,
dest='key', help='api key to securely access service',
metavar='KEY', required=False)
parser.add_argument('--session',
dest='session', help='Keep user preferences for multirun sessions', metavar='SESSION',required=False, default=None)
parser.add_argument('--subset',
dest='subset',help='Run a small number of the possible combinations. Mostly for testing. Input the number of combos to run.', metavar='NO_OF_COMBOS',required=False, default=None)
parser.add_argument('--dataonly',
dest='data_only',help='run the classification without plotting', metavar='True/[False]',required=False, default=False)
parser.add_argument('--figuresonly',
dest='figures_only',help='Plot the figures without running data. Data must be saved previously.', metavar='True/[False]',required=False, default=False)
return parser
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def combination_peaks(peak_batch, chem_vec, mode, temp_name, crystal_family, user_info, URL, prediction_per_level, subset, num_peaks):
outpath = "Ready"
if not os.path.exists(outpath):
os.makedirs(outpath)
find_valid_peaks = list(powerset(peak_batch["vec"]))
find_valid_peaks = [item for item in find_valid_peaks if len(item) > num_peaks[0] and len(item) < num_peaks[1]]
print(len(find_valid_peaks),"valid peak combinations")
valid_peaks_combinations = [{"vec":proto_combo} for proto_combo in find_valid_peaks]
found = False
threshold = 0
tot_spec = 1
for p in prediction_per_level:
tot_spec *= p
guesses = {"num_pred": tot_spec}
for k in range(1,tot_spec+1):
guesses["species_"+str(k)]=[]
guesses["spec_confidence_"+str(k)]=[]
# print(guesses)
common_peaks = []
failed_combos = valid_peaks_combinations
#peak_locs,user_info,URL,fam
persistance = 0
LIMIT = 3
# print(failed_combos)
if subset >0 and subset<len(failed_combos):
failed_combos = random.sample(failed_combos, subset)
print("using ", len(failed_combos)," peak combinations")
while len(failed_combos) > 0 and persistance < LIMIT:
for combo in failed_combos:
try:
# print('---classifying---')
# print(combo)
classificated = ClientSide2.Send_For_Classification(combo, chem_vec, mode, crystal_family, user_info, URL, prediction_per_level)
print(classificated)
classificated["file_name"] = temp_name
# print('name =')
# print(temp_name)
print(os.path.join(outpath,temp_name))
cf.write_to_csv(os.path.join(outpath,temp_name) + ".csv", classificated, prediction_per_level)
print(tot_spec)
for k in range(1,tot_spec+1):
print(guesses)
guesses['species_'+str(k)].append( classificated["species_"+str(k)] )
guesses['spec_confidence_'+str(k)].append( classificated["spec_confidence_"+str(k)] )
common_peaks.append(classificated["peaks"])
# remove the classified combination
failed_combos.remove(combo)
except KeyboardInterrupt:
raise
except:
print("An error occured this combination was not classified.\nIt will be retried {} more times".format(LIMIT-persistance))
persistance += 1
if len(failed_combos)>0:
print("there were {} failed combinations".format(len(failed_combos)))
print('returning')
return common_peaks, guesses
def make_figures(guesses,crystal_family,froot):
if crystal_family:
lower_gen = SpGr.edges["genus"][crystal_family][0]
upper_gen = SpGr.edges["genus"][crystal_family][1]
else:
lower_gen = SpGr.edges["genus"][FAMILIES[0]][0]
upper_gen = SpGr.edges["genus"][FAMILIES[-1]][1]
fam_range = range(SpGr.edges["species"][lower_gen][0],1+SpGr.edges["species"][upper_gen][1])
# phi = 2*np.pi/360
fig_ang = 300
phi = (2*np.pi*fig_ang/360)/(max(fam_range)-min(fam_range)+1)
thet = fig_ang/(max(fam_range)-min(fam_range)+1)
fam_axes = [1,3,16,75,143,168,195]
# fig1 = plt.figure(1,figsize=(len(fam_range),16))
fig1 = plt.figure(2,figsize=(16,8))
plt.clf()
ax1 = fig1.add_axes([0.03,0.1,.96,.8])
# ax1.set_yscale('log')
fam_color = ['k','g','b','c','m','y','k']
for k in range(len(fam_axes)-1):
ax1.axvspan(fam_axes[k]-0.5,fam_axes[k+1]-0.5,facecolor = fam_color[k], alpha=0.5)
# ax1.axvspan(fam_axes[0],fam_axes[1]-1,alpha=0.5)
ax1.axvspan(fam_axes[-1]-0.5,np.max(fam_range)-0.5,alpha=0.3)
plt.ion
fig2 = plt.figure(3,figsize=(8,8))
plt.clf()
plt.ion
ax2 = fig2.add_axes([0.1,0.1,0.8,0.8],polar=True)
ax2.set_thetamin(1)
ax2.set_rmin(0)
ax2.set_thetamax(fig_ang)
ax2.set_rlabel_position(30)
ax2.set_theta_direction(-1)
ax2.set_theta_zero_location("S",offset=-(360-fig_ang)/2)
# ax2.set_rscale('log')
prev_histograms_1 = []
prev_histograms_2 = []
plots_1 = []
plots_2 = []
# print('guesses = ')
# print(guesses)
num_pred = np.prod(prediction_per_level)
for rank in range(1,num_pred+1):
histo = np.histogram([g for g in guesses["species_{}".format(rank)]], weights = [g for g in guesses["spec_confidence_{}".format(rank)]], bins = np.arange(min(fam_range)-0.5, max(fam_range)+1.5))
histo_log = np.array([np.log10(float(h))+1 if h>0 else 0 for h in histo[0]])
# print('log_histo = ')
# print(histo_log.tolist())
if rank > 1:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], bottom = np.sum(np.vstack(prev_histograms_1), axis=0), align="center", width = 1.1)
plt.figure(3)
sum_hist = np.sum(np.vstack(prev_histograms_1), axis=0)
log_sum = np.array([np.log10(float(h))-1 if h>0 else -1 for h in sum_hist])
# print('log_sum = ')
# print(log_sum.tolist())
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = log_sum, align="center", width = phi)
else:
plt.figure(2)
plot_1 = plt.bar(fam_range, histo[0], align="center", color='red', width = 1.1)
plt.figure(3)
plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = -1, align="center", color='red', width = phi)
plots_1.append(plot_1)
plots_2.append(plot_2)
plt.figure(2)
plt.yticks(rotation='vertical')
plt.xticks(fam_range,rotation='vertical')
prev_histograms_1.append(histo[0])
prev_histograms_2.append(histo[0])
# plt.figure(3)
# ax2.set_xticks(histo[1][:-1])
plt.figure(2)
# ym = ax1.get_ymax()*.9
r_max = 0
for rect in plot_1:
n_max = rect.get_height()+rect.get_y()
if n_max>r_max:
r_max = n_max
for k in range(len(FAMILIES)-1):
if k ==0:
ym_t = r_max*0.7
cent = 'left'
else:
ym_t = r_max*0.6
cent = 'center'
ax1.text((fam_axes[k+1]+fam_axes[k])/2,ym_t, FAMILIES[k], horizontalalignment=cent)
ax1.text((fam_axes[-1]+np.max(fam_range))/2,ym_t, FAMILIES[-1], horizontalalignment='center')
ax1.autoscale(enable=True, axis='x', tight=True)
ax1.tick_params(axis='x', which='major', labelsize=6)
plt.xlabel("Prediction",fontsize=10)
plt.ylabel("Counts",fontsize=10)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
leg_list = [ "species_{}".format(k+1) for k in range(num_pred) ]
plt.legend(plots_1,leg_list)
print("Results/"+froot+"_gen2.png")
plt.savefig("Results/"+froot+"_gen2.png",dpi = 300)
plt.figure(3)
# plt.xlabel("Prediction",fontsize=10,rotation='vertical')
# plt.ylabel("Counts",fontsize=10)
r_ticks = list(range(int(np.floor(ax2.get_rmin())),int(np.ceil(ax2.get_rmax())+1)))
ax2.set_rgrids(r_ticks, labels = ['10e'+str(r) for r in r_ticks])
ax2.set_thetagrids([f*thet for f in fam_axes],labels = FAMILIES)
plt.legend(plots_2,leg_list)
# plt.legend(plots,("species_1","species_2","species_3","species_4"))
# print("Results/"+froot+"_gen2_polar.png")
# plt.savefig("Results/"+froot+"_gen2_polar.png",dpi = 300)
# plt.show()
def main():
parser = build_parser()
options = parser.parse_args()
if options.subset:
subset = int(options.subset)
else:
subset = -1
print(options.session)
# opens the user specified session
if options.session:
with open(os.path.join("Sessions",options.session),'r') as f:
session = json.load(f)
# opens the default session
else:
with open(DEFAULT_SESSION,'r') as f:
session = json.load(f)
# set variables from loaded session data
# print(session)
file_path = session["file_path"]
if "output_file" in session:
output_file = session["output_file"]
else:
output_file = ''
if "output_file_root" in session:
output_file_root = session["output_file_root"]
else:
output_file_root = ''
if not (output_file or output_file_root):
raise ValueError('output_file or output_file_root must be defined in session file.')
manual_peak_selection = session["manual_peak_selection"]
known_family = session["known_family"]
chemistry = session["chemistry"]
diffraction = session["diffraction"]
print('file inputs')
print(output_file)
print(output_file_root)
mode = ""
if diffraction:
if chemistry:
mode="DiffChem"
else:
mode="DiffOnly"
else:
if chemistry:
raise ValueError('Running chemistry only predictions is currently not implemented')
else:
raise ValueError('Invalid prediction type. Either diffraction or chemistry must be enabled')
if known_family and known_family=='yes':
print('known family')
crystal_family = session["crystal_family"]
prediction_per_level[0] = 1
else:
crystal_family = None
# Load user from provided path, [IN PROGRESS]
if session["user_info"]:
with open(session["user_info"],'r') as f:
user_info = json.load(f)
else:
with open(DEFAULT_USER,'r') as f:
user_info = json.load(f)
with open(session["server_info"],'r') as f:
server_info = json.load(f)
if server_info['URL']:
url = server_info['URL']
else:
raise ValueError('you need to have the server URL provided to you')
chem_vec = cf.check_for_chemistry(session)
print(file_path)
print('---starting loop--')
# Determine if the path is a directory or a file
if os.path.isdir(file_path):
print("loading files from directory")
file_paths = []
for dirpath,dirnames,fpath in os.walk(file_path):
for path in fpath:
if not path[0] == '.':
file_paths.append(os.path.join(dirpath,path))
print("found {} files to load.".format(len(file_paths)))
else:
file_paths = [file_path]
if not os.path.exists("Results"):
os.makedirs("Results")
print(file_paths)
for f_path in file_paths:
# Load Data from specified file (DM3, TIFF, CSV etc....)
print("loading data from {}".format(f_path))
image_data,scale = ClientSide2.Load_Profile(f_path)
print("I successfully loaded the data")
# print(scale)
print(options.figures_only)
print(options.data_only)
        # defining filepaths here to facilitate loading data.
froot = os.path.splitext(os.path.basename(f_path))[0]
if output_file_root:
outfile = 'Results/'+output_file_root+froot+'.json'
outfile_2 = 'Results/'+output_file_root+froot+'_peaks.json'
else:
output_file_root='' #for the figure filenames
[outroot, ext] = os.path.splitext(output_file)
if not ext=='.json':
output_file = outroot+'.json'
output_file_2 = outroot+'_peaks.json'
outfile = 'Results/'+output_file
outfile_2 = 'Results/'+output_file_2
# optional skipping the data creation
if options.figures_only:
print('Only creating figures')
with open(outfile, 'r') as fp:
guesses = json.load(fp)
else:
if diffraction:
                peak_locs, peaks_h = ClientSide2.Find_Peaks(image_data, scale, **DEFAULT_FILTER_SETTINGS)
# Choose which peaks to classify on
if manual_peak_selection:
peak_locs = cf.choose_peaks(peak_locs,peaks_h)
#raise NotImplementedError
else:
peak_locs = []
peaks_h = []
# Script hangs when there are too many peaks.
# TODO: implement something better.
if len(peak_locs['d_spacing'])>25:
print("\n\n======================================================")
print("there are "+ str(len(peak_locs['d_spacing']))+" peaks, which is too many.")
print(f_path)
print("======================================================\n\n")
continue
common_peaks,guesses = combination_peaks(peak_locs, chem_vec, mode, froot, crystal_family, user_info, url, prediction_per_level, subset, num_peaks)
# print("--- peak_locs ---")
# print(peak_locs)
guesses["pk_d_spacing"] = peak_locs["d_spacing"].tolist()
guesses["pk_vec"] = peak_locs["vec"]
print(guesses)
# save data
with open(outfile, 'w') as fp:
json.dump(guesses, fp)
with open(outfile_2, 'w') as fp:
json.dump(common_peaks, fp)
if options.data_only:
print('skipping figures')
else:
make_figures(guesses,crystal_family,output_file_root+froot)
        # TODO: Split up this function and enable plotting on precomputed data.
# plt.show(block=False)
if __name__ == "__main__":
main()
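# A hypothetical invocation, assuming Sessions/session.json and the server info
# file referenced above have been prepared:
#   python DiffractionClassifierCombinatorial2.0.py --session session.json --subset 50
# --dataonly skips figure generation; --figuresonly replots from previously
# saved Results/*.json output.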
| 36.224215 | 203 | 0.590121 | ["MIT"] | MatthewGong/DiffractionClassification | DiffractionClassifierCombinatorial2.0.py | 16,156 | Python |
from django.core.urlresolvers import reverse
from oscar.core.loading import get_model
from oscar.test.testcases import WebTestCase, add_permissions
from oscar.test.factories import (
CategoryFactory, PartnerFactory, ProductFactory, ProductAttributeFactory)
from hooks.test.factories import (
HookEventFactory, HookFactory
)
ProductClass = get_model('catalogue', 'ProductClass')
Hook = get_model('hooks', 'Hook')
class TestAStaffUser(WebTestCase):
is_staff = True
def setUp(self):
super(TestAStaffUser, self).setUp()
self.partner = PartnerFactory()
def test_can_create_hook_with_hook_event(self):
hookevent = HookEventFactory()
hook = HookFactory()
product_class = ProductClass.objects.create(name="Book")
page = self.get(reverse('hook-create', kwargs={"hook_class_slug": product_class.slug}))
form = page.form
form["name"] = u'books'
form["description"] = u'this is description'
form["hookevent_set-0-id"] = hook
form["hookevent_set-0-signal_type"] = hookevent.signal_type
form["hookevent_set-0-URL"] = hookevent.URL
form["hookevent_set-0-extra_headers"] = hookevent.extra_headers
response = form.submit(name='action', value='save')
assert Hook.objects.count() == 2
| 32.121951 | 95 | 0.699317 | ["BSD-3-Clause"] | cage1016/django-oscar-hooks | tests/functional/dashboard/test_hook.py | 1,317 | Python |
"""The tests the History component."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
import json
from unittest.mock import patch, sentinel
import pytest
from pytest import approx
from homeassistant.components import history, recorder
from homeassistant.components.recorder.history import get_significant_states
from homeassistant.components.recorder.models import process_timestamp
import homeassistant.core as ha
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from tests.common import init_recorder_component
from tests.components.recorder.common import trigger_db_commit, wait_recording_done
@pytest.mark.usefixtures("hass_history")
def test_setup():
"""Test setup method of history."""
# Verification occurs in the fixture
pass
def test_get_significant_states(hass_history):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
hass = hass_history
zero, four, states = record_states(hass)
hist = get_significant_states(hass, zero, four, filters=history.Filters())
assert states == hist
def test_get_significant_states_minimal_response(hass_history):
"""Test that only significant states are returned.
When minimal responses is set only the first and
last states return a complete state.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
hass = hass_history
zero, four, states = record_states(hass)
hist = get_significant_states(
hass, zero, four, filters=history.Filters(), minimal_response=True
)
    # The second media_player.test state is reduced
    # down to last_changed and state when minimal_response
    # is set. We use JSONEncoder to make sure that our
    # pre-encoded last_changed is always the same as what
    # will happen when encoding a native state.
input_state = states["media_player.test"][1]
orig_last_changed = json.dumps(
process_timestamp(input_state.last_changed),
cls=JSONEncoder,
).replace('"', "")
orig_state = input_state.state
states["media_player.test"][1] = {
"last_changed": orig_last_changed,
"state": orig_state,
}
assert states == hist
def test_get_significant_states_with_initial(hass_history):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
hass = hass_history
zero, four, states = record_states(hass)
one = zero + timedelta(seconds=1)
one_and_half = zero + timedelta(seconds=1.5)
for entity_id in states:
if entity_id == "media_player.test":
states[entity_id] = states[entity_id][1:]
for state in states[entity_id]:
if state.last_changed == one:
state.last_changed = one_and_half
hist = get_significant_states(
hass,
one_and_half,
four,
filters=history.Filters(),
include_start_time_state=True,
)
assert states == hist
def test_get_significant_states_without_initial(hass_history):
"""Test that only significant states are returned.
We should get back every thermostat change that
includes an attribute change, but only the state updates for
media player (attribute changes are not significant and not returned).
"""
hass = hass_history
zero, four, states = record_states(hass)
one = zero + timedelta(seconds=1)
one_and_half = zero + timedelta(seconds=1.5)
for entity_id in states:
states[entity_id] = list(
filter(lambda s: s.last_changed != one, states[entity_id])
)
del states["media_player.test2"]
hist = get_significant_states(
hass,
one_and_half,
four,
filters=history.Filters(),
include_start_time_state=False,
)
assert states == hist
def test_get_significant_states_entity_id(hass_history):
"""Test that only significant states are returned for one entity."""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
hist = get_significant_states(
hass, zero, four, ["media_player.test"], filters=history.Filters()
)
assert states == hist
def test_get_significant_states_multiple_entity_ids(hass_history):
"""Test that only significant states are returned for one entity."""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
hist = get_significant_states(
hass,
zero,
four,
["media_player.test", "thermostat.test"],
filters=history.Filters(),
)
assert states == hist
def test_get_significant_states_exclude_domain(hass_history):
"""Test if significant states are returned when excluding domains.
We should get back every thermostat change that includes an attribute
change, but no media player changes.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["media_player"]}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude_entity(hass_history):
"""Test if significant states are returned when excluding entities.
We should get back every thermostat and script changes, but no media
player changes.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {history.CONF_ENTITIES: ["media_player.test"]}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude(hass_history):
"""Test significant states when excluding entities and domains.
We should not get back every thermostat and media player test changes.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["thermostat.test"]
del states["thermostat.test2"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_EXCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude_include_entity(hass_history):
"""Test significant states when excluding domains and include entities.
We should not get back every thermostat and media player test changes.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_ENTITIES: ["media_player.test", "thermostat.test"]
},
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["thermostat"]},
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_domain(hass_history):
"""Test if significant states are returned when including domains.
We should get back every thermostat and script changes, but no media
player changes.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_DOMAINS: ["thermostat", "script"]}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_entity(hass_history):
"""Test if significant states are returned when including entities.
We should only get back changes of the media_player.test entity.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_ENTITIES: ["media_player.test"]}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include(hass_history):
"""Test significant states when including domains and entities.
We should only get back changes of the media_player.test entity and the
thermostat domain.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test2"]
del states["media_player.test3"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
}
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude_domain(hass_history):
"""Test if significant states when excluding and including domains.
We should not get back any changes since we include only the
media_player domain but also exclude it.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_DOMAINS: ["media_player"]},
history.CONF_EXCLUDE: {history.CONF_DOMAINS: ["media_player"]},
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude_entity(hass_history):
"""Test if significant states when excluding and including domains.
We should not get back any changes since we include only
media_player.test but also exclude it.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["media_player.test2"]
del states["media_player.test3"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {history.CONF_ENTITIES: ["media_player.test"]},
history.CONF_EXCLUDE: {history.CONF_ENTITIES: ["media_player.test"]},
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude(hass_history):
"""Test if significant states when in/excluding domains and entities.
We should only get back changes of the media_player.test2 entity.
"""
hass = hass_history
zero, four, states = record_states(hass)
del states["media_player.test"]
del states["thermostat.test"]
del states["thermostat.test2"]
del states["script.can_cancel_this_one"]
config = history.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
history.DOMAIN: {
history.CONF_INCLUDE: {
history.CONF_DOMAINS: ["media_player"],
history.CONF_ENTITIES: ["thermostat.test"],
},
history.CONF_EXCLUDE: {
history.CONF_DOMAINS: ["thermostat"],
history.CONF_ENTITIES: ["media_player.test"],
},
},
}
)
check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_are_ordered(hass_history):
"""Test order of results from get_significant_states.
When entity ids are given, the results should be returned with the data
in the same order.
"""
hass = hass_history
zero, four, _states = record_states(hass)
entity_ids = ["media_player.test", "media_player.test2"]
hist = get_significant_states(
hass, zero, four, entity_ids, filters=history.Filters()
)
assert list(hist.keys()) == entity_ids
entity_ids = ["media_player.test2", "media_player.test"]
hist = get_significant_states(
hass, zero, four, entity_ids, filters=history.Filters()
)
assert list(hist.keys()) == entity_ids
def test_get_significant_states_only(hass_history):
"""Test significant states when significant_states_only is set."""
hass = hass_history
entity_id = "sensor.test"
def set_state(state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
start = dt_util.utcnow() - timedelta(minutes=4)
points = []
for i in range(1, 4):
points.append(start + timedelta(minutes=i))
states = []
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=start):
set_state("123", attributes={"attribute": 10.64})
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[0]
):
# Attributes are different, state not
states.append(set_state("123", attributes={"attribute": 21.42}))
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[1]
):
# state is different, attributes not
states.append(set_state("32", attributes={"attribute": 21.42}))
with patch(
"homeassistant.components.recorder.dt_util.utcnow", return_value=points[2]
):
# everything is different
states.append(set_state("412", attributes={"attribute": 54.23}))
hist = get_significant_states(hass, start, significant_changes_only=True)
assert len(hist[entity_id]) == 2
assert states[0] not in hist[entity_id]
assert states[1] in hist[entity_id]
assert states[2] in hist[entity_id]
hist = get_significant_states(hass, start, significant_changes_only=False)
assert len(hist[entity_id]) == 3
assert states == hist[entity_id]
def check_significant_states(hass, zero, four, states, config):
"""Check if significant states are retrieved."""
filters = history.Filters()
exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude.get(history.CONF_ENTITIES, [])
filters.excluded_domains = exclude.get(history.CONF_DOMAINS, [])
include = config[history.DOMAIN].get(history.CONF_INCLUDE)
if include:
filters.included_entities = include.get(history.CONF_ENTITIES, [])
filters.included_domains = include.get(history.CONF_DOMAINS, [])
hist = get_significant_states(hass, zero, four, filters=filters)
assert states == hist
def record_states(hass):
"""Record some test states.
We inject a bunch of state updates from media player, zone and
thermostat.
"""
mp = "media_player.test"
mp2 = "media_player.test2"
mp3 = "media_player.test3"
therm = "thermostat.test"
therm2 = "thermostat.test2"
zone = "zone.home"
script_c = "script.can_cancel_this_one"
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(seconds=1)
two = one + timedelta(seconds=1)
three = two + timedelta(seconds=1)
four = three + timedelta(seconds=1)
states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp2].append(
set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp3].append(
set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[therm].append(
set_state(therm, 20, attributes={"current_temperature": 19.5})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
        # This state will be skipped because it only differs in time
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)})
# This state will be skipped because domain is excluded
set_state(zone, "zoning")
states[script_c].append(
set_state(script_c, "off", attributes={"can_cancel": True})
)
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 19.8})
)
states[therm2].append(
set_state(therm2, 20, attributes={"current_temperature": 19})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[mp].append(
set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)})
)
states[mp3].append(
set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)})
)
# Attributes changed even though state is the same
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 20})
)
return zero, four, states
async def test_fetch_period_api(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(f"/api/history/period/{dt_util.utcnow().isoformat()}")
assert response.status == 200
async def test_fetch_period_api_with_use_include_order(hass, hass_client):
"""Test the fetch period view for history with include order."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass, "history", {history.DOMAIN: {history.CONF_ORDER: True}}
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(f"/api/history/period/{dt_util.utcnow().isoformat()}")
assert response.status == 200
async def test_fetch_period_api_with_minimal_response(hass, hass_client):
"""Test the fetch period view for history with minimal_response."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}?minimal_response"
)
assert response.status == 200
async def test_fetch_period_api_with_no_timestamp(hass, hass_client):
"""Test the fetch period view for history with no timestamp."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get("/api/history/period")
assert response.status == 200
async def test_fetch_period_api_with_include_order(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{
"history": {
"use_include_order": True,
"include": {"entities": ["light.kitchen"]},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}",
params={"filter_entity_id": "non.existing,something.else"},
)
assert response.status == 200
async def test_fetch_period_api_with_entity_glob_include(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{
"history": {
"include": {"entity_globs": ["light.k*"]},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("light.kitchen", "on")
hass.states.async_set("light.cow", "on")
hass.states.async_set("light.nomatch", "on")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}",
)
assert response.status == 200
response_json = await response.json()
assert response_json[0][0]["entity_id"] == "light.kitchen"
async def test_fetch_period_api_with_entity_glob_exclude(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{
"history": {
"exclude": {
"entity_globs": ["light.k*"],
"domains": "switch",
"entities": "media_player.test",
},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("light.kitchen", "on")
hass.states.async_set("light.cow", "on")
hass.states.async_set("light.match", "on")
hass.states.async_set("switch.match", "on")
hass.states.async_set("media_player.test", "on")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}",
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0][0]["entity_id"] == "light.cow"
assert response_json[1][0]["entity_id"] == "light.match"
async def test_fetch_period_api_with_entity_glob_include_and_exclude(hass, hass_client):
"""Test the fetch period view for history."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{
"history": {
"exclude": {
"entity_globs": ["light.many*"],
},
"include": {
"entity_globs": ["light.m*"],
"domains": "switch",
"entities": "media_player.test",
},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("light.kitchen", "on")
hass.states.async_set("light.cow", "on")
hass.states.async_set("light.match", "on")
hass.states.async_set("light.many_state_changes", "on")
hass.states.async_set("switch.match", "on")
hass.states.async_set("media_player.test", "on")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}",
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 3
assert response_json[0][0]["entity_id"] == "light.match"
assert response_json[1][0]["entity_id"] == "media_player.test"
assert response_json[2][0]["entity_id"] == "switch.match"
async def test_entity_ids_limit_via_api(hass, hass_client):
"""Test limiting history to entity_ids."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{"history": {}},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("light.kitchen", "on")
hass.states.async_set("light.cow", "on")
hass.states.async_set("light.nomatch", "on")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow",
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0][0]["entity_id"] == "light.kitchen"
assert response_json[1][0]["entity_id"] == "light.cow"
async def test_entity_ids_limit_via_api_with_skip_initial_state(hass, hass_client):
"""Test limiting history to entity_ids with skip_initial_state."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{"history": {}},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("light.kitchen", "on")
hass.states.async_set("light.cow", "on")
hass.states.async_set("light.nomatch", "on")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(
f"/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state",
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 0
when = dt_util.utcnow() - timedelta(minutes=1)
response = await client.get(
f"/api/history/period/{when.isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state",
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0][0]["entity_id"] == "light.kitchen"
assert response_json[1][0]["entity_id"] == "light.cow"
POWER_SENSOR_ATTRIBUTES = {
"device_class": "power",
"state_class": "measurement",
"unit_of_measurement": "kW",
}
PRESSURE_SENSOR_ATTRIBUTES = {
"device_class": "pressure",
"state_class": "measurement",
"unit_of_measurement": "hPa",
}
TEMPERATURE_SENSOR_ATTRIBUTES = {
"device_class": "temperature",
"state_class": "measurement",
"unit_of_measurement": "°C",
}
@pytest.mark.parametrize(
"units, attributes, state, value",
[
(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000),
(METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000),
(IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 50),
(METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 10),
(IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 14.503774389728312),
(METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 100000),
],
)
async def test_statistics_during_period(
hass, hass_ws_client, units, attributes, state, value
):
"""Test statistics_during_period."""
now = dt_util.utcnow()
hass.config.units = units
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {})
await async_setup_component(hass, "sensor", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.states.async_set("sensor.test", state, attributes=attributes)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period="hourly", start=now)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_ws_client()
await client.send_json(
{
"id": 1,
"type": "history/statistics_during_period",
"start_time": now.isoformat(),
"end_time": now.isoformat(),
"statistic_ids": ["sensor.test"],
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {}
client = await hass_ws_client()
await client.send_json(
{
"id": 1,
"type": "history/statistics_during_period",
"start_time": now.isoformat(),
"statistic_ids": ["sensor.test"],
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"sensor.test": [
{
"statistic_id": "sensor.test",
"start": now.isoformat(),
"mean": approx(value),
"min": approx(value),
"max": approx(value),
"last_reset": None,
"state": None,
"sum": None,
}
]
}
async def test_statistics_during_period_bad_start_time(hass, hass_ws_client):
"""Test statistics_during_period."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{"history": {}},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_ws_client()
await client.send_json(
{
"id": 1,
"type": "history/statistics_during_period",
"start_time": "cats",
}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "invalid_start_time"
async def test_statistics_during_period_bad_end_time(hass, hass_ws_client):
"""Test statistics_during_period."""
now = dt_util.utcnow()
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(
hass,
"history",
{"history": {}},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_ws_client()
await client.send_json(
{
"id": 1,
"type": "history/statistics_during_period",
"start_time": now.isoformat(),
"end_time": "dogs",
}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "invalid_end_time"
@pytest.mark.parametrize(
"units, attributes, unit",
[
(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, "W"),
(METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, "W"),
(IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, "°F"),
(METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, "°C"),
(IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, "psi"),
(METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, "Pa"),
],
)
async def test_list_statistic_ids(hass, hass_ws_client, units, attributes, unit):
"""Test list_statistic_ids."""
now = dt_util.utcnow()
hass.config.units = units
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "history", {"history": {}})
await async_setup_component(hass, "sensor", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_ws_client()
await client.send_json({"id": 1, "type": "history/list_statistic_ids"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == []
hass.states.async_set("sensor.test", 10, attributes=attributes)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await client.send_json({"id": 2, "type": "history/list_statistic_ids"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == [
{"statistic_id": "sensor.test", "unit_of_measurement": unit}
]
hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period="hourly", start=now)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
# Remove the state, statistics will now be fetched from the database
hass.states.async_remove("sensor.test")
await hass.async_block_till_done()
await client.send_json({"id": 3, "type": "history/list_statistic_ids"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == [
{"statistic_id": "sensor.test", "unit_of_measurement": unit}
]
await client.send_json(
{"id": 4, "type": "history/list_statistic_ids", "statistic_type": "dogs"}
)
response = await client.receive_json()
assert not response["success"]
await client.send_json(
{"id": 5, "type": "history/list_statistic_ids", "statistic_type": "mean"}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == [
{"statistic_id": "sensor.test", "unit_of_measurement": unit}
]
await client.send_json(
{"id": 6, "type": "history/list_statistic_ids", "statistic_type": "sum"}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == []
| 34.985646 | 122 | 0.666986 | ["Apache-2.0"] | 0xFEEDC0DE64/homeassistant-core | tests/components/history/test_init.py | 36,563 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkcs.endpoint import endpoint_data
class PauseClusterUpgradeRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'PauseClusterUpgrade')
self.set_uri_pattern('/api/v2/clusters/[ClusterId]/upgrade/pause')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterId(self):
return self.get_path_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
		self.add_path_param('ClusterId',ClusterId)
| 38.512821 | 74 | 0.768309 | ["Apache-2.0"] | Explorer1092/aliyun-openapi-python-sdk | aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/PauseClusterUpgradeRequest.py | 1,502 | Python |
#MenuTitle: Generate lowercase from uppercase
"""
Generate lowercase a-z from uppercase A-Z
TODO (M Foley) Generate all lowercase glyphs, not just a-z
"""
font = Glyphs.font
glyphs = list('abcdefghijklmnopqrstuvwxyz')
masters = font.masters
for glyph_name in glyphs:
glyph = GSGlyph(glyph_name)
glyph.updateGlyphInfo()
font.glyphs.append(glyph)
    for idx, master in enumerate(masters):
comp_name = glyph_name.upper()
component = GSComponent(comp_name, (0,0))
glyph.layers[idx].components.append(component)
Glyphs.redraw()
| 24.652174 | 58 | 0.714286 | ["MIT"] | m4rc1e/mf-glyphs-scripts | Glyph-Builders/lowercase_from_upper.py | 567 | Python |
# to run:
# pip install unittest2
# unit2 discover
#
# to debug:
# pip install nose
# nosetests --pdb
import StringIO
import sys
import pdfquery
import unittest2
from pdfquery.cache import FileCache
class TestPDFQuery(unittest2.TestCase):
"""
Various tests based on the IRS_1040A sample doc.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/IRS_1040A.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
# this varies by Python version, because the float handling isn't quite
# the same
comparison_file = "tests/saved_output/IRS_1040A_output%s.xml" % (
"_python_2.6" if sys.version_info[0] == 2 and sys.version_info[1] < 7 else "")
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s to "
"tests/failed_output.xml." % comparison_file)
def test_selectors(self):
"""
Test the :contains and :in_bbox selectors.
"""
label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name '
'and initial")')
self.assertEqual(len(label), 1)
left_corner = float(label.attr('x0'))
self.assertEqual(left_corner, 143.651)
bottom_corner = float(label.attr('y0'))
self.assertEqual(bottom_corner, 714.694)
name = self.pdf.pq('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' %
(left_corner,
bottom_corner - 30,
left_corner + 150,
bottom_corner)
).text()
self.assertEqual(name, "John E.")
def test_extract(self):
"""
Test the extract() function.
"""
values = self.pdf.extract([
('with_parent', 'LTPage[pageid="1"]'),
('with_formatter', 'text'),
('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'),
('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'),
('with_parent', 'LTPage[pageid="2"]'),
('oath', 'LTTextLineHorizontal:contains("perjury")',
lambda match: match.text()[:30] + "..."),
('year', 'LTTextLineHorizontal:contains("Form 1040A (")',
lambda match: int(match.text()[-5:-1]))
])
self.assertDictEqual(values, {
'last_name': 'Michaels',
'spouse': 'Susan R.',
'oath': u'Under penalties of perjury, I ...',
'year': 2007
})
def test_page_numbers(self):
self.assertEqual(self.pdf.tree.getroot()[0].get('page_label'), '1')
class TestDocInfo(unittest2.TestCase):
def test_docinfo(self):
doc_info_results = [
["tests/samples/bug11.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Title': u'\u262d\U0001f61c\U0001f4a9Unicode is fun!',
'Author': 'Russkel', 'Creator': 'Firefox',
'ModDate': "D:20140528141914+08'00'",
'CreationDate': 'D:20140528061106Z', 'Subject': ''}],
["tests/samples/bug15.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Author': 'Brepols Publishers',
'Creator': 'PDFsharp 1.2.1269-g (www.pdfsharp.com)',
'AAPL_Keywords': "[u'Brepols', u'Publishers', u'CTLO']",
'Title': 'Exporter',
'ModDate': "D:20140614192741Z00'00'",
'Keywords': 'Brepols, Publishers, CTLO',
'CreationDate': "D:20140614192741Z00'00'",
'Subject': 'Extrait de la Library of Latin Texts - Series A'}],
["tests/samples/bug17.pdf",
{'CreationDate': 'D:20140328164512Z',
'Creator': 'Adobe InDesign CC (Macintosh)',
'ModDate': 'D:20140328164513Z',
'Producer': 'Adobe PDF Library 10.0.1', 'Trapped': '/False'}]
]
for file_path, expected_results in doc_info_results:
pdf = pdfquery.PDFQuery(file_path)
pdf.load(None)
self.assertDictEqual(
dict(pdf.tree.getroot().attrib),
expected_results
)
class TestUnicode(unittest2.TestCase):
def test_unicode_text(self):
pdf = pdfquery.PDFQuery("tests/samples/bug18.pdf")
pdf.load()
self.assertEqual(
pdf.pq('LTTextLineHorizontal:contains("Hop Hing Oils")').text(),
(u'5 Hop Hing Oils and Fats (Hong Kong) Ltd \uf06c '
u'\u7279\u5bf6\u7cbe\u88fd\u8c6c\u6cb9')
)
class TestAnnotations(unittest2.TestCase):
"""
Ensure that annotations such as links are getting added to the PDFs
properly, as discussed in issue #28.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/bug28.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
comparison_file = 'tests/saved_output/bug28.xml'
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s "
"to tests/failed_output.xml." % comparison_file)
if __name__ == '__main__':
unittest2.main()
| 34.630208 | 90 | 0.563393 | ["MIT"] | leeoniya/pdfquery | tests/tests.py | 6,649 | Python |
from controller.enums import PartEnum
def convert_from_part_id(part_id):
if part_id == PartEnum.FUSE.value:
return 'Fuse', 'Fuse'
elif part_id == PartEnum.BACKCOVER.value:
return 'BottomCover', 'BottomCoverFlipped'
elif part_id == PartEnum.WHITECOVER.value:
return 'WhiteCover', 'WhiteCoverFlipped'
elif part_id == PartEnum.BLUECOVER.value:
return 'BlueCover', 'BlueCoverFlipped'
elif part_id == PartEnum.BLACKCOVER.value:
return 'BlackCover', 'BlackCoverFlipped'
elif part_id == PartEnum.PCB.value:
return 'PCB', 'PCBFlipped'
else:
print("[W] Could not convert class_id")
return -1, -1
def convert_to_part_id(class_name):
if class_name == 'Fuse':
return PartEnum.FUSE.value
elif class_name == 'BottomCover':
return PartEnum.BACKCOVER.value
elif class_name == 'BottomCoverFlipped':
return PartEnum.BACKCOVER_FLIPPED.value
elif class_name == 'WhiteCover':
return PartEnum.WHITECOVER.value
elif class_name == 'WhiteCoverFlipped':
return PartEnum.WHITECOVER_FLIPPED.value
elif class_name == 'BlueCover':
return PartEnum.BLUECOVER.value
elif class_name == 'BlueCoverFlipped':
return PartEnum.BLUECOVER_FLIPPED.value
elif class_name == 'BlackCover':
return PartEnum.BLACKCOVER.value
elif class_name == 'BlackCoverFlipped':
return PartEnum.BLACKCOVER_FLIPPED.value
elif class_name == 'PCB':
return PartEnum.PCB.value
elif class_name == 'PCBFlipped':
return PartEnum.PCB_FLIPPED.value
else:
return PartEnum.INVALID.value
| 34.479167 | 50 | 0.682175 | ["MIT"] | EmilRyberg/P6BinPicking | controller/class_converter.py | 1,655 | Python |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fixture as ovo_fixture
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import uuidsentinel
class ConsoleAuthTokenTestCase(test.TestCase):
def setUp(self):
super(ConsoleAuthTokenTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
instance = objects.Instance(
context=self.context,
project_id=self.context.project_id,
uuid=uuidsentinel.fake_instance)
instance.create()
self.console = objects.ConsoleAuthToken(
context=self.context,
instance_uuid=uuidsentinel.fake_instance,
console_type='fake-type',
host='fake-host',
port=1000,
internal_access_path='fake-internal_access_path',
access_url_base='fake-external_access_path'
)
self.token = self.console.authorize(100)
def test_validate(self):
connection_info = objects.ConsoleAuthToken.validate(
self.context, self.token)
expected = self.console.obj_to_primitive()['nova_object.data']
del expected['created_at']
ovo_fixture.compare_obj(self, connection_info, expected,
allow_missing=['created_at'])
def test_validate_invalid(self):
unauthorized_token = uuidsentinel.token
self.assertRaises(
exception.InvalidToken,
objects.ConsoleAuthToken.validate,
self.context, unauthorized_token)
| 38.551724 | 78 | 0.68381 | ["Apache-2.0"] | MultipleCrashes/nova | nova/tests/functional/db/test_console_auth_token.py | 2,236 | Python |
import argparse
import datetime
import json
import os
import time
from os import path
import numpy as np
import torch
from absl import flags
from torch import optim
from pprint import pprint
import wandb
from src.alive_sieve import AliveSieve, SievePlayback
from src.nets import AgentModel
from src.rewards_lib import calc_rewards
from src.sampling import (generate_test_batches,
generate_training_batch,
hash_batches)
FLAGS = flags.FLAGS
def render_action(t, s, prop, term):
agent = t % 2
speaker = 'A' if agent == 0 else 'B'
utility = s.utilities[:, agent]
print(' ', end='')
if speaker == 'B':
print(' ', end='')
print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')
print(' %s/%s %s/%s %s/%s' % (
prop[0][0].item(), s.pool[0][0].item(),
prop[0][1].item(), s.pool[0][1].item(),
prop[0][2].item(), s.pool[0][2].item(),
), end='')
print('')
if t + 1 == s.N[0]:
print(' [out of time]')
elif term[0][0]:
print(' ACC')
def save_model(model_file, agent_models, agent_opts, start_time, episode):
state = {}
for i in range(2):
state['agent%s' % i] = {}
state['agent%s' % i]['model_state'] = agent_models[i].state_dict()
state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()
state['episode'] = episode
state['elapsed_time'] = time.time() - start_time
with open(model_file + '.tmp', 'wb') as f:
torch.save(state, f)
os.rename(model_file + '.tmp', model_file)
def load_model(model_file, agent_models, agent_opts):
with open(model_file, 'rb') as f:
state = torch.load(f)
for i in range(2):
agent_models[i].load_state_dict(state['agent%s' % i]['model_state'])
agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state'])
episode = state['episode']
# create a kind of 'virtual' start_time
start_time = time.time() - state['elapsed_time']
return episode, start_time
class State(object):
def __init__(self, N, pool, utilities):
batch_size = N.size()[0]
self.N = N
self.pool = pool
self.utilities = torch.zeros(batch_size, 2, 3, dtype=torch.int64, device=FLAGS.device)
self.utilities[:, 0] = utilities[0]
self.utilities[:, 1] = utilities[1]
self.last_proposal = torch.zeros(batch_size, 3, dtype=torch.int64, device=FLAGS.device)
self.m_prev = torch.zeros(batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
def sieve_(self, still_alive_idxes):
self.N = self.N[still_alive_idxes]
self.pool = self.pool[still_alive_idxes]
self.utilities = self.utilities[still_alive_idxes]
self.last_proposal = self.last_proposal[still_alive_idxes]
self.m_prev = self.m_prev[still_alive_idxes]
def run_episode(
batch,
agent_models,
batch_size,
testing,
render=False,
initial_agent=0):
"""
    turning testing on means we disable stochasticity: always pick the argmax
"""
s = State(**batch)
sieve = AliveSieve(batch_size=batch_size)
actions_by_timestep = []
alive_masks = []
    # the next two tensors won't be sieved; they stay the same size throughout
    # the entire batch, and we update them using sieve.out_idxes[...]
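    # For example (illustrative only, assuming sieve.out_idxes holds the global row
    # indexes of the still-alive games): if only rows 0 and 3 are alive, out_idxes is
    # tensor([0, 3]), so rewards[sieve.out_idxes] = new_rewards scatters the per-alive
    # results back into the full-size rewards tensor.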
rewards = torch.zeros(batch_size, 3, device=FLAGS.device)
num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device)
term_matches_argmax_count = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
num_policy_runs = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
entropy_loss_by_agent = [
torch.zeros(1, device=FLAGS.device),
torch.zeros(1, device=FLAGS.device)
]
if render:
print(' ')
print(' ',
'{} {} {}'.format(*s.utilities[0][0].tolist()),
' ',
'{} {} {}'.format(*s.pool[0].tolist()),
' ',
'{} {} {}'.format(*s.utilities[0][1].tolist()))
current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
    current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8, device=FLAGS.device)
for t in range(FLAGS.max_timesteps):
if FLAGS.linguistic:
if FLAGS.normal_form and t % 2 == 1:
_prev_message = prev_A_message
else:
_prev_message = s.m_prev
else:
_prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device)
if FLAGS.proposal:
if FLAGS.normal_form and t % 2 == 1:
_prev_proposal = prev_A_proposal
else:
_prev_proposal = s.last_proposal
else:
_prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
# agent = t % 2
agent = (initial_agent + t) % 2
agent_model = agent_models[agent]
(nodes, term_a, s.m_prev, this_proposal, _entropy_loss,
_term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(
pool=s.pool,
utility=s.utilities[:, agent],
m_prev=_prev_message,
prev_proposal=_prev_proposal,
testing=testing,
)
entropy_loss_by_agent[agent] += _entropy_loss
actions_by_timestep.append(nodes)
term_matches_argmax_count += _term_matches_argmax_count
num_policy_runs += sieve.batch_size
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
if FLAGS.force_masking_comm:
utt_mask[agent][sieve.out_idxes] |= _utt_mask
prop_mask[agent][sieve.out_idxes] |= _prop_mask
if FLAGS.proposal_termination and not FLAGS.normal_form:
term_a = torch.prod(this_proposal == _prev_proposal,
dim=1,
keepdim=True)
elif not FLAGS.proposal_termination and FLAGS.normal_form:
#TODO which proposal to use here?
if t % 2 == 1:
term_a = (term_a * current_A_term)
else:
current_A_term = term_a
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
elif FLAGS.proposal_termination and FLAGS.normal_form:
if t % 2 == 1:
term_a = torch.prod(this_proposal == current_A_proposal,
dim=1,
keepdim=True)
else:
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
if render and sieve.out_idxes[0] == 0:
render_action(
t=t,
s=s,
term=term_a,
prop=this_proposal
)
new_rewards = calc_rewards(
t=t,
s=s,
term=term_a,
agent=agent,
)
rewards[sieve.out_idxes] = new_rewards
s.last_proposal = this_proposal
if FLAGS.normal_form and t % 2 == 0:
prev_A_proposal = current_A_proposal
current_A_proposal = this_proposal
prev_A_message = current_A_message
current_A_message = s.m_prev
sieve.mark_dead(term_a)
sieve.mark_dead(t + 1 >= s.N)
alive_masks.append(sieve.alive_mask.clone())
sieve.set_dead_global(num_steps, t + 1)
if sieve.all_dead():
break
s.sieve_(sieve.alive_idxes)
if FLAGS.normal_form:
current_A_proposal = current_A_proposal[sieve.alive_idxes]
prev_A_proposal = prev_A_proposal[sieve.alive_idxes]
current_A_message = current_A_message[sieve.alive_idxes]
prev_A_message = prev_A_message[sieve.alive_idxes]
sieve.self_sieve_()
if render:
print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist()))
print(' ')
utt_mask_count = utt_mask.sum(dim=[1,2]).cpu().numpy()
prop_mask_count = prop_mask.sum(dim=[1,2]).cpu().numpy()
return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent,
term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws,
prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)
def safe_div(a, b):
"""
returns a / b, unless b is zero, in which case returns 0
    this is primarily for usage in cases where b might be systematically zero, eg because comms are disabled or similar
also accounts for a or b being tensors
"""
if isinstance(a, torch.Tensor):
a = a.item()
if isinstance(b, torch.Tensor):
b = b.item()
return 0 if b == 0 else a / b
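# Illustrative behaviour of safe_div (assumed examples, not part of the original code):
#   safe_div(1, 0)                                -> 0
#   safe_div(6, 3)                                -> 2.0
#   safe_div(torch.tensor(6.), torch.tensor(3.))  -> 2.0  (tensor args are unwrapped via .item())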
def run(args):
"""
testing option will:
- use argmax, ie disable stochastic draws
- not run optimizers
- not save model
"""
if args.wandb:
if args.wandb_offline:
os.environ["WANDB_MODE"] = "dryrun"
wandb.init(project='ecn',
name=args.name,
dir=f'{args.savedir}',
group=args.wandb_group)
wandb.config.update(args)
wandb.config.update(FLAGS)
flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']}
args_dict = args.__dict__
pprint(args_dict)
pprint(flags_dict)
os.makedirs(args.model_dir, exist_ok=True)
os.makedirs(args.logdir, exist_ok=True)
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_r = np.random.RandomState(args.seed)
else:
train_r = np.random
test_r = np.random.RandomState(args.test_seed)
test_batches = generate_test_batches(batch_size=args.batch_size,
num_batches=5,
random_state=test_r)
test_hashes = hash_batches(test_batches)
episode = 0
start_time = time.time()
agent_models = []
agent_opts = []
agent_name = ['A', 'B']
for i in range(2):
model = AgentModel(
name=agent_name[i],
term_entropy_reg=args.term_entropy_reg,
utterance_entropy_reg=args.utterance_entropy_reg,
proposal_entropy_reg=args.proposal_entropy_reg
).to(FLAGS.device)
agent_models.append(model)
agent_opts.append(optim.Adam(params=agent_models[i].parameters()))
if args.wandb:
wandb.watch(agent_models)
if path.isfile(args.model_file) and not args.no_load:
episode, start_time = load_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts)
print('loaded model')
elif args.testing:
print('')
print('ERROR: must have loadable model to use --testing option')
print('')
return
last_print = time.time()
rewards_sum = torch.zeros(3, device=FLAGS.device)
steps_sum = 0
count_sum = 0
f_log = open(args.log_file, 'w')
all_args = {**args_dict, **flags_dict}
f_log.write('meta: %s\n' % json.dumps(all_args))
last_save = time.time()
baseline = torch.zeros(3, device=FLAGS.device)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask_count = np.array([0,0])
prop_mask_count = np.array([0,0])
while episode < args.episodes:
render = (episode % args.render_every_episode == 0)
split = 2 if FLAGS.randomize_first else 1
agent_losses = [0,0]
both_rewards = []
for i in range(2):
agent_opts[i].zero_grad()
for initial_agent in range(split):
batch = generate_training_batch(batch_size=args.batch_size // split,
test_hashes=test_hashes,
random_state=train_r)
(actions, rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_utt_mask_count, _prop_mask_count) = run_episode(
batch=batch,
agent_models=agent_models,
batch_size=args.batch_size // split,
render=render,
initial_agent=initial_agent,
testing=args.testing)
term_matches_argmax_count += _term_matches_argmax_count
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
num_policy_runs += _num_policy_runs
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
utt_mask_count += _utt_mask_count
prop_mask_count += _prop_mask_count
if not args.testing:
reward_loss_by_agent = [0, 0]
baselined_rewards = rewards - baseline
rewards_by_agent = []
for i in range(2):
if FLAGS.prosocial:
rewards_by_agent.append(baselined_rewards[:, 2])
else:
rewards_by_agent.append(baselined_rewards[:, i])
sieve_playback = SievePlayback(alive_masks)
for t, global_idxes in sieve_playback:
agent = (initial_agent + t) % 2
if len(actions[t]) > 0:
for action in actions[t]:
_rewards = rewards_by_agent[agent]
_reward = _rewards[global_idxes].float().contiguous().view(
sieve_playback.batch_size, 1)
_reward_loss = - (action * _reward)
_reward_loss = _reward_loss.sum()
reward_loss_by_agent[agent] += _reward_loss
for i in range(2):
loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]
loss.backward()
rewards_sum += rewards.detach().sum(0)
steps_sum += steps.sum()
count_sum += args.batch_size // split
both_rewards.append(rewards)
for i in range(2):
agent_opts[i].step()
rewards = torch.cat(both_rewards).detach()
baseline = 0.7 * baseline + 0.3 * rewards.mean(0).detach()
if render:
"""
run the test batches, print the results
"""
test_rewards_sum = np.zeros(3)
test_count_sum = len(test_batches) * args.batch_size
test_num_policy_runs = 0
            test_utt_mask_count = np.array([0,0])
            test_prop_mask_count = np.array([0,0])
for test_batch in test_batches:
(actions, test_rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_test_utt_mask_count, _test_prop_mask_count) = run_episode(
batch=test_batch,
agent_models=agent_models,
batch_size=args.batch_size,
render=True,
testing=True)
test_rewards_sum += test_rewards.sum(0).cpu().numpy()
test_num_policy_runs += _test_num_policy_runs
test_utt_mask_count += _test_utt_mask_count
test_prop_mask_count += _test_prop_mask_count
time_since_last = time.time() - last_print
rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum,
rewards_sum[1] / count_sum,
rewards_sum[2] / count_sum)
test_rewards_str = '%.2f,%.2f,%.2f' % (test_rewards_sum[0] / test_count_sum,
test_rewards_sum[1] / test_count_sum,
test_rewards_sum[2] / test_count_sum)
baseline_str = '%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])
utt_mask_pct = utt_mask_count / (3 * count_sum)
test_utt_mask_pct = test_utt_mask_count / (3 * test_count_sum)
prop_mask_pct = prop_mask_count / (3 * count_sum)
test_prop_mask_pct = test_prop_mask_count / (3 * test_count_sum)
print('test {}'.format(test_rewards_str))
print('train {}'.format(rewards_str))
print('base {}'.format(baseline_str))
print('ep {}, {} games/sec, {:2.2f} avg steps'.format(
episode,
int(count_sum / time_since_last),
steps_sum.item() / count_sum
))
print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format(
term_matches_argmax_count / num_policy_runs,
safe_div(utt_matches_argmax_count, utt_stochastic_draws),
prop_matches_argmax_count / prop_stochastic_draws
))
if FLAGS.force_masking_comm:
print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*utt_mask_pct, *test_utt_mask_pct,
))
print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*prop_mask_pct, *test_prop_mask_pct,
))
episode_log = {
'episode': episode,
'avg_reward_A': (rewards_sum[0] / count_sum).item(),
'avg_reward_B': (rewards_sum[1] / count_sum).item(),
'avg_reward_0': (rewards_sum[2] / count_sum).item(),
'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(),
'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(),
'test_reward': (test_rewards_sum[2] / test_count_sum).item(),
'avg_steps': torch.true_divide(steps_sum, count_sum).item(),
'games_sec': (count_sum / time_since_last),
'elapsed': time.time() - start_time,
'argmaxp_term': term_matches_argmax_count / num_policy_runs,
'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),
'argmaxp_prop': prop_matches_argmax_count / prop_stochastic_draws,
'utt_unmasked_A': utt_mask_pct[0],
'utt_unmasked_B': utt_mask_pct[1],
'prop_unmasked_A': prop_mask_pct[0],
'prop_unmasked_B': prop_mask_pct[1],
'test_utt_unmasked_A': test_utt_mask_pct[0],
'test_utt_unmasked_B': test_utt_mask_pct[1],
'test_prop_unmasked_A': test_prop_mask_pct[0],
'test_prop_unmasked_B': test_prop_mask_pct[1],
}
f_log.write(json.dumps(episode_log) + '\n')
f_log.flush()
if args.wandb:
wandb.log(episode_log)
last_print = time.time()
steps_sum = 0
rewards_sum.fill_(0)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
count_sum = 0
utt_mask_count.fill(0)
prop_mask_count.fill(0)
if (not args.testing
and not args.no_save
and episode > 0
and episode % args.save_every_episode == 0):
save_model(model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
episode += 1
if (not args.no_save and
not args.testing):
save_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
f_log.close()
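# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original training script): a minimal,
# self-contained sketch of the update rule used in the loop above - a REINFORCE
# step with an exponential-moving-average baseline. The tensors `log_probs` and
# `rewards` are hypothetical stand-ins for the values collected by run_episode.
def _reinforce_step_sketch(log_probs, rewards, baseline, optimizer, momentum=0.7):
    advantage = rewards - baseline                   # baselined return
    loss = -(log_probs * advantage.detach()).sum()   # REINFORCE surrogate
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # EMA baseline for the next step, mirroring `0.7 * baseline + 0.3 * mean`
    return momentum * baseline + (1 - momentum) * rewards.mean().detach()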
| 39.565138 | 118 | 0.58554 | [
"MIT"
] | mnoukhov/ecn | src/ecn.py | 21,563 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
!!! This generally needs to be run right after the close of applications for a framework, and passed to product
!!! managers & CCS.
Generate a CSV with per-lot draft statistics for each supplier who registered interest in the framework,
whether or not they made a complete application in the end.
Fields included:
* Supplier ID
* Supplier DM name
* Application / no_application
* The status of their declaration
* The number of services submitted and left in draft per lot
Usage:
scripts/framework-applications/export-framework-applications-at-close.py <framework_slug> <stage> <auth_token>
<output-dir> [-e <exclude_suppliers>]
Example:
scripts/framework-applications/export-framework-applications-at-close.py g-cloud-11 preview myToken path/to/myfolder
-e 123,456,789
"""
import os
import sys
from datetime import datetime
from dmapiclient import DataAPIClient
from docopt import docopt
sys.path.insert(0, '.')
from dmscripts.export_framework_applications_at_close import GenerateFrameworkApplicationsCSV
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == "__main__":
arguments = docopt(__doc__)
output_dir = arguments['<output-dir>']
stage = arguments['<stage>']
framework_slug = arguments['<framework_slug>']
filename = "{}-how-application-looked-at-close-{}-{}.csv".format(
framework_slug,
stage,
datetime.utcnow().strftime("%Y-%m-%d_%H.%M-")
)
# Create output directory if it doesn't already exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
client = DataAPIClient(
base_url=get_api_endpoint_from_stage(stage),
auth_token=arguments['<auth_token>'],
)
csv_builder = GenerateFrameworkApplicationsCSV(
client=client,
target_framework_slug=framework_slug
)
if arguments.get('<exclude_suppliers>') is not None: # updates the generator with any IDs the user wants excluded
csv_builder.excluded_supplier_ids = [int(n) for n in arguments['<exclude_suppliers>'].split(',')]
csv_builder.populate_output()
with open(os.path.join(output_dir, filename), 'w') as csvfile:
csv_builder.write_csv(outfile=csvfile)
| 33.382353 | 120 | 0.725551 | [
"MIT"
] | Crown-Commercial-Service/digitalmarketplace-scripts | scripts/framework-applications/export-framework-applications-at-close.py | 2,270 | Python |
from docxtpl import DocxTemplate
import csv
import json
import random
# pick a random car from Car_info.txt
with open('Car_info.txt') as file:
car_rand = []
reader = csv.reader(file)
for row in file:
car_rand.append(row)
report_car = car_rand[random.randint(0, len(car_rand)-1)]
car_info = report_car.split()
# build a .docx report describing the chosen car
def get_data(Brand, Model, Fuel_cons, Price):
return {
'Название': Brand,
'Модель': Model,
'Объем': Fuel_cons,
'Цена': Price
}
def from_template(Brand, Model, Fuel_cons, Price, template):
template = DocxTemplate(template)
data = get_data(Brand, Model, Fuel_cons, Price)
template.render(data)
template.save('О_машине.docx')
def report(Brand, Model, Fuel_cons, Price):
template = 'О_машине.docx'
document = from_template(Brand, Model, Fuel_cons, Price, template)
report(car_info[0], car_info[1], car_info[2], car_info[3])
# csv file
car_list=[]
with open('Авто_инфо.txt', 'r') as file:
for row in file:
inner_list = [x.strip() for x in row.split(',')]
car_list.append(inner_list)
print(car_list)
with open('car.csv', 'w') as file:
writer = csv.writer(file, delimiter = '*')
writer.writerows(car_list)
# json file
with open('Авто_json.txt', 'w') as f:
    json.dump(str(car_info), f)
| 30.809524 | 70 | 0.665379 | [
"MIT"
] | Nikolas-01/Lesson_7 | 7.py | 1,369 | Python |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class SecurityFilterExclusionFilter(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"name": (str,), # noqa: E501
"query": (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"name": "name", # noqa: E501
"query": "query", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, name, query, *args, **kwargs): # noqa: E501
"""SecurityFilterExclusionFilter - a model defined in OpenAPI
Args:
name (str): Exclusion filter name.
query (str): Exclusion filter query. Logs that match this query are excluded from the security filter.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.query = query
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
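# Illustrative usage (not part of the generated client); the name and query
# values below are made-up examples:
if __name__ == "__main__":
    example = SecurityFilterExclusionFilter(
        name="Exclude staging logs",
        query="source:staging",
    )
    print(example.to_dict())  # expected: {'name': 'Exclude staging logs', 'query': 'source:staging'}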
| 39.994152 | 114 | 0.583126 | [
"Apache-2.0"
] | rchenzheng/datadog-api-client-python | src/datadog_api_client/v2/model/security_filter_exclusion_filter.py | 6,839 | Python |
# -*- coding:utf-8 -*-
"""
"""
import pandas as pd
from pandas.util import hash_pandas_object
from hypernets.tabular.datasets.dsutils import load_bank
from . import if_cuml_ready, is_cuml_installed
if is_cuml_installed:
import cudf
from hypernets.tabular.cuml_ex import CumlToolBox
dd_selector = CumlToolBox.feature_selector_with_drift_detection
@if_cuml_ready
class Test_drift_detection:
def test_shift_score(self):
df = load_bank().head(1000)
df = cudf.from_pandas(df)
selector = dd_selector()
scores = selector._covariate_shift_score(df[:700], df[700:])
print('_covariate_shift_score', scores)
        assert scores['id'] >= 0.95
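        # An index-like 'id' column lets a classifier separate the first 700 rows from
        # the last 300 almost perfectly, hence the near-1 covariate-shift (AUC) score.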
def test_feature_selection(self):
df = load_bank()
df = cudf.from_pandas(df)
y = df.pop('y')
p = int(df.shape[0] * 0.8)
X_train = df[:p]
X_test = df[p:]
# = train_test_split(df, train_size=0.7, random_state=9527)
selector = dd_selector(remove_shift_variable=False,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 15
selector = dd_selector(remove_shift_variable=True,
auc_threshold=0.55,
min_features=15,
remove_size=0.2)
remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)
assert len(remain_features) == 16
def test_drift_detector_split(self):
df = cudf.from_pandas(load_bank())
y = df.pop('y')
X_train, X_test = CumlToolBox.train_test_split(df.copy(), train_size=0.7, shuffle=True, random_state=9527)
dd = dd_selector().get_detector()
dd.fit(X_train, X_test)
assert len(dd.feature_names_) == 17
assert len(dd.feature_importances_) == 17
assert dd.auc_
assert len(dd.estimator_) == 5
proba = dd.predict_proba(df)
assert proba.shape[0] == df.shape[0]
df = cudf.from_pandas(load_bank())
y = df.pop('y')
p = int(df.shape[0] * 0.2)
X_train, X_test, y_train, y_test = dd.train_test_split(df.copy(), y, test_size=0.2)
assert X_train.shape == (df.shape[0] - p, df.shape[1])
assert y_train.shape == (df.shape[0] - p,)
assert X_test.shape == (p, df.shape[1])
assert y_test.shape == (p,)
df['y'] = y
X_train['y'] = y_train
X_test['y'] = y_test
df, X_train, X_test = CumlToolBox.to_local(df, X_train, X_test)
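        # Compare row-wise hashes: the recombined train/test split must be an exact
        # permutation of the original frame (no rows dropped or duplicated).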
df_split = pd.concat([X_train, X_test])
df_hash = hash_pandas_object(df).sort_values()
splitted_hash = hash_pandas_object(df_split).sort_values()
assert (df_hash == splitted_hash).all()
| 35.590361 | 114 | 0.603927 | [
"Apache-2.0"
] | lyhue1991/Hypernets | hypernets/tests/tabular/tb_cuml/drift_detection_test.py | 2,954 | Python |
"""Abstract class for image transports.
Defines generic functions.
"""
# Copyright (c) 2018 Erling Andersen, Haukeland University Hospital, Bergen, Norway
from abc import ABCMeta, abstractmethod # , abstractproperty
# import imagedata.transports
class NoOtherInstance(Exception):
pass
class AbstractTransport(object, metaclass=ABCMeta):
"""Abstract base class definition for imagedata transport plugins.
    Plugins must be a subclass of AbstractTransport and
must define the attributes set in __init__() and
the following methods:
open() method
isfile() method
walk() method
"""
plugin_type = 'transport'
def __init__(self, name, description, authors, version, url, schemes):
object.__init__(self)
self.__name = name
self.__description = description
self.__authors = authors
self.__version = version
self.__url = url
self.__schemes = schemes
@property
def name(self):
"""Plugin name
Single word string describing the image format.
Typical names: file, dicom, xnat
"""
return self.__name
@property
def description(self):
"""Plugin description
Single line string describing the transport method.
"""
return self.__description
@property
def authors(self):
"""Plugin authors
Multi-line string naming the author(s) of the plugin.
"""
return self.__authors
@property
def version(self):
"""Plugin version
String giving the plugin version.
Version scheme: 1.0.0
"""
return self.__version
@property
def url(self):
"""Plugin URL
URL string to the site of the plugin or the author(s).
"""
return self.__url
@property
def schemes(self):
"""List of transport schemes supported by this plugin.
List of strings.
"""
return self.__schemes
@abstractmethod
def walk(self, top):
"""Generate the file names in a directory tree by walking the tree.
Input:
- top: starting point for walk (str)
Return:
- tuples of (root, dirs, files)
"""
pass
@abstractmethod
def isfile(self, path):
"""Return True if path is an existing regular file.
"""
pass
@abstractmethod
def open(self, path, mode='r'):
"""Extract a member from the archive as a file-like object.
"""
pass
@abstractmethod
def close(self):
"""Close the transport
"""
pass
@abstractmethod
def info(self, path) -> str:
"""Return info describing the object
Args:
path (str): object path
Returns:
description (str): Preferably a one-line string describing the object
"""
        pass
| 23.52381 | 83 | 0.592105 | [
"MIT"
] | erling6232/imagedata | src/imagedata/transports/abstracttransport.py | 2,964 | Python |
# coding: utf-8
"""
IncQuery Server
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.12.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TWCRepositoryInfoResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'repository_structure': 'TWCRepositoryStructure',
'last_updated': 'str'
}
attribute_map = {
'repository_structure': 'repositoryStructure',
'last_updated': 'lastUpdated'
}
def __init__(self, repository_structure=None, last_updated=None): # noqa: E501
"""TWCRepositoryInfoResponse - a model defined in OpenAPI""" # noqa: E501
self._repository_structure = None
self._last_updated = None
self.discriminator = None
self.repository_structure = repository_structure
self.last_updated = last_updated
@property
def repository_structure(self):
"""Gets the repository_structure of this TWCRepositoryInfoResponse. # noqa: E501
:return: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501
:rtype: TWCRepositoryStructure
"""
return self._repository_structure
@repository_structure.setter
def repository_structure(self, repository_structure):
"""Sets the repository_structure of this TWCRepositoryInfoResponse.
:param repository_structure: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501
:type: TWCRepositoryStructure
"""
if repository_structure is None:
raise ValueError("Invalid value for `repository_structure`, must not be `None`") # noqa: E501
self._repository_structure = repository_structure
@property
def last_updated(self):
"""Gets the last_updated of this TWCRepositoryInfoResponse. # noqa: E501
:return: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501
:rtype: str
"""
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
"""Sets the last_updated of this TWCRepositoryInfoResponse.
:param last_updated: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501
:type: str
"""
if last_updated is None:
raise ValueError("Invalid value for `last_updated`, must not be `None`") # noqa: E501
self._last_updated = last_updated
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TWCRepositoryInfoResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.858156 | 124 | 0.619168 | [
"Apache-2.0"
] | thomas-bc/mms-autocref | iqs_client/models/twc_repository_info_response.py | 4,351 | Python |
"""This is a set of tools built up over time for working with Gaussian and
QChem input and output."""
########################################################################
# #
# #
# This script was written by Thomas Heavey in 2017. #
# [email protected] [email protected] #
# #
# Copyright 2017 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
pass
| 61.25 | 74 | 0.348105 | [
"Apache-2.0"
] | theavey/QM-calc-scripts | gautools/__init__.py | 1,715 | Python |
# -*- coding: utf-8 -*-
from datetime import date, time
import pytest
from django.contrib.admin import site as admin_site
from resources.admin.period_inline import PeriodModelForm, prefix_weekday
from resources.models import Period, Resource
from resources.models.unit import Unit
from resources.tests.utils import assert_response_contains, get_form_data
@pytest.mark.django_db
@pytest.mark.parametrize("commit", (False, True))
def test_period_model_form(space_resource, commit):
period = Period(resource=space_resource, start=date(2015, 8, 1), end=date(2015, 11, 1), name="plop")
period.full_clean()
period.save()
for wd in range(7):
period.days.create(weekday=wd, opens=time(9, wd * 2), closes=time(12 + wd))
pmf = PeriodModelForm(instance=period)
data = get_form_data(pmf, prepared=True)
# Make every day open at 06, set closed on wednesdays
for key in list(data.keys()):
if key.startswith(prefix_weekday(2, "")):
data[key] = ""
elif key.endswith("opens"):
data[key] = "06:00"
pmf = PeriodModelForm(instance=period, data=data)
assert pmf.is_valid()
period = pmf.save(commit=commit)
if not commit:
period.save()
pmf.save_m2m()
assert all(day.opens.hour == 6 for day in period.days.all())
assert not period.days.filter(weekday=2).exists() # Weekdays _got_ closed, yeah?
@pytest.mark.django_db
@pytest.mark.parametrize("model", (Resource, Unit))
def test_period_inline_containing_admins_work(rf, admin_user, model, space_resource, test_unit):
if model is Resource:
instance = space_resource
elif model is Unit:
instance = test_unit
else:
raise NotImplementedError("Unexpected parametrization")
admin = admin_site._registry[model] # Sorry for accessing a private member :(
request = rf.get("/")
request.user = admin_user
response = admin.change_view(request, instance.pk)
assert_response_contains(response, prefix_weekday(2, "opens")) # should have a weekday field
| 36.642857 | 104 | 0.703704 | [
"MIT"
] | City-of-Helsinki/respa | resources/tests/test_admin_period_inline.py | 2,052 | Python |
import os
import layout
import callbacks # layout needs to be defined before creating callbacks
import routes
import appserver
server = appserver.app.server
if __name__ == "__main__":
    debug_mode = os.getenv("DEBUG", "false") == "true"
    if debug_mode:
        print("Initiating server. Debug mode enabled.")
        # appserver.app.enable_dev_tools(debug=True)
    else:
        print("Initiating server.")
appserver.app.run_server(
debug=debug_mode,
host="0.0.0.0",
port=5000
    )
| 27.4 | 73 | 0.671533 | [
"MIT"
] | budavariam/activity-visualizer | src/app.py | 548 | Python |
# Visualizer is for debugging purposes only
import logging
import math
import random
import threading
import http.server
import socketserver
import os
import re
from shapely import wkt
import matplotlib.pyplot as plt
import mpld3
import screeninfo
import tempfile
import webbrowser
import owlready2
from shapely import geometry
import numpy as np
from tqdm import tqdm
import time as pytime
import auto.auto
from criticality_recognition import phenomena_extraction
# TODO
# - visualize scenario level CPs
# - show has distance to in table for each individual - as ternary relations - instead of omitting it
####################
# Config constants #
####################
# Classes to not show in visualization
_NO_PRINTING_CLASSES = {"physics.Has_Distance_To", "perception.Is_Full_Occlusion", "perception.Is_Occlusion"}
# Data/object properties to hide from the individual tables shown when hovering
_NO_PRINTING_PROPERTIES = {"perceptional_property", "traffic_related_concept_property",
"descriptive_traffic_entity_property", "traffic_entity_property", "activity_property",
"physical_property", "traffic_modeling_property", "traffic_entity_property",
"automotive_urban_traffic_property", "L1_property", "L2_property", "L3_property",
"L4_property", "L5_property", "L6_property", "traffic_model_element_property",
"criticality_phenomenon_as_object_property", "has_positional_relation",
"has_spatial_relation", "has_dynamical_relation", "SF_spatial_relation",
"performance_spatial_relation", "EH_spatial_relation", "RCC8_spatial_relation", "rcc8dc",
"ehDisjoint"}
# If one hides long property lists, this is the number after which the list is cut off
_MAX_PROPS_DISPLAY = 4
_AVOID_LABEL_COLLISIONS = False
# Logging
logger = logging.getLogger(__name__)
# Helper function for sorting CPs & individuals
def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(str(s))]
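# Example: sorted(["CP10", "CP2"], key=natural_sort_key) -> ["CP2", "CP10"]
# (digit runs compare numerically, everything else case-insensitively).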
#######
# CSS #
#######
# Scene CSS (added is iframes to scenario HTML)
scene_css = """
<style>
svg * {
font-size: 4pt;
}
table {
border: solid 1px #DDEEEE;
border-collapse: collapse;
border-spacing: 0;
font: normal 8px, sans-serif;
}
thead th {
background-color: #DDEFEF;
border: solid 1px #DDEEEE;
color: #336B6B;
padding: 3px;
text-align: left;
text-shadow: 1px 1px 1px #fff;
font-size: 10pt;
}
tbody td {
background-color: #FFFFFF;
border: solid 1px #DDEEEE;
color: #333;
padding: 3px;
text-shadow: 1px 1px 1px #fff;
font-size: 8pt;
}
.cp-tooltip {}
</style>
"""
# Scenario CSS (main CSS)
scenario_css = """
<style>
.slider {
-webkit-appearance: none; /* Override default CSS styles */
appearance: none;
width: 100%; /* Full-width */
height: 25px; /* Specified height */
background: #d3d3d3; /* Grey background */
outline: none; /* Remove outline */
opacity: 0.7; /* Set transparency (for mouse-over effects on hover) */
-webkit-transition: .2s; /* 0.2 seconds transition on hover */
transition: opacity .2s;
}
.slider:hover {
opacity: 1; /* Fully shown on mouse-over */
}
.slider::-webkit-slider-thumb {
-webkit-appearance: none; /* Override default look */
appearance: none;
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
.slider::-moz-range-thumb {
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
</style>"""
def visualize_scenario(scenario, cps=None):
"""
Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).
:param scenario: Either a list of worlds, each world representing a single scene or a single world representing a
whole scenario
:param cps: A list of criticality phenomena which optionally to visualize as well.
:return: The path to the directory in which to find the created HTML visualization.
"""
pl_html = []
scenario_inst = None
if cps is None:
cps = []
# Fetch scene list
if type(scenario) == list:
scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)
[0] for scene_world in scenario]
elif type(scenario) == owlready2.namespace.World or type(scenario) == owlready2.World:
tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario)
scenario_inst = scenario.search(type=tm.Scenario)[0]
scenes = list(filter(lambda x: tm.Scene in x.is_a, scenario_inst.has_traffic_model))
else:
raise ValueError
scenes = sorted(scenes, key=lambda x: x.inTimePosition[0].numericPosition[0])
# Assemble scenario title
title = "Scenario"
if scenario_inst and hasattr(scenario_inst, "identifier") and len(scenario_inst.identifier) > 0:
title += " " + str(scenario_inst.identifier[0])
scenario_info = "(" + str(len(scenes)) + " Scenes)"
# Main HTML code for index.html
html_body = """<!DOCTYPE html>
<html>
<head>
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<meta charset="utf-8">""" + scenario_css + """
<title>""" + title + """</title>
</head>
<body>
<div class=\"d-flex flex-row justify-content-center\"><div class=\"mt-3 py-1 px-6 alert alert-info\" style=\"display: inline-block\" role=\"alert\"><center><h5>""" + title + """ """ + scenario_info + """</h5></center></div></div>
<div class="slidecontainer m-2">
<input type="range" min="1" max=\"""" + str(len(scenes)) + """\" value="1" class="slider" id="myRange">
</div>
<script>
var slider = document.getElementById("myRange");
var last_set = 1
var show_all_cps = true
slider.oninput = function() {
var output = document.getElementById("plt" + this.value);
var last_output = document.getElementById("plt" + last_set);
last_output.style.display = 'none';
output.style.display = 'block';
last_set = this.value
}
function toggle_cps_all_iframes() {
show_all_cps = !show_all_cps
$(".cp-all-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".cp-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".scene-plot").each(function(i) {
this.contentWindow.toggle_cps(show_all_cps)
})
}
function toggle_cp_class(ele, cp_cls_id) {
// 0. disable automatically checked checkbox (will be added again at step 3)
ele.checked = !ele.checked
// 1. find active scene plot
active_scene = $(".scene-plot-container").filter(function(i) {
return this.style.display !== "none"
})[0]
// 2. get CP pred. str for given cp_cls_id
cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]
// 3. Toggle all buttons for this CP pred
$("label > span:contains(" + cp_pred + ")").each(function(i) {
this.parentElement.classList.toggle("active")
this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked
})
// 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index
$(".scene-plot").each(function(k) {
cp_cls_id_scene = -1
for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {
if (cp_pred === this.contentWindow.cp_predicates[i]) {
cp_cls_id_scene = i
}
}
if (cp_cls_id_scene >= 0) {
this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)
}
})
}
</script>
"""
pl_html.append(html_body)
iframes = []
def get_color(p):
# Fetches a different color each time, but ensures that it has a readable contrast.
_LUMA_LIMIT = 170
color = 0
luma = _LUMA_LIMIT
while luma >= _LUMA_LIMIT:
color = random.randrange(0, 0xFFFFFF, 0xF)
luma = 0.2126 * ((color >> 16) & 0xff) + 0.7152 * ((color >> 8) & 0xff) + 0.0722 * ((color >> 0) & 0xff)
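            # 0.2126/0.7152/0.0722 are the Rec. 709 luma weights; keep sampling until
            # the colour is dark enough (< _LUMA_LIMIT) to stay readable on white.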
return "#" + "%06x" % color
# Create HTML for each scene
for i, scene in enumerate(scenes):
logger.info("Plotting scene " + str(i + 1) + " / " + str(len(scenes)))
scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)]
cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects]))))
cp_color = 0
no_geo_entities = []
width = 24.5
height = 10
try:
primary_screens = list(filter(lambda x: x.is_primary, screeninfo.get_monitors()))
if len(primary_screens) > 0:
width = (primary_screens[0].width_mm / 25.4) * 0.73
height = (primary_screens[0].height_mm / 25.4) * 0.73
except screeninfo.common.ScreenInfoError:
logger.info("No screens found, using default plot size of " + str(width) + " in x " + str(height) + " in")
fig = plt.figure(figsize=(width, height))
plt.axis("equal")
entity_labels = []
entity_relations = []
relations_per_cp_class = dict()
cps_relations = []
cps_for_tooltips = []
centroids_x = []
centroids_y = []
plotted_labels = []
entity_points = dict()
traffic_entities = tqdm(scene.has_traffic_entity)
for entity in traffic_entities:
traffic_entities.set_description(str(entity))
if len(entity.hasGeometry) > 0:
for geo in entity.hasGeometry:
shape = wkt.loads(geo.asWKT[0])
entity_cp_relations = []
points = None
if hasattr(shape, "exterior"):
points = shape.exterior.xy
try:
hasattr(shape, "coords")
points = shape.coords.xy
except NotImplementedError:
pass
if points:
if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y))\
.any():
x = shape.centroid.x + 0.0
y = shape.centroid.y + 0.8
plt.plot((shape.centroid.x, x), (shape.centroid.y, y), "k-")
else:
x = shape.centroid.x
y = shape.centroid.y
entity_points[entity] = (x, y)
centroids_x.append(x)
centroids_y.append(y)
plt.plot(*points, alpha=.6)
if auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in \
entity.INDIRECT_is_a:
plt.fill(*points, alpha=.3)
if entity.has_yaw is not None:
x_dir = (0.9 * math.cos(math.radians(entity.has_yaw)))
y_dir = (0.9 * math.sin(math.radians(entity.has_yaw)))
plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape="full",
length_includes_head=True, color="gray", alpha=0.6, head_width=1)
entity_labels.append(_describe_entity(entity))
# Plot CPs
entity_scene_cps = list(filter(lambda scp: entity in scp.subjects, scene_cps))
if len(entity_scene_cps) > 0:
plt.plot(x, y, "o", color="r", mec="k", markersize=3, alpha=1)
ent_color = "red"
else:
ent_color = "black"
if entity.identifier and len(entity.identifier) > 0 and not entity.is_persistent and not \
(isinstance(entity.identifier[0], str) and entity.identifier[0].startswith("repr")):
plt.annotate(entity.identifier[0], (x+0.2, y+0.2), color=ent_color)
already_drawn_cps = []
# init dict
for cp in entity_scene_cps:
if cp.predicate not in relations_per_cp_class.keys():
relations_per_cp_class[cp.predicate] = []
for cp in entity_scene_cps:
if cp not in already_drawn_cps:
same_line_cps = [x for x in entity_scene_cps if
[y for z in x.objects.values() for y in z] ==
[y for z in cp.objects.values() for y in z]]
labels = [(x.predicate.split("(")[0],
(x.predicate.split("(")[1].replace(")", ""), str(x)))
for x in same_line_cps]
already_drawn_cps += same_line_cps
subj_x = x
subj_y = y
for objs in cp.objects.values():
for obj in objs:
if len(obj.hasGeometry) > 0:
if obj in entity_points.keys():
obj_x = entity_points[obj][0]
obj_y = entity_points[obj][1]
else:
geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0])
obj_x = geom_o.centroid.x
obj_y = geom_o.centroid.y
m = (obj_y - subj_y) / (obj_x - subj_x)
b = subj_y - m * subj_x
head_width = 0.2
head_length = 1.5 * head_width
arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y),
color=cp_colors[cp_color], shape="full",
length_includes_head=True, head_width=head_width,
head_length=head_length)
if len(labels[0]) > 1:
label_row = " ".join([label[0] for label in labels])
else:
label_row = labels[0]
x_offset = (len(label_row) * 0.055) / 2 - 0.055
if subj_x > obj_x:
label_x = obj_x + abs(subj_x - obj_x) / 2 - x_offset
else:
label_x = obj_x - abs(subj_x - obj_x) / 2 - x_offset
a = math.degrees(math.atan(m))
for l_i, label in enumerate(labels):
label_string = label[0].replace("CP_", "")
label_len = (len(label_string) * 0.09 + 0.1)
label_x_offset = abs(math.cos(math.atan(m)) * label_len)
while True:
# Finds a free space to plot label
label_y = m * label_x + b + 0.05
label_x_1 = label_x - label_x_offset / 2 + 0.05
label_y_1 = m * label_x_1 + b
label_x_2 = label_x + label_x_offset / 2 + 0.05
label_y_2 = m * label_x_2 + b
label_line1 = geometry.LineString([(label_x_1, label_y_1),
(label_x_2, label_y_2)])
new_bb = label_line1.buffer(0.1, cap_style=2)
new_bb_rect = list(zip(*new_bb.exterior.xy))[:-1]
if not _AVOID_LABEL_COLLISIONS or not \
_has_collision_with_bbs(plotted_labels, new_bb_rect):
break
label_x += label_x_offset / 10
annot = plt.annotate(label_string,
(label_x, label_y), color=cp_colors[cp_color],
rotation=a, fontsize=2, rotation_mode="anchor")
entity_cp_relations.append(annot)
cps_relations.append(annot)
relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow]
cps_for_tooltips.append(same_line_cps[l_i])
plotted_labels.append(new_bb_rect)
label_x += label_x_offset
subj_x = obj_x
subj_y = obj_y
entity_cp_relations += [arrow]
cp_color = (cp_color + 1) % len(cp_colors)
entity_relations.append(entity_cp_relations)
elif len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0:
no_geo_entities.append(_describe_entity(entity))
logger.info("Done with layout, creating MPLD3 plot, JS plugins, and HTML string")
pl2 = plt.plot(centroids_x, centroids_y, "o", color="b", mec="k", markersize=2, mew=1, alpha=.4)
tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations,
targets_per_cp=relations_per_cp_class)
fig.tight_layout()
mpld3.plugins.connect(fig, tooltip_individuals)
for h, cp_text in enumerate(cps_relations):
tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h])
mpld3.plugins.connect(fig, tooltip_cp)
html = "\n\t\t<div class=\"container-fluid scene-plot-container\" id=\"plt" + str(i + 1) + "\" style =\""
if i != 0:
html += "display: none;"
html += "\">"
html += """
<div class="row">
<div class="col-md-1">
"""
cp_count_total = len([x for x in cps if (isinstance(x.traffic_model, list) and scene in x.traffic_model) or
x.traffic_model == scenario_inst])
html += """<div class="">
<label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">
<input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>
<span>Show all criticality phenomena (%s)</span>
</label>""" % ("100%", str(i), str(cp_count_total))
for l, pred in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)):
cp_count = len([x for x in cps if x.predicate == pred and ((isinstance(x.traffic_model, list) and
scene in x.traffic_model) or x.traffic_model == scenario_inst)])
html += """
<br />
<label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">
<input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>
<span>%s (%s)</span>
</label>""" % ("100%", str(i), str(l), str(l), pred, str(cp_count))
html += """
</div>
</div>
<div class="col-md-11">
"""
html += "<div class=\"embed-responsive embed-responsive-16by9\">\n"
html += "\t\t\t\t\t\t<iframe class=\"scene-plot\" src=\"scene" + str(i + 1) + ".html\" class=\"embed-responsive-item\" style=\"width: 100%; height: " + str(height*1.27) + "in\" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n"
iframe_html = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
</head>
<body>"""
iframe_html += scene_css
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<div class="btn-group btn-group-toggle" data-bs-toggle="buttons">
<label class="btn btn-secondary active">
<input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals
</label>
<label class="btn btn-secondary active">
<input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip
</label>
</div>
</div>
<script>
var show_tooltips = true
var show_long_ind = true
cps = []
cp_targets = []
cp_targets_per_class = []
function toggle_tooltips(ele) {
ele.parentElement.classList.toggle("active")
show_tooltips = !show_tooltips
}
function toggle_all_ind_relations(ele) {
ele.parentElement.classList.toggle("active")
show_long_ind = !show_long_ind
}
function toggle_cp_targets(targets, state) {
for (let j = 0; j < targets.length; j++) {
var x = mpld3.get_element(targets[j])
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++) {
for (var l = 0; l < tog._groups[k].length; l++){
if (state) {
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
function toggle_cps(state) {
for (let i = 0; i < cp_targets.length; i++) {
toggle_cp_targets(cp_targets[i], state)
}
}
function toggle_cp_class(cp_class, state) {
targets = cp_targets_per_class[cp_class]
toggle_cp_targets(targets, state)
}
</script>
<div class="card m-2">
<div class="card-title d-flex flex-row justify-content-center m-1">
<h5>"""
if len(scene.inTimePosition) > 0 and len(scene.inTimePosition[0].numericPosition) > 0:
time = "%.2f s" % scene.inTimePosition[0].numericPosition[0]
if scenario_inst and len(scenario_inst.hasEnd) > 0 and len(scenario_inst.hasEnd[0].inTimePosition) > 0 and \
len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0:
time += " / %.2f s" % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]
else:
time += " / " + str(len(scenes))
else:
time = str(i) + " / " + str(len(scenes))
iframe_html += "Scene " + time + "<br />"
iframe_html += """
</h5>
</div>
<div class="card-body m-0 p-0 d-flex justify-content-center">
"""
scene_html = mpld3.fig_to_html(fig)
iframe_html += ''.join("\t\t"+line+"\n" for line in scene_html.splitlines())
iframe_html += """
</div>
</div>"""
if len(no_geo_entities) > 0:
iframe_html += """
<div class="d-flex flex-row justify-content-center">
<a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">
Show scene individuals with no geometric representation (%s)
</a>
</div>
<div class="container-fluid collapse" id="noGeoCollapse">
<div class="card card-body m-2">""" % str(len(no_geo_entities))
iframe_html += "".join(no_geo_entities)
iframe_html += """
</div>
</div>"""
iframe_html += "\t</body>\n</html>"
iframes.append(iframe_html)
html += "\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>"
pl_html.append(html)
# Assemble main HTML
pl_html.append("\n\t</body>\n</html>")
# Write main HTML to index.html
tmp_dir = tempfile.mkdtemp()
index_path = tmp_dir + "/index.html"
with open(index_path, "w") as file:
for html in pl_html:
file.write(html)
# Write each scene HTML to a single file
for i, iframe in enumerate(iframes):
frame_path = tmp_dir + "/scene" + str(i + 1) + ".html"
with open(frame_path, "w") as file:
for html in iframe:
file.write(html)
# Starts webserver
os.chdir(tmp_dir)
threading.Thread(target=socketserver.TCPServer(("", 8000),
http.server.SimpleHTTPRequestHandler).serve_forever).start()
logger.info("Visualization is available at: http://localhost:8000")
webbrowser.open("http://localhost:8000")
return tmp_dir
def _describe_entity(entity):
"""
Describes the given traffic entity as an HTML list.
:param entity: An object of an owlready2 class.
:return: The HTML-representation of entity.
"""
cls = phenomena_extraction.get_most_specific_classes([entity])
label = "<table class=\"m-2\"><thead><tr><th>Individual</th><th>" + str(entity)
label += " (" + ", ".join(cls[0][1]) + ")</th></tr></thead><tbody><tr><td>is_a</td><td>"
label += ", ".join([str(x) for x in entity.is_a])
label += "</td></tr>"
for prop in entity.get_properties():
if str(prop.python_name) not in _NO_PRINTING_PROPERTIES:
label += "<tr>"
label += "<td>"
label += str(prop.python_name)
label += "</td>"
label += "<td>"
label += ", ".join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]])
if len(prop[entity]) > _MAX_PROPS_DISPLAY:
label += "<text class=\"extended_ind_props\">"
label += ", ".join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + "</text>"
label += "<text class=\"extended_ind_props_dots\" style=\"display: none;\">...</text>"
label += "</td>"
label += "</tr>"
label += "</tbody></table>"
return label
def _describe_cp(cp):
label = "<table class=\"m-2\"><thead><tr><th>Criticality Phenomenon</th><th>" + \
str(cp.predicate).split("(")[1].replace(")", "")
label += "</th></tr></thead><tbody><tr><td>Start time</td><td>"
time = cp.at_time()
if isinstance(time, tuple):
label += str(time[0])
else:
label += str(time)
label += "</td></tr><tr><td>End time</td><td>"
if isinstance(time, tuple):
label += str(time[1])
else:
label += str(time)
label += "</td></tr><tr><td>Subject(s)</td><td>"
if len(cp.subjects) > 0:
subj_and_classes = phenomena_extraction.get_most_specific_classes(cp.subjects)
label += "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes])
label += "</td></tr><tr><td>Predicate</td><td>"
label += str(cp.predicate)
label += "</td></tr><tr><td>Object(s)</td><td>"
if len(cp.objects) > 0:
for obj_predicate in cp.objects.keys():
obj_and_classes = phenomena_extraction.get_most_specific_classes(cp.objects[obj_predicate])
label += obj_predicate + ":<br/>" + "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in
obj_and_classes])
if len(cp.objects.keys()) > 1:
label += "<br/>"
label += "</td></tr>"
label += "</tbody></table>"
return label
#################
# MPLD3 Plugins #
#################
class ToolTipAndClickInfo(mpld3.plugins.PointHTMLTooltip):
# Handles:
# 1. the criticality phenomena toggling when clicking on CP subjects (red circles)
# 2. the mouse-overs when hovering over subjects
# 3. the Ctrl+Click new window action when clicking on subjects
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
targets_per_cp:null,
cps:null,
hoffset:0,
voffset:10,
targets:null};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id)
var labels = this.props.labels
cps = obj.elements()
cp_targets = this.props.targets
cp_targets_per_class = this.props.targets_per_cp
cp_predicates = this.props.cps
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
function show_cp(d, i) {
if (!window.event.ctrlKey) {
for (let j = 0; j < cp_targets[i].length; j++) {
var x = mpld3.get_element(cp_targets[i][j]);
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++){
for (var l = 0; l < tog._groups[k].length; l++){
if (tog._groups[k][l].style.display === "none"){
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
}
obj.elements()
.on("mouseover", function(d, i) {
if (show_tooltips) {
tooltip.html(labels[i]).style("visibility", "visible");
var long_descrs = document.getElementsByClassName("extended_ind_props")
var dots_descrs = document.getElementsByClassName("extended_ind_props_dots")
for (let i = 0; i < long_descrs.length; i++) {
if(!show_long_ind) {
long_descrs[i].style.display = "none";
} else {
long_descrs[i].style.display = "inline";
}
}
for (let i = 0; i < dots_descrs.length; i++) {
if(!show_long_ind) {
dots_descrs[i].style.display = "inline";
} else {
dots_descrs[i].style.display = "none";
}
}
}
})
.on("mousemove", function(d, i) {
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mousedown.callout", show_cp)
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");
})
.on("click", function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip.html(labels[i])._groups[0][0].innerHTML
);
}
});
};
"""
def __init__(self, points, labels=None, targets=None, targets_per_cp=None, hoffset=0, voffset=10, css=None):
targets_ = []
for x in targets or []:
x_ = []
for y in x:
x_.append(mpld3.utils.get_id(y))
targets_.append(x_)
self.targets_per_cp = []
self.cps = []
if targets_per_cp:
self.cps = sorted(targets_per_cp.keys(), key=natural_sort_key)
for cp in self.cps:
x_ = []
for y in targets_per_cp[cp]:
x_.append(mpld3.utils.get_id(y))
self.targets_per_cp.append(x_)
super().__init__(points, labels, targets_, hoffset, voffset, css)
self.dict_["targets_per_cp"] = self.targets_per_cp
self.dict_["cps"] = self.cps
class CPTooltip(mpld3.plugins.PluginBase):
# Handles the Ctrl+Click action on criticality phenomena ID (opens a new tab).
JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("cpstooltip", CPTooltip);
CPTooltip.prototype = Object.create(mpld3.Plugin.prototype);
CPTooltip.prototype.constructor = CPTooltip;
CPTooltip.prototype.requiredProps = ["id", "tooltip_html"];
function CPTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
CPTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var tooltip_html = this.props.tooltip_html;
var tooltip = d3.select("body").append("div")
.attr("class", "cp-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.obj._groups[0][0].onmouseover = function(d, i) {
tooltip.html(tooltip_html).style("visibility", "visible");
};
obj.obj._groups[0][0].onmousemove = function(d, i) {
tooltip
.style("top", d.clientY + 10 + "px")
.style("left", d.clientX + 0 + "px");
}.bind(this);
obj.obj._groups[0][0].onclick = function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip_html
);
}
};
obj.obj._groups[0][0].onmouseout = function(d, i) {
tooltip.style("visibility", "hidden");
};
}
"""
def __init__(self, text, cp):
tooltip_html = _describe_cp(cp)
self.dict_ = {"type": "cpstooltip",
"id": mpld3.utils.get_id(text),
"tooltip_html": tooltip_html}
def _has_collision_with_bbs(existing_bbs, new_bb):
"""
Checks if the new rectangle (new_bb) collides with some existing rectangles.
"""
a_left = min([x[0] for x in new_bb])
a_right = max([x[0] for x in new_bb])
a_bottom = min([x[1] for x in new_bb])
a_top = max([x[1] for x in new_bb])
for bb in existing_bbs:
b_left = min([x[0] for x in bb])
b_right = max([x[0] for x in bb])
b_bottom = min([x[1] for x in bb])
b_top = max([x[1] for x in bb])
if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
return True
return False
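# Illustrative check (not part of the original module) of the axis-aligned overlap test:
#   _has_collision_with_bbs([[(0, 0), (2, 0), (2, 2), (0, 2)]],
#                           [(1, 1), (3, 1), (3, 3), (1, 3)])  # -> True (boxes overlap)
#   _has_collision_with_bbs([[(0, 0), (2, 0), (2, 2), (0, 2)]],
#                           [(5, 5), (6, 5), (6, 6), (5, 6)])  # -> False (disjoint)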
| 49.091995 | 298 | 0.497785 | [
"MIT"
] | lu-w/criticality-recognition | auto/auto_visualizer/auto_visualizer.py | 41,090 | Python |
import pytest
from cool_search import BaseClass, base_function
given = pytest.mark.parametrize
@given("fn", [BaseClass(), base_function])
def test_parameterized(fn):
assert "hello from" in fn()
def test_base_function():
assert base_function() == "hello from base function"
def test_base_class():
assert BaseClass().base_method() == "hello from BaseClass"
| 19.736842 | 62 | 0.733333 | [
"Unlicense"
] | khulaifi95/cool-search | tests/test_base.py | 375 | Python |
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sib_api_v3_sdk
from sib_api_v3_sdk.models.get_extended_contact_details_statistics import GetExtendedContactDetailsStatistics # noqa: E501
from sib_api_v3_sdk.rest import ApiException
class TestGetExtendedContactDetailsStatistics(unittest.TestCase):
"""GetExtendedContactDetailsStatistics unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetExtendedContactDetailsStatistics(self):
"""Test GetExtendedContactDetailsStatistics"""
# FIXME: construct object with mandatory attributes with example values
# model = sib_api_v3_sdk.models.get_extended_contact_details_statistics.GetExtendedContactDetailsStatistics() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 44.926829 | 820 | 0.704126 | [
"MIT"
] | Danilka/APIv3-python-library | test/test_get_extended_contact_details_statistics.py | 1,842 | Python |
from rest_framework import generics
from ..models import Article, Country, Source
from .serializers import ArticleSerializer, CountrySerializer, SourceSerializer
class ArticleListView(generics.ListCreateAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
permission_classes = []
class ArticleDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
permission_classes = []
class CountryListView(generics.ListCreateAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
permission_classes = []
class CountryDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
permission_classes = []
class SourceListView(generics.ListCreateAPIView):
queryset = Source.objects.all()
serializer_class = SourceSerializer
permission_classes = []
class SourceDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = Source.objects.all()
serializer_class = SourceSerializer
permission_classes = []
| 28.725 | 79 | 0.781549 | [
"MIT"
] | XOyarz/polstats-django | main_app/api/views.py | 1,149 | Python |
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class Proj2062SpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class Proj2062DownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
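# A minimal sketch of how these classes are typically enabled in the project's
# settings.py (assumed module path and Scrapy's default priority of 543; the
# repository's actual settings are not shown here).
#
# SPIDER_MIDDLEWARES = {
#     'proj_2062.middlewares.Proj2062SpiderMiddleware': 543,
# }
# DOWNLOADER_MIDDLEWARES = {
#     'proj_2062.middlewares.Proj2062DownloaderMiddleware': 543,
# }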
| 35.096154 | 78 | 0.674521 | ["MIT"] | miccaldas/new_rss | support_files/scraping/entries/proj_2062/proj_2062/middlewares.py | 3,652 | Python |
import functools as ft
import inspect
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import pydantic
from . import base
class PydanticValidator(base.BaseValidator):
"""
Parameters validator based on `pydantic <https://pydantic-docs.helpmanual.io/>`_ library.
Uses python type annotations for parameters validation.
:param coerce: if ``True`` returns converted (coerced) parameters according to parameter type annotation
otherwise returns parameters as is
"""
def __init__(self, coerce: bool = True, **config_args: Any):
self._coerce = coerce
config_args.setdefault('extra', 'forbid')
# https://pydantic-docs.helpmanual.io/usage/model_config/
self._model_config = type('ModelConfig', (pydantic.BaseConfig,), config_args)
def validate_method(
self, method: Callable, params: Optional[Union[list, dict]], exclude: Iterable[str] = (), **kwargs: Any,
) -> Dict[str, Any]:
"""
Validates params against method using ``pydantic`` validator.
:param method: method to validate parameters against
:param params: parameters to be validated
:param exclude: parameter names to be excluded from validation
:returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is
:raises: ValidationError
"""
signature = self.signature(method, exclude)
schema = self.build_validation_schema(signature)
params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config)
bound_params = self.bind(signature, params)
try:
obj = params_model(**bound_params.arguments)
except pydantic.ValidationError as e:
raise base.ValidationError(*e.errors()) from e
return {attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments
@ft.lru_cache(maxsize=None)
def build_validation_schema(self, signature: inspect.Signature) -> Dict[str, Any]:
"""
Builds pydantic model based validation schema from method signature.
:param signature: method signature to build schema for
:returns: validation schema
"""
field_definitions = {}
for param in signature.parameters.values():
if param.kind is inspect.Parameter.VAR_KEYWORD:
field_definitions[param.name] = (
Optional[Dict[str, param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
field_definitions[param.name] = (
Optional[List[param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
else:
field_definitions[param.name] = (
param.annotation if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else ...,
)
return field_definitions
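# A minimal usage sketch (not part of the library), assuming the base
# validator binds a positional list the way inspect.Signature.bind does; the
# example method and values are made up.
#
# def add(a: int, b: int = 0) -> int:
#     return a + b
#
# validator = PydanticValidator()
# coerced = validator.validate_method(add, ['1', '2'])
# # With the default coerce=True, pydantic converts the strings to ints:
# # coerced == {'a': 1, 'b': 2}
# # Invalid parameters raise base.ValidationError carrying pydantic's error list.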
| 40.385542 | 118 | 0.654236 | ["Unlicense"] | bernhardkaindl/pjrpc | xjsonrpc/server/validators/pydantic.py | 3,352 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
import re
from urllib.parse import urljoin
from requests import get
from time import sleep
from datetime import datetime
from pages.webview.about_this_book import AboutBook
from pages.webview.content_page import ContentPage
from tests import markers
from pages.webview.home import Home
from pages.webview.content import Content
from tests.utils import similar
@markers.webview
@markers.test_case("C193738")
@markers.nondestructive
@markers.parametrize(
"is_archive,path,expected_response_status_code",
[
# FIXME Requires varnish
# (False, '/content/col23946', 301),
(True, "/content/col23946", 301),
# FIXME Requires varnish
# (False, '/content/col23946/1.1', 301),
(True, "/content/col23946/1.1", 301),
(False, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e", 200),
(True, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e", 302),
(
False,
(
"/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e"
":7d039be2-93c6-4f32-a469-41689bab7225"
),
200,
),
(
True,
(
"/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e"
":7d039be2-93c6-4f32-a469-41689bab7225"
),
302,
),
(False, "/contents/[email protected]", 200),
(True, "/contents/[email protected]", 200),
(
False,
(
"/contents/[email protected]"
":7d039be2-93c6-4f32-a469-41689bab7225"
),
200,
),
(
False,
(
"/contents/[email protected]"
":7d039be2-93c6-4f32-a469-41689bab7225@5"
),
200,
),
(False, "/contents/TqqPA4io", 200),
(True, "/contents/TqqPA4io", 302),
(False, "/contents/TqqPA4io:fQOb4pPG", 200),
(True, "/contents/TqqPA4io:fQOb4pPG", 302),
(False, "/contents/[email protected]", 200),
(True, "/contents/[email protected]", 301),
(False, "/contents/[email protected]:fQOb4pPG", 200),
(True, "/contents/[email protected]:fQOb4pPG", 301),
(False, "/contents/[email protected]:fQOb4pPG@5", 200),
(True, "/contents/[email protected]:fQOb4pPG@5", 301),
],
)
def test_content_status_codes(
webview_base_url, archive_base_url, is_archive, path, expected_response_status_code
):
# GIVEN some URL and the expected redirect code
if is_archive:
url = urljoin(archive_base_url, path)
else:
url = urljoin(webview_base_url, path)
# WHEN we visit the URL
# NOTE: Don't bother trying to get status codes using Selenium
# https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141
response = get(url, allow_redirects=False)
# THEN we get the expected redirect code
assert response.status_code == expected_response_status_code
@markers.webview
@markers.test_case("C194465")
@markers.nondestructive
@markers.parametrize(
"id",
[
"[email protected]:E3XenWEQ",
"[email protected]:E3XenWEQ@2",
"[email protected]:rKBtkIWG@2",
"[email protected]:fQOb4pPG@2",
],
)
def test_canonical_link_is_correct(webview_base_url, selenium, id):
# GIVEN a book's content page
content = Content(selenium, webview_base_url, id=id).open()
section_title = content.section_title
# WHEN the book's canonical url is visited
selenium.get(content.canonical_url)
content.wait_for_page_to_load()
# THEN we end up in the same page
# NOTE: we check the section title instead of the url because the canonical link seems to
# take us to the latest version of the content, no matter which version we started on
    # NOTE: Newer versions of the book may not have the section number, so we check for
    # containment in the section_title string instead of equality.
assert content.section_title in section_title
@markers.webview
@markers.test_case("C176232", "C176233")
@markers.nondestructive
def test_navs_and_elements_are_displayed(webview_base_url, selenium):
# GIVEN the home page
home = Home(selenium, webview_base_url).open()
# WHEN a book is clicked
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
# THEN the site navbar and content nav are displayed
assert content.header.is_nav_displayed
content_header = content.content_header
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
# Section title is on top of main content section (white area)
main_content_section = content.main_content_section
section_title_div_location = content.section_title_div_location
section_title_div_size = content.section_title_div_size
# Section title inside main content section
assert section_title_div_location["x"] >= main_content_section.location["x"]
assert section_title_div_location["y"] >= main_content_section.location["y"]
assert (
section_title_div_location["x"] + section_title_div_size["width"]
<= main_content_section.location["x"] + main_content_section.size["width"]
)
assert (
section_title_div_location["y"] + section_title_div_size["height"]
<= main_content_section.location["y"] + main_content_section.size["height"]
)
# Section title on top of main content section
assert (
section_title_div_location["y"] - main_content_section.location["y"]
<= section_title_div_size["height"]
)
@markers.webview
@markers.test_case("C132542")
@markers.nondestructive
def test_author_contains_openstax(webview_base_url, selenium):
# GIVEN the home page and a book
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
# WHEN the book's cover is clicked
content = book.click_book_cover()
# THEN the displayed author is OpenStax
content_header = content.content_header
assert content_header.is_book_by_displayed
assert content_header.are_authors_displayed
assert "OpenStax" in content_header.authors
@markers.webview
@markers.test_case("C176242")
@markers.nondestructive
def test_toc_is_displayed(webview_base_url, selenium):
# GIVEN a book's content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
# WHEN the contents button is clicked
content.header_nav.click_contents_button()
toc = content.table_of_contents
# THEN the table of contents is displayed
assert toc.is_displayed
assert toc.number_of_chapters > 0
assert toc.number_of_pages > 0
@markers.webview
@markers.smoke
@markers.test_case("C176243", "C176244")
@markers.nondestructive
def test_toc_navigation(webview_base_url, selenium):
# GIVEN a book's table of contents
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
content.header_nav.click_contents_button()
toc = content.table_of_contents
# WHEN a chapter is expanded and we navigate to one of its pages
chapter = toc.chapters[0]
chapter = chapter.click()
page = chapter.pages[1]
chapter_section = page.chapter_section
title = page.title
content = page.click()
# THEN we end up at the correct page
assert type(content) is Content
assert content.chapter_section == chapter_section
assert content.section_title == title
@markers.webview
@markers.smoke
@markers.test_case("C176257")
@markers.nondestructive
def test_share_on_top_right_corner(webview_base_url, selenium):
# GIVEN the home page
home = Home(selenium, webview_base_url).open()
# WHEN a book is clicked
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
# THEN social share links are displayed in the top right corner
share = content.share
assert share.is_displayed
assert share.is_facebook_share_link_displayed
assert share.is_twitter_share_link_displayed
root = content.share.root
# Top half
assert root.location["y"] + root.size["height"] < selenium.get_window_size()["height"] / 2
# Right half
assert root.location["x"] > selenium.get_window_size()["width"] / 2
@markers.webview
@markers.smoke
@markers.test_case("C132549", "C175148")
@markers.nondestructive
@markers.parametrize(
"uuid,query,has_results,result_index,has_os_figures,has_os_tables",
[
(
"36004586-651c-4ded-af87-203aca22d946",
"mitosis genetics gorilla",
False,
None,
None,
None,
),
("bb62933e-f20a-4ffc-90aa-97b36c296c3e", "Fizyka", True, 1, True, False),
("bb62933e-f20a-4ffc-90aa-97b36c296c3e", "Tabela", True, 3, True, True),
],
)
def test_in_book_search(
webview_base_url,
selenium,
uuid,
query,
has_results,
result_index,
has_os_figures,
has_os_tables,
):
# GIVEN a book's content page and a query
content = Content(selenium, webview_base_url, id=uuid).open()
# WHEN we search the book for the given query
search_results = content.header_nav.search(query)
# THEN search results are present (or not) and bolded and link to the matching content
results = search_results.results
result_count = search_results.result_count
assert len(results) == result_count
if not has_results:
assert result_count == 0
return
assert result_count > 0
words = query.split()
for result in results:
for word in words:
assert result.count_occurrences(word) == result.count_bold_occurrences(word)
result = results[result_index]
title = result.title
content = result.click_link()
assert content.section_title == title
content_region = content.content_region
assert content_region.has_os_figures == has_os_figures
for figure in content_region.os_figures:
assert figure.caption.is_labeled
assert figure.caption.is_numbered
assert content_region.has_os_tables == has_os_tables
for table in content_region.os_tables:
assert table.caption.is_labeled
assert table.caption.is_numbered
@markers.webview
@markers.smoke
@markers.test_case("C176258", "C176259", "C176261")
@markers.nondestructive
def test_share_links_displayed(webview_base_url, selenium):
# GIVEN the home page
home = Home(selenium, webview_base_url).open()
# WHEN a book is clicked
book = home.featured_books.openstax_list[32]
content = book.click_book_cover()
# THEN social share links have the expected urls
current_url = selenium.current_url
share = content.share
expected_facebook_url = f"https://facebook.com/sharer/sharer.php?u={current_url}"
assert share.facebook_share_url == expected_facebook_url
expected_twitter_url = f"https://twitter.com/share?url={current_url}"
assert expected_twitter_url in share.twitter_share_url
expected_linkedin_url = f"https://www.linkedin.com/shareArticle?mini=true&url={current_url}"
assert expected_linkedin_url in share.linkedin_share_url
@markers.webview
@markers.test_case("C193880")
@markers.nondestructive
@markers.parametrize("id", ["[email protected]:qVb4K8xR@3"])
def test_newer_version_leads_to_correct_page(webview_base_url, selenium, id):
# GIVEN the content page
content = Content(selenium, webview_base_url, id=id).open()
version = content.book_version
section_title = content.section_title
# WHEN the newer version link is clicked
content = content.click_newer_version_link()
# THEN we end up in a newer version of the same page
assert content.section_title == section_title
assert content.book_version > version
@markers.webview
@markers.smoke
@markers.test_case("C176234")
@markers.nondestructive
def test_get_this_book(webview_base_url, selenium):
# GIVEN a book's content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[32]
content = book.click_book_cover()
# WHEN we click the "Get This Book!" button
button_displayed = content.is_get_this_book_button_displayed
if button_displayed:
get_this_book = content.click_get_this_book_button()
pdf_displayed = get_this_book.is_pdf_link_displayed
offline_zip_displayed = get_this_book.is_offline_zip_link_displayed
# THEN links to download the pdf, epub and offline zip versions are displayed
# Look at the footer to see which downloads should have been available
downloads = content.content_footer.click_downloads_tab()
if not button_displayed:
assert not downloads.is_any_available
pytest.skip('No files available to download: "Get This Book!" button not present.')
else:
assert pdf_displayed or offline_zip_displayed
# Check the footer
if pdf_displayed:
assert downloads.is_pdf_available
if offline_zip_displayed:
assert downloads.is_offline_zip_available
@markers.webview
@markers.smoke
@markers.test_case("C195074")
@markers.nondestructive
@markers.parametrize("id", ["[email protected]:Zv6FJYpb@3"])
def test_page_with_unicode_characters_in_title_loads(webview_base_url, selenium, id):
# GIVEN the webview base url, the Selenium driver and the id of a page whose title has unicode
content = Content(selenium, webview_base_url, id=id)
# WHEN we load the content page
content = content.open()
# Ensure it has a figure element
assert content.content_region.has_figures
# THEN the page does not reload afterwards
# Wait 10 seconds to see if the page reloads
sleep(10)
# If we don't get a StaleElementReferenceException then the page didn't reload
assert content.content_region.os_figures
@markers.xfail
@markers.webview
@markers.smoke
@markers.test_case("C176236")
@markers.nondestructive
def test_content_and_figures_display_after_scrolling(webview_base_url, selenium):
    # This is expected to fail as we ran out of non-redirecting collections
# with figures on the main page
# GIVEN a book's content page with figures
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[60]
content_page = book.click_book_cover()
content_region = content_page.content_region
assert not content_region.is_blank
assert content_region.has_figures
# WHEN we scroll to a figure
figure = content_region.figures[0]
content_region.scroll_to(figure)
# THEN some figure is displayed
assert figure.is_displayed()
@markers.webview
@markers.smoke
@markers.test_case("C176235", "C176237")
@markers.nondestructive
def test_nav_and_menus_display_after_scrolling(webview_base_url, selenium):
# GIVEN a book's content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
content_header = content.content_header
original_content_header_y = content_header.root.location["y"]
# WHEN we scroll to the bottom
content.footer.scroll_to()
content_footer = content.content_footer
# THEN - the header nav is offscreen but still considered displayed
# - the content nav is displayed on top without the site navbar or any social links
assert content.header.is_nav_displayed
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert not content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
share = content.share
assert not share.is_displayed
assert not share.is_facebook_share_link_displayed
assert not share.is_twitter_share_link_displayed
# The footer is displayed at the bottom
assert content_footer.is_displayed
assert content_footer.is_downloads_tab_displayed
assert content_footer.is_history_tab_displayed
assert content_footer.is_attribution_tab_displayed
assert content_footer.is_more_information_tab_displayed
# Hard to check that the content_header is on top after scrolling, but we can check
# that it at least has the pinned class and is above the footer
assert content_header.is_pinned
assert not content_header.is_opened
assert not content_header.is_closed
assert content_header.root.location["y"] > original_content_header_y
assert content_header.root.location["y"] < content_footer.root.location["y"]
@markers.webview
@markers.smoke
@markers.test_case("C195232")
@markers.nondestructive
@markers.parametrize("width,height", [(480, 640)])
def test_mobile_nav_and_menus_hide_after_scrolling(webview_base_url, selenium, width, height):
# GIVEN a book's content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
content_header = content.content_header
original_content_header_y = content_header.root.location["y"]
# WHEN we scroll to the bottom
content.footer.scroll_to()
# THEN - the header nav is offscreen but still considered displayed
# - the content nav is offscreen without the site navbar or any social links
assert content.header.is_nav_displayed
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert not content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
share = content.share
assert not share.is_displayed
assert not share.is_facebook_share_link_displayed
assert not share.is_twitter_share_link_displayed
assert not content_header.is_pinned
assert content_header.root.location["y"] == original_content_header_y
# WHEN we scroll up
content.scroll_up()
# THEN - the header nav is offscreen but still considered displayed
# - the content nav is now pinned and onscreen without the site navbar or any social links
assert content.header.is_nav_displayed
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert not content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
share = content.share
assert not share.is_displayed
assert not share.is_facebook_share_link_displayed
assert not share.is_twitter_share_link_displayed
assert content_header.is_pinned
assert content_header.is_opened
assert not content_header.is_closed
previous_content_header_y = content_header.root.location["y"]
assert previous_content_header_y > original_content_header_y
# WHEN we scroll down again
content.scroll_down()
# THEN - the header nav is offscreen but still considered displayed
# - the content nav is now closed and offscreen without the site navbar or any social links
assert content.header.is_nav_displayed
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert not content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
share = content.share
assert not share.is_displayed
assert not share.is_facebook_share_link_displayed
assert not share.is_twitter_share_link_displayed
assert content_header.is_pinned
assert not content_header.is_opened
assert content_header.is_closed
assert content_header.root.location["y"] > previous_content_header_y
@markers.webview
@markers.smoke
@markers.test_case("C162171")
@markers.nondestructive
def test_attribution(webview_base_url, selenium):
# GIVEN a book's content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
# WHEN we click the attribution tab
attribution = content.content_footer.click_attribution_tab()
# THEN the attribution is displayed and has the correct support email
assert attribution.is_displayed
expected_sentence = "For questions regarding this license, please contact [email protected]."
assert expected_sentence in attribution.text
@markers.webview
@markers.smoke
@markers.test_case("C176241")
@markers.nondestructive
def test_back_to_top(webview_base_url, selenium):
# GIVEN a book's scrolled content page
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
footer = content.content_footer
content_header = content.content_header
original_content_header_y = content_header.root.location["y"]
# WHEN we scroll to the bottom then click the back to top link
content = footer.nav.click_back_to_top_link()
# THEN the content page is no longer scrolled
assert content.header.is_nav_displayed
assert content_header.is_displayed
assert content_header.is_title_displayed
assert content_header.is_book_by_displayed
assert content_header.is_share_displayed
header_nav = content_header.nav
assert header_nav.is_contents_button_displayed
assert header_nav.is_searchbar_displayed
assert header_nav.is_back_link_displayed
assert header_nav.is_progress_bar_displayed
assert header_nav.is_next_link_displayed
assert content.is_section_title_displayed
share = content.share
assert share.is_displayed
assert share.is_facebook_share_link_displayed
assert share.is_twitter_share_link_displayed
# The footer is offscreen, but still considered displayed
assert footer.is_displayed
assert footer.is_downloads_tab_displayed
assert footer.is_history_tab_displayed
assert footer.is_attribution_tab_displayed
assert footer.is_more_information_tab_displayed
# The header is no longer pinned
assert not content_header.is_pinned
assert content_header.root.location["y"] == original_content_header_y
@markers.webview
@markers.smoke
@markers.test_case("C176238", "C176239", "C176240", "C176245")
@markers.nondestructive
def test_navigation(webview_base_url, selenium):
# GIVEN a book's content page and a sim_ratio
sim_ratio = 0.4
home = Home(selenium, webview_base_url).open()
book = home.featured_books.openstax_list[59]
content = book.click_book_cover()
header_nav = content.header_nav
header_nav.click_contents_button()
toc = content.table_of_contents
num_pages = toc.number_of_pages
assert type(content) == Content
# Introduction should be the first section loaded
assert (
content.section_title == "Introduction"
or similar(content.section_title, "📎 Inquiry Organizer") > sim_ratio
)
# Preface is skipped by default
assert header_nav.progress_bar_fraction_is(2 / num_pages)
# WHEN we navigate next twice and then back twice using the header and footer controls
content = content.header_nav.click_next_link()
assert type(content) == Content
assert content.chapter_section == "1.2"
assert header_nav.progress_bar_fraction_is(3 / num_pages)
content = content.footer_nav.click_next_link()
assert type(content) == Content
assert content.chapter_section == "1.3"
assert header_nav.progress_bar_fraction_is(4 / num_pages)
content = content.footer_nav.click_back_link()
assert type(content) == Content
assert content.chapter_section == "1.2"
assert header_nav.progress_bar_fraction_is(3 / num_pages)
content = content.header_nav.click_back_link()
# THEN we arrive back at the initial page
assert header_nav.progress_bar_fraction_is(2 / num_pages)
@markers.webview
@markers.test_case("C195073")
@markers.slow
@markers.nondestructive
def test_ncy_is_not_displayed(webview_base_url, american_gov_uuid, selenium):
# GIVEN the webview base url, an American Government content page UUID, and the Selenium driver
# WHEN the page is fully loaded using the URL
page = Content(selenium, webview_base_url, id=american_gov_uuid).open()
# THEN :NOT_CONVERTED_YET is not displayed
assert page.is_ncy_displayed is False
@markers.webview
@markers.test_case("C132547", "C132548")
@markers.nondestructive
@markers.parametrize(
"page_uuid,is_baked_book_index",
[
("bb62933e-f20a-4ffc-90aa-97b36c296c3e:85036aed-fa1a-5d51-a9c2-c07ee673488d", True),
("6a0568d8-23d7-439b-9a01-16e4e73886b3", False),
],
)
def test_id_links_and_back_button(page_uuid, is_baked_book_index, webview_base_url, selenium):
# GIVEN an index page in a baked book or a page with anchor links in an unbaked book
content_page = Content(selenium, webview_base_url, id=page_uuid).open()
content_url = content_page.current_url
assert "#" not in content_url
# WHEN we click on a term (baked index) or an anchor link
content_region = content_page.content_region
if is_baked_book_index:
content_page = content_region.click_index_term()
else:
content_page = content_region.click_anchor_link(internal_only=True)
assert content_page.current_url.startswith(content_url)
# THEN we end up at the linked page and the element with the same id as the link is displayed
new_url = content_page.current_url
assert "#" in new_url
id = re.search("#(.+)$", new_url)[1]
assert id
assert content_page.is_element_id_displayed(id)
# WHEN we click the browser's back button
content_page.back()
# THEN we end up at the previous page
assert content_page.current_url == content_url
@markers.webview
@markers.test_case("C181754")
@markers.nondestructive
@markers.parametrize(
"ch_review_id", ["[email protected]:6IrsWVCW", pytest.param("[email protected]:aVXUrOzZ")]
)
def test_chapter_review_version_matches_book_version(webview_base_url, selenium, ch_review_id):
# GIVEN the webview base url, a chapter review id, and the Selenium driver
# WHEN we visit the chapter review page
content = Content(selenium, webview_base_url, id=ch_review_id).open()
# THEN the chapter review version matches the book version
assert content.page_version == content.book_version
@markers.webview
@markers.smoke
@markers.test_case("C195064")
@markers.nondestructive
@markers.parametrize("ch_review_id", ["e5fbbjPE"])
def test_books_containing_go_to_book_link(webview_base_url, selenium, ch_review_id):
# GIVEN the webview base url, a chapter review id, and the Selenium driver
content = ContentPage(selenium, webview_base_url, id=ch_review_id).open()
books = content.books_containing.book_list
# WHEN we click the link to the first book
title = books[0].title
book = books[0].click_go_to_book_link
# THEN we are on the About this Book page and it is displayed
assert type(book) == AboutBook
assert book.about_this_book_section.is_displayed
assert book.title == title
@markers.webview
@markers.test_case("C195063")
@markers.nondestructive
@markers.parametrize("ch_review_id", ["SjdU64Og@3"])
def test_books_containing_have_revised_date(webview_base_url, selenium, ch_review_id):
# GIVEN the webview base url, a chapter review id, and the Selenium driver
# WHEN the content_page is fully loaded and we have a list of books containing the page
content = ContentPage(selenium, webview_base_url, id=ch_review_id).open()
books = content.books_containing.book_list
    # THEN all the books should contain a revision date
for book in books:
assert book.revision_date.is_displayed
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195061")
@markers.nondestructive
@markers.parametrize("page_id", ["BWYBGK7C@2"])
def test_books_containing_title_not_limited(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
# WHEN we visit that page of the chapter and we have a list of books containing the page
content = ContentPage(selenium, webview_base_url, id=page_id).open()
books = content.books_containing.book_list
    # THEN the titles of the books are not truncated by ellipses
for book in books:
assert "..." not in book.title
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195057", "C195058", "C195059", "C195072")
@markers.nondestructive
@markers.parametrize("page_id", ["mjO9LQWq@1", "bJs8AcSE@1", "4fGVMb7P@1"])
def test_books_containing_message_is_correct(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
# WHEN we visit the content page
# AND we have a books containing count
# AND we have the overview message
content = ContentPage(selenium, webview_base_url, id=page_id).open()
book_count = len(content.books_containing.book_list)
overview = content.books_containing.overview
# THEN ensure the proper books containing overview message is displayed
if book_count > 1:
assert overview == f"This page is in {book_count} books:"
elif book_count > 0:
assert overview == "This page is in this book:"
else:
assert overview == "This page is not in any books."
@markers.webview
@markers.test_case("C195062")
@markers.nondestructive
@markers.parametrize("page_id", ["SjdU64Og@3"])
def test_books_containing_have_authors(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
    # WHEN we visit that page of the chapter and we have a list of books containing the page
content = ContentPage(selenium, webview_base_url, id=page_id).open()
books = content.books_containing.book_list
# THEN the authors of the book should be displayed
for book in books:
assert book.author.is_displayed()
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195065")
@markers.nondestructive
@markers.parametrize("page_id", ["HOATLqlR@5"])
def test_books_containing_list_in_sorted_order(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
    # WHEN we visit that page of the chapter and we have a list of books containing the page
content = Content(selenium, webview_base_url, id=page_id).open()
# AND store the main author
main_author = content.content_header.authors
# AND Save list of authors and dates
content = ContentPage(selenium, webview_base_url, id=page_id).open()
dates = content.books_containing.date_list
author = content.books_containing.author_list
# THEN main author should be the author of the first book listed
assert author[0][0] == main_author
# AND if there are more books with main author, they should be listed first
i = 1
while i < len(author) - 1 and author[i][0] == main_author:
i += 1
# AND for the rest of the books, the revision dates are sorted in decreasing order
date_list = []
for date in dates[i:]:
date_list.append(datetime.strptime(date[0], "%b %d, %Y"))
assert date_list == sorted(date_list, reverse=True)
@markers.webview
@markers.smoke
@markers.requires_complete_dataset
@markers.test_case("C195055")
@markers.nondestructive
@markers.parametrize("page_id", ["4fGVMb7P@1"])
def test_books_containing_button_toggles_and_labelled_books(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
# WHEN we visit a single content page (not a book)
content = ContentPage(selenium, webview_base_url, id=page_id).open()
books_containing = content.books_containing
# THEN the button that opens and closes the "ToC" is labelled "Books" instead of "Contents"
# AND the button opens and closes the "This page is in # books" side nav
contents_button = content.header_nav.contents_button
assert contents_button.text == "Books"
# The side nav area should be open by default
assert books_containing.is_displayed
content.header_nav.click_contents_button()
assert not books_containing.is_displayed
content.header_nav.click_contents_button()
content.books_containing.wait_for_region_to_display()
assert books_containing.is_displayed
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195054")
@markers.nondestructive
@markers.parametrize("page_id", ["4fGVMb7P@1"])
def test_books_containing_list_is_on_left_of_page(webview_base_url, selenium, page_id):
# GIVEN the webview base url, page_id, and the Selenium driver
# WHEN we load the page of the chapter and we have the width of the window
content = ContentPage(selenium, webview_base_url, id=page_id).open()
window_width = content.get_window_size("width")
# THEN check if the books list exists and on the left
assert content.books_containing.book_list
assert content.location["x"] < window_width / 2
@markers.webview
@markers.smoke
@markers.requires_complete_dataset
@markers.test_case("C195056")
@markers.nondestructive
@markers.parametrize("page_id", ["QlYg2VHd"])
@markers.parametrize("width,height", [(1024, 768), (630, 480)])
def test_button_open_with_certain_window_size(webview_base_url, selenium, page_id, width, height):
# GIVEN the webview base url, page_id, and the Selenium driver
# WHEN we visit that page of the chapter and we have a list of books containing the page
content = ContentPage(selenium, webview_base_url, id=page_id).open()
# THEN if window width >= 640, button should be open
if width >= 640:
assert content.books_containing.overview_is_displayed
# AND if window width < 640, button should be closed
else:
assert not content.books_containing.overview_is_displayed
@markers.webview
@markers.test_case("C195060")
@markers.nondestructive
@markers.parametrize("id", ["4fGVMb7P@1"])
@markers.parametrize("highlight_color", ["#78b04a"])
def test_book_title_link_and_highlight_on_view(webview_base_url, id, selenium, highlight_color):
# GIVEN the webview base url, a chapter page id, the color and the Selenium driver
# WHEN we visit that page of the chapter
content = ContentPage(selenium, webview_base_url, id=id).open()
content_page_title = content.title
# AND click the title
content.books_containing.book_list[0].click_title_link()
# AND get and click the Contents button
content.header_nav.click_contents_button()
# AND find the on viewing title and get the color
active_color = content.table_of_contents.active_page_color
# THEN make sure the section matches the original page title and the highlight color is correct
assert content_page_title == content.section_title_without_chapter_section
assert active_color == highlight_color
| 35.747563 | 100 | 0.7421 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | openstax/cnx-automation | tests/webview/ui/test_content.py | 36,680 | Python |
import os
from werkzeug.security import generate_password_hash
from flask_script import Manager, Shell, Command, Option
from flask_migrate import Migrate, MigrateCommand
from app import db
from app import create_app
from app.models import User
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
class CreateUser(Command):
option_list = (
Option('--name', '-n', dest='name'),
Option('--password', '-p', dest='password'),
Option('--email', '-e', dest='email')
)
def run(self, name, password, email):
user = User()
user.name = name
user.hash_pass = generate_password_hash(password)
user.email = email
db.session.add(user)
db.session.commit()
def make_shell_context():
return dict(app=app, db=db, User=User)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('create_user', CreateUser())
if __name__ == '__main__':
manager.run()
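# Typical invocations for the commands registered above (a usage sketch; the
# option values are illustrative):
#
#   python manage.py db migrate
#   python manage.py db upgrade
#   python manage.py create_user -n alice -p secret -e [email protected]
#   python manage.py shell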
| 24.767442 | 68 | 0.684507 | ["MIT"] | Tianny/incepiton-mysql | manage.py | 1,065 | Python |
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import os
import drgn
import sdb
def enum_lookup(enum_type_name: str, value: int) -> str:
"""return a string which is the short name of the enum value
(truncating off the common prefix) """
fields = sdb.get_type(enum_type_name).type.enumerators
enum_string: str = fields[value].name
prefix = os.path.commonprefix([f[0] for f in fields])
return enum_string[prefix.rfind("_") + 1:]
def nicenum(num: int, suffix: str = "B") -> str:
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if num < 1024:
return "{}{}{}".format(int(num), unit, suffix)
num = int(num / 1024)
return "{}{}{}".format(int(num), "Y", suffix)
def P2PHASE(x: drgn.Object, align: int) -> int:
return int(x & (align - 1))
def BF64_DECODE(x: drgn.Object, low: int, length: int) -> int:
return int(P2PHASE(x >> low, 1 << length))
def BF64_GET(x: drgn.Object, low: int, length: int) -> int:
return BF64_DECODE(x, low, length)
def WEIGHT_IS_SPACEBASED(weight: int) -> bool:
return weight == 0 or (BF64_GET(weight, 60, 1) != 0)
def WEIGHT_GET_INDEX(weight: int) -> int:
return BF64_GET((weight), 54, 6)
def WEIGHT_GET_COUNT(weight: int) -> int:
return BF64_GET((weight), 0, 54)
METASLAB_WEIGHT_PRIMARY = int(1 << 63)
METASLAB_WEIGHT_SECONDARY = int(1 << 62)
METASLAB_WEIGHT_CLAIM = int(1 << 61)
METASLAB_WEIGHT_TYPE = int(1 << 60)
METASLAB_ACTIVE_MASK = (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY |
METASLAB_WEIGHT_CLAIM)
BTREE_LEAF_SIZE = 4096
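# An illustrative sketch (not part of the module) of how the bit-field helpers
# above decode a space-based metaslab weight; the weight value is made up.
#
# weight = METASLAB_WEIGHT_TYPE | (36 << 54) | 123
# assert WEIGHT_IS_SPACEBASED(weight)      # bit 60 set
# assert WEIGHT_GET_INDEX(weight) == 36    # bits 54..59
# assert WEIGHT_GET_COUNT(weight) == 123   # bits 0..53
# nicenum(3 * 1024 ** 3)                   # -> '3GB'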
| 29.534247 | 77 | 0.674397 | ["Apache-2.0"] | PaulZ-98/sdb | sdb/commands/zfs/internal/__init__.py | 2,156 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
class _GetAttrMeta(type):
# https://stackoverflow.com/questions/33727217/subscriptable-objects-in-class
def __getitem__(cls, x):
return getattr(cls, x)
def __iter__(cls):
"""Getting subclasses which usually represent resolutions"""
for attr in vars(cls):
if not attr.startswith("_"):
yield cls[attr]
class DatasetTreeCore(metaclass=_GetAttrMeta):
pass
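# An illustrative sketch (not from the package) of what the metaclass enables:
# class-level subscripting and iteration over public attributes.
#
# class Example(DatasetTreeCore):
#     DAILY = "daily"
#     HOURLY = "hourly"
#
# Example["DAILY"]   # -> "daily", via _GetAttrMeta.__getitem__
# list(Example)      # -> ["daily", "hourly"], underscore-prefixed attrs skipped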
| 31.555556 | 81 | 0.672535 | ["MIT"] | earthobservations/python_dwd | wetterdienst/util/parameter.py | 568 | Python |
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
class NYUv2Segmentation(BaseDataset):
BASE_DIR = 'nyuv2'
NUM_CLASS = 40
def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',
mode=None, transform=None, target_transform=None, **kwargs):
super(NYUv2Segmentation, self).__init__(
root, split, mode, transform, target_transform, **kwargs)
# assert exists and prepare dataset automatically
root = os.path.join(root, self.BASE_DIR)
assert os.path.exists(root), "Please setup the dataset using" + \
"cvss/scripts/prepare_nyuv2.py"
self.images, self.masks = _get_nyuv2_pairs(root, split)
if split != 'test':
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise(RuntimeError("Found 0 images in subfolders of: \
" + root + "\n"))
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _sync_transform(self, img, mask):
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
w, h = img.size
min_side = min(w, h)
scale = np.random.uniform(0.5, 2.0)
if min_side * scale < 350:
scale = 350 * 1.0 / min_side
long_size = int(self.base_size*scale)
if h > w:
oh = long_size
ow = int(1.0 * w * long_size / h + 0.5)
short_size = ow
else:
ow = long_size
oh = int(1.0 * h * long_size / w + 0.5)
short_size = oh
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
# final transform
return img, self._mask_transform(mask)
def _val_sync_transform(self, img, mask):
# final transform
return img, self._mask_transform(mask)
def _mask_transform(self, mask):
target = np.array(mask).astype('int64') - 1
return torch.from_numpy(target)
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 1
def _get_nyuv2_pairs(folder, split='train'):
def get_path_pairs(folder, split_file):
img_paths = []
mask_paths = []
with open(os.path.join(folder, split_file), 'r') as f:
for filename in f.readlines():
filename = filename.strip()
imgpath = os.path.join(folder, 'image', filename)
maskpath = os.path.join(folder, 'mask', filename)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')
return img_paths, mask_paths
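# A minimal usage sketch (illustrative, not part of the module), reusing the
# torchvision `transform` and torch `data` imports above; the normalization
# statistics are the common ImageNet values, assumed here.
#
# input_transform = transform.Compose([
#     transform.ToTensor(),
#     transform.Normalize([.485, .456, .406], [.229, .224, .225]),
# ])
# trainset = NYUv2Segmentation(split='train', mode='train',
#                              transform=input_transform)
# trainloader = data.DataLoader(trainset, batch_size=16, shuffle=True,
#                               num_workers=4, drop_last=True)
# for images, masks in trainloader:
#     pass  # feed each batch to a segmentation model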
| 37.904762 | 79 | 0.568677 | ["MIT"] | etmwb/cvsegmentation | cvss/datasets/nyuv2.py | 4,776 | Python |
# This file was scaffolded by idol_mar, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ...codegen.all.target.optional_method import (
AllTargetOptionalMethodSchema as OptionalMethodSchemaCodegen,
)
class OptionalMethodSchema(OptionalMethodSchemaCodegen):
pass
| 33 | 91 | 0.79697 | ["MIT"] | corps/idol | test/src/lib/idol/py_mar/all/target/optional_method.py | 330 | Python |
# -*- coding: utf-8 -*-
import os
import sqlite3
import logging
logger = logging.getLogger("xtc")
class sqlite_handle(object):
def __init__(self):
self.dbname = "Xsense.db"
self.conn = None
    def db_init(self): # initialize the db: task_info, apps, scripts, run_tasks tables
self.db_table_all()
conn = sqlite3.connect(self.dbname)
try:
for cre in self.create_dic:
conn.execute(cre)
# logger.info(cre)
except Exception as e:
logger.info("Create table failed: {}".format(e))
return False
finally:
conn.close()
    def insert_task(self,taskdict): # insert task info (one row per task)
conn = sqlite3.connect(self.dbname)
for task in taskdict:
conn.execute(
'INSERT INTO task_Info VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',task
)
conn.commit()
conn.close()
    def insert_script_one(self,scriptOne): # insert a single script record
conn = sqlite3.connect(self.dbname)
conn.execute(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',scriptOne
)
conn.commit()
conn.close()
    def insert_task_many(self,script_data): # insert multiple records at once
conn = sqlite3.connect(self.dbname)
conn.executemany(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',script_data
)
conn.commit()
conn.close()
def db_table_all(self):
crt_task_info = '''CREATE TABLE IF NOT EXISTS task_info (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
crt_scripts = '''CREATE TABLE IF NOT EXISTS scripts (
scriptId INT, scriptName TEXT, scriptType int,scriptUrl TEXT,
uploadDate int, scriptMaxRunTime int, scriptVersion int,
scriptCacheUrl TEXT
);'''
crt_apps = '''CREATE TABLE IF NOT EXISTS apps (
scriptId INT, appCheck int, appPackageName TEXT, appUrl TEXT, appMd5 TEXT,
appVersion TEXT, appVersionCode TEXT, appLastUpdateTime TEXT, appCacheUrl TEXT
);'''
run_tasks = '''CREATE TABLE IF NOT EXISTS run_tasks (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
create_dic = []
create_dic.append(crt_task_info)
create_dic.append(crt_scripts)
create_dic.append(crt_apps)
        create_dic.append(run_tasks) # stores the tasks that need to run -- is this really necessary?
self.create_dic = create_dic
def query_runtask(self):
conn = sqlite3.connect(self.dbname)
        taskrows = [] # each element is a tuple, e.g. (205937, 'pyclient-test', 1, 107864, 'http://202.105.193....69910.zip', 20191006000000, 20201231235959, '000000', '235959', 2, 1, 1, 1)
        # fetch unfinished iteration-based tasks without duplicates (new + started); exeType=2 runs by iteration count, exeType=1 runs on a schedule
        # optType: 1=new task; 2=pause task; 3=start task; 4=delete task
for row in conn.execute('SELECT DISTINCT * FROM task_info WHERE optType=3 OR optType=1 AND exeType=2 AND startIterationNumber<=iterationNum'):
taskrows.append(row)
conn.close()
return taskrows
def dele_table(self):
pass
def query(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
return data
def update(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
def _update(self, sql, value=None, querymany=True):
ret = True
try:
if querymany:
self.update(sql, value)
else:
self.update(sql)
#except SqliteException:
except Exception as e:
            logger.info("error('error while executing sqlite: {}: {}')".format(sql, e))
ret = False
return ret
def del_task_byid(self, taskid):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
sql = 'DELETE FROM task_info WHERE taskid={}'.format(taskid)
cursor.execute(sql)
        logger.info("deleted taskid={} cursor.rowcount={}".format(taskid, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_status(self, taskid, status):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET optType={} WHERE taskid={}".format(status, taskid))
        logger.info("updated taskid={}, set optType={}, cursor.rowcount={}".format(taskid, status, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_count(self, taskid, run_count):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET startIterationNumber={} WHERE taskid={}".format(run_count, taskid))
        logger.info("updated taskid={}, startIterationNumber={}, cursor.rowcount={}".format(taskid, run_count, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def updata_table(self):
pass
if __name__ == "__main__":
handle = sqlite_handle()
if not os.path.isfile(handle.dbname):
handle.db_init()
#taskrows = handle.query_runtask()
#print("taskrows=" + str(taskrows))
#handle.del_task_byid("1235")
handle.update_task_run_count("206266", 60)
#handle.update_task_run_status("206266", "5")
    # update/delete a single task; update the script info
    # Before downloading, query the db: if the script id already exists and its update time matches, skip the download; otherwise download --> store in the db
    # When tasks run, first check for new tasks; if there are any, store them in the db,
    # otherwise query the db and check whether each task id's run info meets the removal conditions (expired, finished, etc.)
    # If the completed iteration count reaches the total iteration count, delete the task
    # If the task is past its end time, delete it
    # Need to add sorting by id here before running
    # After a run finishes, update the iteration info for the corresponding id
    # Today: finish script running and result files, then do the db update and remove
| 36.318436 | 167 | 0.591601 | ["MIT"] | zxypic/PublicPic | client-autosense/sense/sqlite_syn.py | 7,075 | Python |
def notas(*n, sit=False):
"""
    Function to analyze the grades and standing of several students.
    :param n: One or more student grades (accepts several).
    :param sit: Optional flag indicating whether or not to add the standing.
    :return: Dictionary with information about the class standing.
"""
dic = dict()
dic["total"] = len(n)
dic["maior"] = max(n)
dic["menor"] = min(n)
dic["media"] = sum(n) / len(n)
if sit:
        if dic["media"] < 5:
            dic["situação"] = "Critica"
        elif dic["media"] < 7:
dic["situação"] = "Rasoavel"
else:
dic["situação"] = "Boa"
return dic
resp = notas(5, 4, 3, sit=True)
print(resp)
| 25.777778 | 78 | 0.566092 | ["MIT"] | Matheus-Henrique-Burey/Curso-de-Python | Modulo-03/ex105/ex105.py | 713 | Python |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
| 36.285714 | 77 | 0.740157 | ["Apache-2.0"] | Aniruddha120/qiskit-aqua | test/aqua/operators/__init__.py | 508 | Python |
import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1
# main
def main():
"""
----------
Author: Damon Gwinn
----------
Entry point. Trains a model specified by command line arguments
----------
"""
args = parse_train_args()
print_train_args(args)
if(args.force_cpu):
use_cuda(False)
print("WARNING: Forced CPU usage, expect model to perform slower")
print("")
eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
args.output_dir = args.output_dir + "/" + eventid
os.makedirs(args.output_dir, exist_ok=True)
##### Output prep #####
params_file = os.path.join(args.output_dir, "model_params.txt")
write_model_params(args, params_file)
weights_folder = os.path.join(args.output_dir, "weights")
os.makedirs(weights_folder, exist_ok=True)
results_folder = os.path.join(args.output_dir, "results")
os.makedirs(results_folder, exist_ok=True)
results_file = os.path.join(results_folder, "results.csv")
best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
best_loss_classifier_file = os.path.join(
results_folder, "best_loss_classifier_weights.pickle")
best_acc_classifier_file = os.path.join(
results_folder, "best_acc_classifier_weights.pickle")
best_text = os.path.join(results_folder, "best_epochs.txt")
##### Tensorboard #####
if(args.no_tensorboard):
tensorboard_summary = None
else:
from torch.utils.tensorboard import SummaryWriter
tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)
##### Datasets #####
    # Because the dataset changes between encodings, it has to be handled as shown below
if args.interval and args.octave:
print("octave interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding and args.absolute:
print("absolute dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.interval and not args.octave:
print("interval dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.octave and args.fusion_encoding:
print("Octave_fusion dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif not args.interval and args.octave and not args.fusion_encoding:
print("Octave dataset!!")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,
condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
elif args.logscale:
print("logscvale dataset")
classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
else:
classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,
condition_token = args.condition_token, octave = args.octave)
pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)
pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
[int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
generator=torch.Generator().manual_seed(42))
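    # Select which corpora feed training/validation according to --data (classic, pop, or both)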
if args.data == 'both':
print("Dataset: both")
train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])
val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])
elif args.data == 'classic':
print("Dataset: classic")
train_dataset = torch.utils.data.ConcatDataset([classic_train])
val_dataset = torch.utils.data.ConcatDataset([classic_val])
else:
print("Dataset: pop")
train_dataset = torch.utils.data.ConcatDataset([pop_train])
val_dataset = torch.utils.data.ConcatDataset([pop_valid])
test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr,
condition_token = args.condition_token, interval = args.interval, octave = args.octave,
fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())
# EY critic
# num_prime = args.num_prime
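    # The WGAN critic and the classifier reuse the discriminator architecture at half the generator's width and depth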
critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
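    # Creative mode starts the classifier from a pre-trained checkpoint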
if args.creative:
classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))
##### Continuing from previous training session #####
start_epoch = BASELINE_EPOCH
if(args.continue_weights is not None):
if(args.continue_epoch is None):
print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
return
else:
model.load_state_dict(torch.load(args.continue_weights))
start_epoch = args.continue_epoch
elif(args.continue_epoch is not None):
print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
return
##### Lr Scheduler vs static lr #####
if(args.lr is None):
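        # No fixed lr supplied: fall back to the LrStepTracker warmup schedule, stepped once per batch (init_step resumes the schedule part-way when continuing training)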
if(args.continue_epoch is None):
init_step = 0
else:
init_step = args.continue_epoch * len(train_loader)
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
else:
lr = args.lr
##### Not smoothing evaluation loss #####
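    # The padding token id depends on the active encoding, so pick the matching ignore_index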
if args.interval and args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
elif args.interval and not args.octave:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
elif args.octave and args.fusion_encoding and args.absolute:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif args.octave and args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
else:
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
if(args.ce_smoothing is None):
train_loss_func = eval_loss_func
else:
if args.interval and args.octave:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
elif args.interval and not args.octave:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
elif not args.interval and args.octave and args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
elif not args.interval and args.octave and not args.fusion_encoding:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
elif args.logscale:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
##### EY - WGAN Loss #####
classifier_loss_func = nn.MSELoss()
##### Optimizer #####
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
if(args.lr is None):
lr_scheduler = LambdaLR(opt, lr_stepper.step)
critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
else:
lr_scheduler = None
##### Tracking best evaluation accuracy #####
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
##### Results reporting #####
if(not os.path.isfile(results_file)):
with open(results_file, "w", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow(CSV_HEADER)
##### TRAIN LOOP #####
for epoch in range(start_epoch, args.epochs):
# Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
if(epoch >= BASELINE_EPOCH):
print(SEPERATOR)
print("NEW EPOCH:", epoch+1)
print(SEPERATOR)
print("")
# Train
            # EY: start of the section that still needs fixing
train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)
print(SEPERATOR)
print("Evaluating:")
else:
print(SEPERATOR)
print("Baseline model evaluation (Epoch 0):")
# Eval
# train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)
# Learn rate
lr = get_lr(opt)
print("Epoch:", epoch+1)
print("Avg train loss:", train_loss)
print("Avg train acc:", train_acc)
print("Avg eval loss:", eval_loss)
print("Avg eval acc:", eval_acc)
print(SEPERATOR)
print("")
new_best = False
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
torch.save(critic.state_dict(), best_acc_critic_file)
torch.save(classifier.state_dict(), best_acc_classifier_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
torch.save(critic.state_dict(), best_loss_critic_file)
torch.save(classifier.state_dict(), best_loss_classifier_file)
new_best = True
# Writing out new bests
if(new_best):
with open(best_text, "w") as o_stream:
print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
print("Best eval acc:", best_eval_acc, file=o_stream)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
print("Best eval loss:", best_eval_loss, file=o_stream)
if(not args.no_tensorboard):
tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
tensorboard_summary.flush()
if((epoch+1) % args.weight_modulus == 0):
epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
torch.save(model.state_dict(), path)
with open(results_file, "a", newline="") as o_stream:
writer = csv.writer(o_stream)
writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])
    # Final flush to make sure all remaining tensorboard data has been written
if(not args.no_tensorboard):
tensorboard_summary.flush()
return
if __name__ == "__main__":
main()
| 56.356021 | 307 | 0.645113 | [
"MIT"
] | yeong35/MusicTransformer-Pytorch | train.py | 21,588 | Python |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/pyspark-bigquery/.
# This is a script for backfilling a set of data from Reddit into Google Cloud Storage
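# Example invocation (the launcher shown is just one possibility): spark-submit backfill.py <year> <month> <bucket-name>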
# Python imports
import re
import time
import sys
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# PySpark function for replacing characters using a regex. We'll use this to remove newline characters.
from pyspark.sql.functions import regexp_replace, col
# Library for interacting with Google Cloud Storage
from google.cloud import storage
# This will help catch some PySpark errors
from py4j.protocol import Py4JJavaError
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit").getOrCreate()
# Read the year, month, and destination bucket from the command-line arguments
year = sys.argv[1]
month = sys.argv[2]
bucket_name = sys.argv[3]
# Establish a subreddit to process
subreddit = 'food'
# Set Google Cloud Storage temp location
path = "tmp" + str(time.time())
# Keep track of all tables accessed via the job
tables_read = []
# In the form of <project-id>.<dataset>.<table>
table = f"fh-bigquery.reddit_posts.{year}_{month}"
# If the table doesn't exist, print a message and exit gracefully
# rather than recording it in our "tables_read" list
try:
df = spark.read.format('bigquery').option('table', table).load()
except Py4JJavaError:
print(f"{table} does not exist. ")
sys.exit(0)
print(f"Processing {table}.")
# Select the "title", "selftext" and "created_utc" columns of the designated subreddit and
# replace newline characters with a single space
subreddit_timestamps = (
df
.select(
regexp_replace(col("title"), "\n", " "),
regexp_replace(col("selftext"), "\n", " "),
"created_utc"
)
.where(df.subreddit == subreddit)
)
tmp_output_path = "gs://" + bucket_name + "/" + path + "/" + year + "/" + month
# Write output to our temp GCS bucket. Spark jobs can be written out to multiple files
# and partitions. By using coalesce, we ensure the output is consolidated to a single file.
# We then use .options to tell Spark to write out in a gzip format, and .csv to do the write.
(
subreddit_timestamps
# Data can get written out to multiple files / partition.
# This ensures it will only write to 1.
.coalesce(1)
.write
# Gzip the output file
.options(codec="org.apache.hadoop.io.compress.GzipCodec")
# Write out to csv
.csv(tmp_output_path)
)
# Lastly, we'll move the temp file to a new bucket and delete the temp directory.
regex = "part-[0-9a-zA-Z\-]*.csv.gz"
new_path = "/".join(["reddit_posts", year, month, subreddit + ".csv.gz"])
# Create the storage client
storage_client = storage.Client()
# Create an object representing the original bucket
source_bucket = storage_client.get_bucket(bucket_name)
# Grab all files in the source bucket. Typically there is also a _SUCCESS file, inside of the
# directory, so we'll make sure to find our single csv file.
buckets = list(source_bucket.list_blobs(prefix=path))
for bucket in buckets:
name = bucket.name
# Locate the file that represents our partition. Copy to new location and
# delete temp directory.
if re.search(regex, name):
blob = source_bucket.blob(name)
source_bucket.copy_blob(blob, source_bucket, new_path)
blob.delete()
| 35.561404 | 105 | 0.715836 | [
"Apache-2.0"
] | aosterloh/cloud-dataproc | codelabs/spark-bigquery/backfill.py | 4,054 | Python |
def sample_mean(a, b, c):
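    # Returns the arithmetic mean of three values after converting each to int; prints an error and returns None on bad input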
try:
a = int(a)
b = int(b)
c = int(c)
mean_numbers = [a, b, c]
d = len(mean_numbers)
result_mean = (a + b + c)/d
return float(result_mean)
except ZeroDivisionError:
print("Error: Number Not Valid")
except ValueError:
print("Error: Only Numeric Values")
| 25.928571 | 43 | 0.53168 | [
"MIT"
] | brittrubil/miniProject2-601 | Statistics/SampleMean.py | 363 | Python |
import random
import unittest
from domain_tree.tree import DomainTree, DomainNode, NodeNotFoundException
from domain_tree.domain import RealDomain, RealInterval
class TestDomainTree(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
def setUp(self):
# self.d0 = {"x0": (0, 1)}
self.d0 = RealDomain({"x0": RealInterval((0, 1), (True, False))})
def tearDown(self) -> None:
pass
def test_npartition(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
self.assertEqual(len(tree.leaves), 2)
tree = DomainTree(domains=self.d0, min_split=0.3)
self.assertIn(len(tree.leaves), [2, 3])
tree = DomainTree(domains=self.d0, min_split=0.2)
self.assertIn(len(tree.leaves), [3, 4, 5])
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
n = (2 ** 5) / 2
self.assertEqual(len(tree.leaves), n)
def test_stress_functions(self):
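        # Repeatedly rebuild trees and query random points; anything outside the [0, 1) domain must raise NodeNotFoundException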
for _ in range(10000):
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
tree = DomainTree(domains=self.d0, min_split=0, depth_max=10)
for _ in range(10000):
tree.compute_f({"x0": random.random()})
with self.assertRaises(NodeNotFoundException):
for _ in range(10000):
tree.compute_f({"x0": random.random() + 1})
def test_contains(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
x = {"x0": 0}
self.assertTrue(tree.contains(x))
x = {"x0": 1}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5}
self.assertTrue(tree.contains(x))
#d = {"x0": (0, 1), "x1": (2, 3)}
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
tree = DomainTree(domains=d, min_split=0.5)
x = {"x0": 0, "x1": 2}
self.assertTrue(tree.contains(x))
x = {"x0": 1, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5, "x1": 2.99}
self.assertTrue(tree.contains(x))
d = RealDomain({"x0": RealInterval((0, 1), (True, True)), "x1": RealInterval((2, 3), (False, False))})
tree = DomainTree(domains=d, min_split=0.5)
#tree.print_tree()
x = {"x0": 0, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0, "x1": 2.5}
self.assertTrue(tree.contains(x))
def test_compute_f(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
with self.assertRaises(NodeNotFoundException):
tree.node_which_contains({"x0": -12})
x = {"x0": 0}
node = tree.node_which_contains(x)
self.assertIsNotNone(node.regression)
b = node.regression.coef_[0]
c = node.regression.intercept_
self.assertEqual(node.regression.predict([list(x.values())]), b * x[list(x.keys())[0]] + c)
self.assertEqual(tree.compute_f(x), node.regression.predict([list(x.values())]))
class TestDomainNode(unittest.TestCase):
def setUp(self):
self.val = 10
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
self.node = DomainNode(name="nome", domains=d, val=self.val)
def tearDown(self) -> None:
pass
def test_generate_regression(self):
self.node.generate_regression()
self.assertIsNotNone(self.node.regression)
self.assertIsNotNone(self.node.regression.coef_)
self.assertIsNotNone(self.node.regression.intercept_)
def test_contains(self):
self.assertTrue(self.node.contains({"x0": 0, "x1": 2}))
self.assertTrue(self.node.contains({"x0": 0.5, "x1": 2.5}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 2}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 3}))
self.assertFalse(self.node.contains({"x0": 0.2, "x1": 3}))
def test_kill(self):
self.node.dostuff(random=0.5)
self.assertIn(self.node.val, [self.val - 2, self.val - 1])
self.node.kill()
self.assertEqual(self.node.val, 0)
if __name__ == "__main__":
unittest.main()
| 33.48855 | 111 | 0.57488 | [
"BSD-3-Clause"
] | virtualms/DomainTree | test/test_tree.py | 4,387 | Python |
from django.contrib import admin
from django.urls import path
from django.contrib.auth import views as auth_views
from dashboard.views import index
from visitantes.views import registrar_visitante, informacoes_visitante, finalizar_visita
urlpatterns = [
path('admin/', admin.site.urls),
path('', index, name='index'),
path('registrar-visitante/', registrar_visitante, name='registrar_visitante'),
path('visitantes/<int:id>/', informacoes_visitante, name='informacoes_visitante'),
path('visitantes/<int:id>/finalizar-visita/', finalizar_visita, name='finalizar_visita'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
]
| 46 | 95 | 0.757033 | [
"MIT"
] | lucasazevedo/visitor-control | project/urls.py | 782 | Python |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import octavia.common.config as config
import octavia.tests.unit.base as base
class TestConfig(base.TestCase):
def test_sanity(self):
config.init([])
config.setup_logging(cfg.CONF)
# Resetting because this will cause inconsistent errors when run with
# other tests
self.addCleanup(cfg.CONF.reset)
def test_validate_server_certs_key_passphrase(self):
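        # The validator appears to require a 32-character passphrase from a restricted character set; shorter, longer, or invalid values are rejected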
conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
conf.config(
group="certificates",
server_certs_key_passphrase="insecure-key-do-not-use-this-key"
)
# Test too short
self.assertRaises(ValueError, conf.config,
group="certificates",
server_certs_key_passphrase="short_passphrase")
# Test too long
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="long-insecure-key-do-not-use-this")
# Test invalid characters
self.assertRaises(
ValueError, conf.config, group="certificates",
server_certs_key_passphrase="insecure-key-do-not-u$e-this-key")
| 37.423077 | 79 | 0.660843 | [
"Apache-2.0"
] | yi-cloud/octavia | octavia/tests/unit/common/test_config.py | 1,946 | Python |
import math
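# For each class, read every student's grade and report the class average together with the lowest and highest grade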
numero_turmas = int(input('Qual o número de turmas? '))
for _ in range(numero_turmas):
numero_alunos = int(input('Qual o número de alunos? '))
soma = 0
menor = math.inf
maior = 0
for i in range(numero_alunos):
nota = float(input(f'Qual a nota do aluno {i + 1}? '))
soma += nota
if menor > nota:
menor = nota
if maior < nota:
maior = nota
print(f'A média é {soma / numero_alunos}. A menor nota é {menor}, e a maior é {maior}.')
| 30.588235 | 92 | 0.578846 | [
"Unlicense"
] | profamaroca/Lista3-1 | 14.py | 526 | Python |
import getpass, platform, sys, threading
from .. util import log
from . control import ExtractedControl
# See https://stackoverflow.com/questions/42603000
DARWIN_ROOT_WARNING = """
On macOS, pynput must be running as root in order to get keystrokes.
Try running your program like this:
sudo %s <your commands here>
"""
INSTALL_ERROR = """
Please install the pynput library with
$ pip install pynput
"""
try:
import pynput
except ImportError:
pynput = Listener = None
else:
class Listener(pynput.keyboard.Listener):
def join(self, timeout=None):
# join() on pynput.keyboard.Listener waits on a queue...
self._queue.put(None)
return super().join(timeout)
def keyname(key):
return getattr(key, 'name', None) or getattr(key, 'char')
class Keyboard(ExtractedControl):
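    # Maps pynput press/release events to control messages carrying 'type' and 'key'; keyname() normalizes special keys to their names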
EXTRACTOR = {
'keys_by_type': {
'press': ['type', 'key'],
'release': ['type', 'key'],
},
'normalizers': {
'key': keyname,
},
}
def _press(self, key):
self.receive({'type': 'press', 'key': key})
def _release(self, key):
self.receive({'type': 'release', 'key': key})
def _make_thread(self):
if not pynput:
raise ValueError(INSTALL_ERROR)
if platform.platform().startswith('Darwin'):
if getpass.getuser() != 'root':
log.warning(DARWIN_ROOT_WARNING, sys.argv[0])
log.info('Starting to listen for keyboard input')
return Listener(self._press, self._release)
| 23.279412 | 71 | 0.60897 | [
"MIT"
] | 8cH9azbsFifZ/BiblioPixel | bibliopixel/control/keyboard.py | 1,583 | Python |
#
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from frugal.util import make_hashable
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
class base_health_condition(int):
PASS = 1
WARN = 2
FAIL = 3
UNKNOWN = 4
_VALUES_TO_NAMES = {
1: "PASS",
2: "WARN",
3: "FAIL",
4: "UNKNOWN",
}
_NAMES_TO_VALUES = {
"PASS": 1,
"WARN": 2,
"FAIL": 3,
"UNKNOWN": 4,
}
class thing(object):
"""
Attributes:
- an_id
- a_string
"""
def __init__(self, an_id=None, a_string=None):
self.an_id = an_id
self.a_string = a_string
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.an_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.a_string = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('thing')
if self.an_id is not None:
oprot.writeFieldBegin('an_id', TType.I32, 1)
oprot.writeI32(self.an_id)
oprot.writeFieldEnd()
if self.a_string is not None:
oprot.writeFieldBegin('a_string', TType.STRING, 2)
oprot.writeString(self.a_string)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.an_id))
value = (value * 31) ^ hash(make_hashable(self.a_string))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class nested_thing(object):
"""
Attributes:
- things
"""
def __init__(self, things=None):
self.things = things
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.things = []
(_, elem78) = iprot.readListBegin()
for _ in range(elem78):
elem79 = thing()
elem79.read(iprot)
self.things.append(elem79)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('nested_thing')
if self.things is not None:
oprot.writeFieldBegin('things', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.things))
for elem80 in self.things:
elem80.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.things))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class api_exception(TException):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('api_exception')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| 27.463054 | 84 | 0.535785 | [
"Apache-2.0"
] | dustyholmes-wf/frugal | test/expected/python.asyncio/actual_base/ttypes.py | 5,575 | Python |
from fastapi_utils.inferring_router import InferringRouter
from . import views
router = InferringRouter()
router.include_router(views.router, prefix='/api', tags=['api'])
| 25.571429 | 65 | 0.765363 | [
"MIT"
] | zhaojiejoe/fastapi-friendly-response-demo | api/__init__.py | 179 | Python |
import komand
from .schema import CreateRecordInput, CreateRecordOutput
# Custom imports below
class CreateRecord(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="create_record",
description="Create a new SObject record",
input=CreateRecordInput(),
output=CreateRecordOutput(),
)
def run(self, params={}):
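        # object_name falls back to "Account" when not supplied; the connection wrapper returns the Salesforce API response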
object_name = params.get("object_name", "Account")
object_data = params.get("object_data")
record = self.connection.api.create_record(object_name, object_data)
try:
id_ = record["id"]
except KeyError:
self.logger.error("Error: id key is missing from record.")
id_ = "Not available"
if record.get("success"):
return {"id": id_}
else:
return {}
| 27.34375 | 76 | 0.595429 | [
"MIT"
] | GreyNoise-Intelligence/insightconnect-plugins | salesforce/komand_salesforce/actions/create_record/action.py | 875 | Python |
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler
from sklearn.decomposition import TruncatedSVD,PCA
from sklearn.metrics.pairwise import cosine_similarity,pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
SEED = 2048
np.random.seed(SEED)
PATH = os.path.expanduser("~") + "/data/quora/"
train = pd.read_csv(PATH + "train_porter.csv")#, nrows=5000)
test = pd.read_csv(PATH + "test_porter.csv")#, nrows=5000)
test['is_duplicated'] = [-1]*test.shape[0]
len_train = train.shape[0]
data_all = pd.concat([train,test])
def calc_set_intersection(obj,target):
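    # Fraction of obj's unique tokens that also appear in target (an asymmetric word-overlap ratio)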
a = set(obj.split())
b = set(target.split())
return (len(a.intersection(b))*1.0) / (len(a)*1.0)
print('Generate intersection')
train_interaction = train.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
test_interaction = test.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
pd.to_pickle(train_interaction,PATH+"train_interaction.pkl")
pd.to_pickle(test_interaction,PATH+"test_interaction.pkl")
print('Generate porter intersection')
train_porter_interaction = train.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
test_porter_interaction = test.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
pd.to_pickle(train_porter_interaction, PATH+"train_porter_interaction.pkl")
pd.to_pickle(test_porter_interaction, PATH+"test_porter_interaction.pkl") | 45.542857 | 134 | 0.788582 | [
"MIT"
] | zonemercy/Kaggle | quora/pyfm/generate_interaction.py | 1,594 | Python |
class Animal:
_number_of_legs = 0
_pairs_of_eyes = 0
def __init__(self, age):
self._age = age
print("Animal created")
@property
def age(self):
return self._age
@age.setter
def age(self, age):
self._age = age
def print_legs_and_eyes(self):
print("I have " + str(self._number_of_legs) + " legs and " + str(self._pairs_of_eyes * 2) + " eyes.")
def print_age(self):
print("I am " + str(self._age) + " years old.")
class Mammal(Animal):
_pairs_of_eyes = 1
def __init__(self, age, is_pregnant=False):
super().__init__(age)
self._is_pregnant = is_pregnant
print("Mammal created")
@property
def is_pregnant(self):
return self._is_pregnant
@is_pregnant.setter
def is_pregnant(self, is_pregnant):
self._is_pregnant = is_pregnant
class DomesticMammal(Mammal):
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(age, is_pregnant)
self._name = name
self._favorite_toy = favorite_toy
print("DomesticMammal created")
@property
def name(self):
return self._name
@property
def favorite_toy(self):
return self._favorite_toy
@favorite_toy.setter
def favorite_toy(self, favorite_toy):
self._favorite_toy = favorite_toy
def talk(self):
print(self._name + ": talks")
class Dog(DomesticMammal):
_number_of_legs = 4
_breed = "Just a dog"
_breed_family = "Dog"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("Dog created")
def bark(self, times=1, other_domestic_mammal=None, is_angry=False):
message = self.name
if other_domestic_mammal is not None:
message += " to " + other_domestic_mammal.name + ": "
else:
message += ": "
if is_angry:
message += "Grr "
message += "Woof " * times
print(message)
def talk(self):
self.bark()
@classmethod
def print_breed(cls):
print(cls._breed)
@classmethod
def print_breed_family(cls):
print(cls._breed_family)
class TerrierDog(Dog):
_breed = "Terrier dog"
_breed_family = "Terrier"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("TerrierDog created")
class SmoothFoxTerrier(TerrierDog):
_breed = "Smooth Fox Terrier"
def __init__(self, name, age, favorite_toy, is_pregnant=False):
super().__init__(name, age, favorite_toy, is_pregnant)
print("SmoothFoxTerrier created")
class Animal:
_number_of_legs = 0
_pairs_of_eyes = 0
def __init__(self, age):
self._age = age
print("Animal created")
@property
def age(self):
return self._age
@age.setter
def age(self, age):
self._age = age
def print_legs_and_eyes(self):
print("I have " + str(self._number_of_legs) + " legs and " + str(self._pairs_of_eyes * 2) + " eyes.")
def print_age(self):
print("I am " + str(self._age) + " years old.")
def __lt__(self, other):
return self.age < other.age
def __le__(self, other):
return self.age <= other.age
def __gt__(self, other):
return self.age > other.age
def __ge__(self, other):
return self.age >= other.age
SmoothFoxTerrier.print_breed()
SmoothFoxTerrier.print_breed_family()
tom = SmoothFoxTerrier("Tom", 5, "Sneakers")
print(isinstance(tom, Animal))
print(isinstance(tom, Mammal))
print(isinstance(tom, DomesticMammal))
print(isinstance(tom, Dog))
print(isinstance(tom, TerrierDog))
print(isinstance(tom, SmoothFoxTerrier))
pluto = SmoothFoxTerrier("Pluto", 6, "Tennis ball")
goofy = SmoothFoxTerrier("Goofy", 8, "Soda bottle")
print(tom > pluto)
print(tom < pluto)
print(goofy >= tom)
print(tom <= goofy)
tom.bark()
tom.bark(2)
tom.bark(2, pluto)
tom.bark(3, pluto, True)
| 23.097297 | 110 | 0.608238 | [
"MIT"
] | Archive-42/Lambda-Resource-Static-Assets | 2-resources/_External-learning-resources/00-Javascript/Object-oriented-programming-for-JavaScript/Module 2/B04710_CodeBundle/Chapter 4/B04170_04_Python_Draft_01.py | 4,273 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import lzma
from ... import TestUnitBase
from refinery.units.formats.office.xlxtr import _ref2rc, _rc2ref
class TestCellIndexConverter(TestUnitBase):
def test_concistency(self):
for row in range(1, 12):
for col in range(1, 12):
ref = _rc2ref(row, col)
r, c = _ref2rc(ref)
self.assertEqual((r, c), (row, col), F'({row},{col}) -> {ref} -> ({r}, {c}) != ({row},{col})')
class TestExcelExtractor(TestUnitBase):
def test_regular_xlsx(self):
data = self.TEST_XLSX
unit = self.load()
self.assertEqual(unit(data), B'Binary\nRefinery.\nBinary Refinery.')
xl1 = self.load('A1', 'R33', squeeze=True)(data)
xl2 = self.load('2#E10')(data)
xl3 = self.load('Refinery#E10')(data)
self.assertEqual(xl2, xl3)
self.assertEqual(xl1, b'BinaryRefinery.')
self.assertEqual(xl2, b'Binary Refinery.')
TEST_XLSX = lzma.decompress(base64.b85decode(
'{Wp48S^xk9=GL@E0stWa8~^|S5YJf5;3PvDAzc6{61-q2m(dT*lz$@h&uisO-M2S>G=qQEROhS?T`LVCl<0*Kr;j=qGZrTMa1_{74oZ0B;H_q6z{0fO2`#4p'
'Z(%@Rrb2l^+DIK4qbHHF_tmNDpz&Y$NlI-C6c(59S<hkLEM^A)s!{gk@qKO#f!<CU&7G31h2%4o%gM*%hC-@#t>rmqA<7aPOjP!YEkx*jkYln_Gs2{7ZcSSp'
'k%^+f{8_0fK#=AnGd4nKnS~b32=88*Gzk18vHibqY6IP;P8rsEd*hi%t(hYl<vzGV#mly+rRuPU?H$RjiOhkC&_Y^=3@n*lF-L-p{&*dA>A$-1cYhlULYXE~'
'9lRf#_`OFa&uH^H|E#>F1+<slwderZG)kz>f=O+S%CnbmT=-*EXvyp=?C!#p@e|yqJFol$s>T6*DyGIxp^}#q4f#_*{FEDNWty4CtIr9?l}dTd2ZvRe4c(lw'
'DABO4`<xHUA!rFO$CY0pMP$7Ch|~lYzBzW26csva+1m`if>ts<6(kc$R^2wfYI_u<Q|ve2LG39foqnwf%7wRQd2S-u4FHQJN@YT;52pT!6{VrFCidv$Fyf;}'
'rH559u)j4P7JILO$#(5+ZYcGMZALFyO?bVadG%NCWt)~F^p=Pm29lCFbYt)Fedzu<1zSy|M+}&@hOGrpf$f_=Y#DSA@|#f687|=g$UxDWWJKOTp)mW6TzZ=^'
'p2l)f#+eE2G<HArbYwZE!pb>bRES(cfK<g8_b)!Kft2?rXK}=vK3~G(CX^_QX)BQi&gU31F}4c4VcB7TrBk^r&0ca1okiuv1q4^388j~{y%RNKdMWD;q7$3l'
'#C;mMydS27!Koh*Bsd(dJ8m~*nz#&cRltJuz`RD02l;!L145|lg~%t7)#pZ6bT%^@aB5v|Mx2gU?|0@qMh{gR9r!(5QDnF8uc&l@Th{F@viY>d61j#TIyb8X'
'61@K*a|ghIpbVLNf7H)(W5>emQ41R#dw<#Af~ZpQO|)JqOd_Vj*kk+pzMMj@w+^G{FQH|dL4#ia(qX?XVK!~^yYHeq(&}Ngxfz31xqCY)rD*@_3Pyn>pc~Wn'
'MYDkF4kdF2tAi&B|JQ~s4)B9`NTUl4qos<(L1M+~{2d!BjkqBUb0%v1*kgIrF+ptfh}s0W$bSkIfJEba^sYW_lhRuUo-$5(Fftuy6p{|&N2JPAGBvqFg`%Q)'
'1cB<NMLt8qVvugS&hO*6_B9Kg?C_=TOZyGd>o8}DAXwo}7%+6|%=!Q&@h){<N`TgzUUJ67cJdcdXo;y#hyb@#8t&HY8P=kV)6}2jZhORE^Qab?zfQf7B_xQV'
'RK!+xABFg{33KMQ{4`>l&=iyiPUfI)c<LSMZ$G<RZa2rC=p3JGN`2;6a?#<4(EV$(=VK)cnGq^2NNZgPm;XW_n&r%)Tv0l1<R+xEEgpr*wA|*#_J_;WjMhx*'
'2_V1cq6SWKO|ImPFM#_s4uUlRF5$o<bxhE8EI!Cp;wWYl$Rwb5FtH|uR2(*WCRKe{RcePa){nOIYL{IHzSvbnG=TE4j4@A1=U$eDy?6P-nQ|;;P(T(jnSv=m'
'A&Rh1<Lz=W1J+!8u%iw8-_zZAtJcr2%@WV=+r{F4QyRi-NYdmBUk!FaGe5&&sf5vL_S1fe>CT`VFqQJ@BYH?72AFt;%Y}5m9zy2-<(iY_-&tjDSa4w0OtaO1'
'8tKtv_^&+^2ur(e<A~BD=}W({XC6cTLgOQNXL9dl25Uj~y?U_xM??>jmwHU+ICMbW#mHy;%;FmR7XxDT&|UA)JmOx6IY-%2Nzf6u%Ak^&L#DrA=cJ-qL+2V4'
'QaEix%b9zxe1xNE5#G23ON{#;_>8Kk9uORLt@ysrPLTL;n@tE%n;XrSU|Lbfw)ow=_ou8?#%|lEmF1WDbL}FKuGMr+{x400xau(;+mVCbvi;c!7;xGT@yFdV'
'O%KZ3Zd7>8k{6`<kvAq=;*cc=8so}&t<|n@0JZ0ilyz;t_j^nrUr_nSS-~|bLvwY%)Eezn(t5`=4(yJ3=C)R^NZ7aBvqw##zY<>uu=C59T>6kOvA{kgk@|v`'
's>pkG(&hxNnj-cSvL;G~#$Ew`FZiF$IM+7ut?;osAW_o%bvrhoYq6nZm9@=HAw>h4Pp#i=u)I}zReJI81}J1NlhYYmCJI!K?zcp6@Y#8Z3MQwQRUxzknnlp5'
'Rl_cFj`Wt<CU*@+s1`HvyHy~l=e_`sA<(R)nIRh{g7LFc>#eyLlRNK~<0x(GE1^FLwOTD6)j;!)u7?|Ed8uB8efa1bHZN)eQzTas@ce)BAOmvmldGs|(&vx<'
'5<<8Fy}}2W=u;!65A`@sm;bxZvSJ7?a@dwF?Hm9qA<e_Li%pFt+<IhChQmdjO{g%kg(jDtI-dwJFT9Gy@;{Nj;_p=$7QGZ6J(<db_mP^Z0@hL`fMm~^emi-<'
'#U}<C;1S7UX&q{)L&*;Bb4F4&hy!RF0|TGtm9!CB-zUI~7+XmC5f#gR?25`_79+(~-tv8S?S4f!r4*c$F!XRrO<4{vh^|w`l%t?0J>547bF1x6nFKL1FZME8'
'x>xF18ESM1s;wm*-x&m$NDpw?@x=<tlcE)STJnr9{NuK;#i6_2MYCPl%4Zq^9*$^R372ua6jwv>oH^mR0ioqk%%)Awns;#lrjXkIhYB_Vt*Pr*oTgse6Uazr'
'd)yUnaZ|Z`9?Q6aTHa2@m4`pd_?E;;Re)&<*otbim^DZ!V{~?+t%H;U2&V8O9CkMdW*tOzBErCD-E}{=Nl%~-`;W#E5$bMF8A-TOVDt09^K)tTG2cvWxLh%9'
'cuC?O7rL(QbGlAASV!M6dTB)pfy|#N5k4(Mdd*7+Mb<Fc^fR3BfFeEzF^|<<jpBXBM&T8{-77eX)1)UjzwbB1E&LZ4khDM^66En##rJ{5FB;62)1u0P(WW!?'
'lQ>ewk;iuv3T5ya!?u25bnj7}T|JgGJ#9v?s8&4#t^H+#psB8+5X2Nb(T)9WO*Vt|gLB|i#r-n1JMfe$j%Ph5SXMv_Tanlh$I>cVX}KMHqanK)`S{y}?Q*p%'
'q?-9=^4NCH4UFSGW?!(CtBYJuyypt+p0$nV^cK}KotkY2nSQndYOQFUvFVS3FW3?x>5yfLCco*5cW<@V1M^*WZG|(A0JM*3=9Sna%;2QH>md}mDc9$Mt3&b<'
'9G4eqoW1wvVYXkau#+Amms%7l0aoEO^`4|P4TnM0ZoXb_xoe`WfYVjGR)VLd+Q_@wE=eFJLr%5%w|=*hWf977@eZKekfJ3;&92d7q=M_xzybcYrXD3rWUx7T'
'YtP}VErR+Qx_;gt-vsQ=`UR=~2p9|w1mvGLTHTzpFy}ehnsV!-@9w;Br-4Iy$oZ!4*Ll%|=GkY0?kD^ebMpDWalI!>y!qU=-PH<$+%SHQox|bdqM~E30Lu?y'
'n3PZbZ?~4RkXMF4T;wYcr7pG)Y;}^m^8PA7N*9B(6278}V(4CuTj{g8cnHCBjFEVl$#zR(-FckDWBH2kXxgM8VN!zSNkFRsiLX1J0e7IR-ok22b<Fh{0Zygn'
'a->J1Tx<^V>tdmaeJ-AACUvHtR6ZqlAQc@|nfUvSjY9l8N}O1iL6tlkQNk$0EBJwV(D`Rl=MKmb{EZ(M+d9%;77%vNLbvj%X;Q>8k8h<6zf-kMENA;DDq9?9'
'-c<)(XUOK-37=JI@*2_!1<`E;#sXJ^h*;4qBLW;_Mqdg3;l@sO8%u?U%P9drSYd47l>^xT9m~sM>V(|XYphyEM=oa(c$R$_SoS+4>&;O_fr;olaT?C<i;vRU'
'>Z8O<b2dxzIAJbmw!O!q;jOe}<&^u*MaLUU@LxD!+r5~a9H*A^$_=p#3ZXmDXf(Ty2c+E9sKficRn4c|8+AF4uuF9VhW4%}>6syvgejhm`t$tpvg6Jz^Mj8-'
'eJGh$HQ4_nYI6{Gq5tdgPaPK)6ehDCQ26}`@0(w}Y^jsD<S<4|2sfQd4)8g&VMHyPnhehJDk?3y@tj=^?fTchQ<Z_k7Q{seld!f7y2Ywsq3-BjBL~RJ=5!>)'
'HrxQ9A#UUcI9OGd_dxu$A@8Czd8m&#<QJ`NMc2=__EFY=>wz*j8D_g9qx!^5p-44yDeVK-*Gq+h`Egsr8Zykb6#8*Md3@|MQtCqirE)!j#`xE3#3A;CNhhW6'
'@xeBsNwb7OLeXHM-mx$+KjZN~rhI!XRzXlWcBNb!0{QQkA>m)Ta~ke$Z)|_T1I7V2h|AKhLTNs87A1I@LGcUyR57K}(+;tyyC8y-FEcM0@?iXGNBemODlLlH'
'Mr&W(;)1Rbej$uqHn(yDH1F0kV@~eFf?-tYTXATJy75xajc$TygYO-K*F4I#iR*jVbT#0Sdc1yVJ~!nF1^f>mIxj#WHstZO4$~XMjt_&5m)E?ylIEe-l>(D!'
'Mw7{vPF6HG$F-4mG8(?dUrM(jcMhCc>w~{Ex93TcYS@D19c^KVJU^TjPDbY1#=Uo*b{(Gv7n|GEQI?et?&_b)@xjCL01(3wMnc**8<dg)VsKfN?;QKq*-WZ)'
'q@;?J7@QA^o5@YrEzLRbqL85Xn}ts4#pD44_rq|5fCqw#p~C9+4;y)=Dp3c|*;ZXTMF8FuRosDAR|5(w(ZGuW>E%_fgyG!r7?lqe3%xP?6V05D$y(VTsvUOT'
'RQ^YFF+kR~czqgECf1UH;jIk8r2hg10EZw_%qg1HJbY!EE)z8(=N8PB9wvri&LcM3CAHa~Zirs4h%N)MGC{rV+dfuhiX)QYc8+1rR=`2V+zGHRbmllDEAxHp'
'5<BjB;1rT_p7x*Z(v-bV+>i}0tw8REAnOZTGG7W$nnx$)6{BQ+R|g58X;%wAPn`#jR3qZx53X`$$S}|bEg91k*?nTro+A~2&E&c8bAL%TiOH-=B0Dj={_BRs'
'zN_c*A9%woCER;T-@U)QT6Y*KB@#oPZMMU^)_cLl=aG57!=?!dINhxjR`Ad2cib22ZA>g)GQ}!oy<&=n)X-%0d%FsL#aNFDW*P*JZ{;gPC=bY4!wS)S?l&6g'
'P6jM($%?=15;a!OkD@n`fxgQD^$w&KfMrNsA$(M<5bG)@`poZAgOs7zR6<b(_4gthE?vWQx9oH$gktbx6#eVoF&Xe5SGj?`c4Ao`3W{RMIdubs0e`X_6hiFK'
'>wynbkbfB+=3+_Q?eSa6QO0d~q7yubxNApHZEG1Hp||VtF*`Epn)YU>IO$zG_leh1K>qkB&wVr6gi`E{(q4nMnP9&;s(RCZ@vfO7zGg>mK5c_Y1Sg6{rCRjF'
'>nlWlf=PT6<0yV|00WvnG1-5Un}Qq#53Bat2Q+!&tPTzivUE>N5ydL&9B19kAevrDy(wr<id^TwwLC1O<k;_iWc3Al{%JZBDtYK^2QRE%g{XBQK>RO)dC9ur'
'@dAER%=sun5g7ZDw^S%4sIPS^s2JBddi`&zG>k9cE<1bsW}oa3e?YeDQ&KX<O;c9qMe=CF{Aa$9kInQ9TT5DSP>=GYt(Gg*5b{QCyON-vRaXXK>xC<i&$tt2'
'8|53#7Dg@~Q`bM<Zrh)ti1;$!Az6zi<f(9>`#JA?QiV1cR(HH_v>Ov#2ANK_#yB+M?#;Nxp?jzw_nBF?R|2yAURu=_MNoe$F@vzOw_rP{es)Mih4nvYQqY%f'
'>%2udb2Id;8z%n8M|N}@WUOK6lk%1+62-uL>X?x0^(=9Y%o;c`$8#a?kCmpiihl|Q+S^8)dNsvuEqmd)J<2`*U_(F9{q6Sj<v84blBU_=ikeoN_)5W<J!VAw'
'Sv$Ibl}+*I)>Qi(5y*2+-JLaxaUo`dNhioHs31)Ge(_tp+tQA>$|Gm~rxc`xRrz3dgbl<pfRlVz)6nvzGF>2$pK9lNm6NZx+A;1hh!Y^wq`e~y=n}-<6<e4<'
'`*ul_NDY@>-g1WZ7hMwR?&tw9dyu+yY)xfY(Dxz$RK4(dU`!)mqVpN&qWD~f^V+}I=fWT<KC$YC_833-rC&s|-&P@2ne_N3A*te>b=X=Eek@lN46s;fVDhJ3'
'^0`2@#<^2lA)H$6PSfhS?T3)G^?2IsKn{*Dcx9GZ>V1;)^ERS%jQdawwN@DFWmV_f?Max{4e??;&;7K<!{h(WPyGD{+@L*u(wzmx;X?xF{eUiKds($%ES*Ym'
'@~7q)`@30*YJ|TX!8tw+6+2AlC{V-75h&3sf|h$oyap$59-bkLE$lBVKy1<dt?%3gfzvf!xPmrvI?%b7BV-?+9fzF(^>Rh&lc!B=7&O#O89HJ}8FtKJ8;*`G'
'#oG&ackie*nZ<;gf4|RfL;3yJAyqllmLUY|?+yJh`Mg~?S^7{RY=Fzu=lz$Qg`QXCXTenb*>MO)qZKpGp?w@Wfo$u4oGUgZBL8~f!=1#)#f($a&NhjkJ@-g*'
'+|f`#ugApNgEbuU`g6DMU9FM%e5J^mP;<ieN^1hy#Qk2#I>7|+b#|2|XaIX$?zVFH1@WR&)QzgwuL-#U&fG=uM=T9yeNcpwB+pV^h(zB$ZU5<M5gGqvOeN#N'
'yVgbJ5<P11H}3-iK3WH)3&P%7HtVj_bQtmFcv{$s2yL*)Ii>v+ikrq*68vX=BgM4X#SvNA<ltrz-GE}KFtMrB(_&Z~V@}q;HCn15$x(Pijd=!-;U6Z~PoF&^'
'0bkjt88le{rYSw?&3;UjOaX^gf3jGo@-xA5b()&3rH;aQgcyLDn(s~vim6}iRS{UhiHDj6J>u2XPyEZpPa~5t#8t}Zs&SnD&E{>^&$saZ?Mq`u7T-2s^Y-Ng'
'5)+D+M@{nPIEmmA7yZb?N<>N0X_d)2EVrU~e?CqMCxLH~R^AVFzT{4dEXfA5k3DvQzw3Hs$VEW)xg^+5DPt<^7U9(JiWKa~nq2hxULBb*a&Y))x)#rQM8Z`j'
'5Mmpf+M1+Y+jwRI6l<q@v9rV32JHH@XZtkinW?VkC)c278{WH8UyCuUxSAM<df#~$a<VV$$*tKVxAvl{Ax%2MO(8?<9gzDAuo(}9Y#e<svKuK1bD~XdngQEg'
'L7nRHl|{{+DiK><=XU8^(;|agSqoyRyOB8*W9)6x|2vRE#7gKSkO^)4rPK~0v0)}fs&ZswK%<HY$uk`?OTLu>pD>T&rdIcf)>1>~PWqh-w8JOS&+-VlyMsOK'
'<$oB)VeHgqUh%v_4krv{i6I|6O&`lof_mK2O+00|a#}BwG(2&@xM;48<nSGP3J~DqIBzs?Qy5Q-@Kyh&!Fl6@HL+`8)!~4;G)Oa=ex1SlfM$5+Zs#1`37rEW'
'!z>3k19J|3fFOu?xIDa~SwA9%l3cKzCXIk>O75p|Bg)~|2;&k|mVGr+)MWRWCz;vY*&2yR97bK*S$>Ualdz*yplSY%%`-Yj!e!v%y*ROG3UlCsxgRcY70fqQ'
'I+EX+tv<@*&DUsq7bbCHUntXdFs5vDGP@MDqpto`ZT!$seb}vPzciItw_Z$+jnO(0Q5Ge{`CApXVtSioC!~KF;1mjl7zHO2z0YfqFLzph`avY-bbj?E;T^30'
'M0>~Bqjf8;WegI*+rs3kK<7hqTBy|v&jIUCfY+C*1mZJJbyU4ZEF!~_=0L~)Q#G|Ii{z+<I;P6A(s1E(@9b>FumB7oBm>X(NL!?}$KeF2{j3Ul)B_f84h4M5'
'r9)#GV2+28fa6fK6R4CHHh)K#0Fad@oZV4_Gua#}uAjxJ*>@g%+T|%ID!}k^BS2Je^`Ky*>aIoivXvF>-dgPgyt#In;bPorwRyWMLjuMWcW+c-9boYE)8iS>'
        'q!Em8IIPsA1Y|^Xc@jro(IPy0;na@27uAd3Z1T<ga0jjkKx&+RWCtm!fw0>lEr)3m(rj-=U)Zw-<dl;K4GSxkTx(VhK(SI=UN7dA?Lv=#D>Qsd{nfTXm'
'pxA`o(dC=F2E!ILT@*bC=AU*b$fz9Y`RM+&%tUiKh(1zr0b-tBkC^=#vjh`Aw~`^(Z}03wRH!x87TD<`J_|NamNx>q96dEcpLR`+0~*>P<hAWD^Q;hQo+5F<'
'jkThMTR3~)t79?MN$7I(KMPx$mkUjhroGlDzyqi{sBeG_$w)uw3xyWMeG8?|PVNM@^!iEg8ZFVzg+!q|&_T%AV79u_NzR%3;O-V&1mRqcD2rPxeHk7RDVwj+'
'TW~`L2g!$~bL55kst*mQ@YGUoVM@Q%(QGB!3x%5Ts?P*J5jLjM`8si3@#uU;K+U@o3R88*v$BeZFy>Z6<)6zkIfDg$P{F3Tl%R;1Iy!4f7pFwT{pda1v(L5Y'
'UAt4vr3g<_cO7kXPR6q&HzDpZU9JzHml~E4e~KjPSIg1zc8JX3ffWqT3X9rhxdhiZcI14+hrSC3geN)~9kc)SH6NaPEv7|+!C8lhOJHLhpn<#SnL<zbQ`F1d'
'F7z+X3NUnd;Cc@zZzz1@J)*=%vm5Kr|KqESpnKN`SrPmK$ZOI60Z#t#%ak|7wNPLIs_$bSRqYTpZCMnKd^q}R>)k?yVOgo)24Y*7v8)rsT^@GGq}6!!?oE!^'
'd+U-g60>iG7RE;8d~$5Nais62-MIq@rRX&o)QtxeW#N_%7vMGGro#IN7SIar0k*UrI@bNMf~JE^W&+Qnet4Kt7e#+qzFUEV{w~l8@%_@&J<W=gc7p!^u7cs7'
'000006<H{x300yM00F%;#7F=D8E*!*vBYQl0ssI200dcD'
))
| 96.943396 | 130 | 0.672635 | [
"BSD-3-Clause"
] | baderj/refinery | test/units/formats/office/test_xlxtr.py | 10,276 | Python |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Service, UserProfile
class Command(ZulipBaseCommand):
help = """Given an existing bot, converts it into an outgoing webhook bot."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument('bot_email', metavar='<bot_email>', type=str,
help='email of bot')
parser.add_argument('service_name', metavar='<service_name>', type=str,
help='name of Service object to create')
parser.add_argument('base_url', metavar='<base_url>', type=str,
help='Endpoint URL of outgoing webhook')
# TODO: Add token and interface as arguments once OutgoingWebhookWorker
# uses these fields on the Service object.
def handle(self, *args: Any, **options: str) -> None:
bot_email = options['bot_email']
service_name = options['service_name']
base_url = options['base_url']
realm = self.get_realm(options)
if not bot_email:
print('Email of existing bot must be provided')
exit(1)
if not service_name:
print('Name for Service object must be provided')
exit(1)
if not base_url:
print('Endpoint URL of outgoing webhook must be provided')
exit(1)
# TODO: Normalize email?
bot_profile = self.get_user(email=bot_email, realm=realm)
if not bot_profile.is_bot:
print('User %s is not a bot' % (bot_email,))
exit(1)
if bot_profile.is_outgoing_webhook_bot:
print('%s is already marked as an outgoing webhook bot' % (bot_email,))
exit(1)
Service.objects.create(name=service_name,
user_profile=bot_profile,
base_url=base_url,
token='',
interface=1)
bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
bot_profile.save()
print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
| 37.783333 | 86 | 0.599912 | [
"Apache-2.0"
] | abhigyank/zulip | zerver/management/commands/convert_bot_to_outgoing_webhook.py | 2,267 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from .. import factories as f
from tests.utils import disconnect_signals, reconnect_signals
from taiga.projects.services.stats import get_stats_for_project
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.user = f.UserFactory.create()
m.project = f.ProjectFactory(is_private=False, owner=m.user)
m.role1 = f.RoleFactory(project=m.project)
m.role2 = f.RoleFactory(project=m.project)
m.null_points = f.PointsFactory(project=m.project, value=None)
m.default_points = f.PointsFactory(project=m.project, value=0)
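    # Point values are powers of two so the expected per-role totals asserted below are easy to trace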
m.points1 = f.PointsFactory(project=m.project, value=1)
m.points2 = f.PointsFactory(project=m.project, value=2)
m.points3 = f.PointsFactory(project=m.project, value=4)
m.points4 = f.PointsFactory(project=m.project, value=8)
m.points5 = f.PointsFactory(project=m.project, value=16)
m.points6 = f.PointsFactory(project=m.project, value=32)
m.open_status = f.UserStoryStatusFactory(is_closed=False)
m.closed_status = f.UserStoryStatusFactory(is_closed=True)
m.project.default_points = m.default_points
m.project.save()
m.user_story1 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story1.role_points.filter(role=m.role1).update(points=m.points1)
m.user_story2 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story2.role_points.filter(role=m.role1).update(points=m.points2)
m.user_story3 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story3.role_points.filter(role=m.role1).update(points=m.points3)
m.user_story4 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone=None)
m.user_story4.role_points.filter(role=m.role1).update(points=m.points4)
    # User stories 5 and 6 are in closed milestones
m.user_story5 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story5.role_points.filter(role=m.role1).update(points=m.points5)
m.user_story6 = f.UserStoryFactory(project=m.project,
status=m.open_status,
milestone__closed=True,
milestone__project=m.project)
m.user_story6.role_points.filter(role=m.role1).update(points=m.points6)
return m
def test_project_defined_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
data.user_story1.role_points.filter(role=data.role1).update(points=data.default_points)
data.user_story1.role_points.filter(role=data.role2).update(points=data.points1)
project_stats = get_stats_for_project(data.project)
assert project_stats["defined_points_per_role"] == {data.role1.pk: 62, data.role2.pk: 1}
def test_project_closed_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {}
data.user_story1.is_closed = True
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 1, data.role2.pk: 0}
data.user_story2.is_closed = True
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 3, data.role2.pk: 0}
data.user_story3.is_closed = True
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 7, data.role2.pk: 0}
data.user_story4.is_closed = True
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 15, data.role2.pk: 0}
data.user_story5.is_closed = True
data.user_story5.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 31, data.role2.pk: 0}
data.user_story6.is_closed = True
data.user_story6.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
project_stats = get_stats_for_project(data.project)
assert project_stats["closed_points"] == 63
assert project_stats["speed"] == 24
def test_project_assigned_points(client, data):
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 48, data.role2.pk: 0}
data.user_story1.milestone = data.user_story6.milestone
data.user_story1.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 49, data.role2.pk: 0}
data.user_story2.milestone = data.user_story6.milestone
data.user_story2.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 51, data.role2.pk: 0}
data.user_story3.milestone = data.user_story6.milestone
data.user_story3.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 55, data.role2.pk: 0}
data.user_story4.milestone = data.user_story6.milestone
data.user_story4.save()
project_stats = get_stats_for_project(data.project)
assert project_stats["assigned_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
| 44.447853 | 93 | 0.696894 | [
"MIT"
] | mattcongy/itshop | docker-images/taigav2/taiga-back/tests/integration/test_stats.py | 7,248 | Python |
import sys
import compileall
import importlib.util
import test.test_importlib.util
import os
import pathlib
import py_compile
import shutil
import struct
import tempfile
import time
import unittest
import io
from unittest import mock, skipUnless
try:
from concurrent.futures import ProcessPoolExecutor
_have_multiprocessing = True
except ImportError:
_have_multiprocessing = False
from test import support
from test.support import script_helper
from .test_py_compile import without_source_date_epoch
from .test_py_compile import SourceDateEpochTestMeta
class CompileallTestsBase:
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
self.bc_path = importlib.util.cache_from_source(self.source_path)
with open(self.source_path, 'w') as file:
file.write('x = 123\n')
self.source_path2 = os.path.join(self.directory, '_test2.py')
self.bc_path2 = importlib.util.cache_from_source(self.source_path2)
shutil.copyfile(self.source_path, self.source_path2)
self.subdirectory = os.path.join(self.directory, '_subdir')
os.mkdir(self.subdirectory)
self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
shutil.copyfile(self.source_path, self.source_path3)
def tearDown(self):
shutil.rmtree(self.directory)
def add_bad_source_file(self):
self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
with open(self.bad_source_path, 'w') as file:
file.write('x (\n')
def timestamp_metadata(self):
with open(self.bc_path, 'rb') as file:
data = file.read(12)
mtime = int(os.stat(self.source_path).st_mtime)
compare = struct.pack('<4sll', importlib.util.MAGIC_NUMBER, 0, mtime)
return data, compare
def recreation_check(self, metadata):
"""Check that compileall recreates bytecode when the new metadata is
used."""
if os.environ.get('SOURCE_DATE_EPOCH'):
raise unittest.SkipTest('SOURCE_DATE_EPOCH is set')
py_compile.compile(self.source_path)
self.assertEqual(*self.timestamp_metadata())
with open(self.bc_path, 'rb') as file:
bc = file.read()[len(metadata):]
with open(self.bc_path, 'wb') as file:
file.write(metadata)
file.write(bc)
self.assertNotEqual(*self.timestamp_metadata())
compileall.compile_dir(self.directory, force=False, quiet=True)
self.assertTrue(*self.timestamp_metadata())
def test_mtime(self):
# Test a change in mtime leads to a new .pyc.
self.recreation_check(struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
0, 1))
def test_magic_number(self):
        # Test a change in the magic number leads to a new .pyc.
self.recreation_check(b'\0\0\0\0')
def test_compile_files(self):
# Test compiling a single file, and complete directory
for fn in (self.bc_path, self.bc_path2):
try:
os.unlink(fn)
except:
pass
self.assertTrue(compileall.compile_file(self.source_path,
force=False, quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
not os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
self.assertTrue(compileall.compile_dir(self.directory, force=False,
quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
os.unlink(self.bc_path2)
# Test against bad files
self.add_bad_source_file()
self.assertFalse(compileall.compile_file(self.bad_source_path,
force=False, quiet=2))
self.assertFalse(compileall.compile_dir(self.directory,
force=False, quiet=2))
def test_compile_file_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
# we should also test the output
with support.captured_stdout() as stdout:
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path)))
self.assertRegex(stdout.getvalue(), r'Compiling ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_file_pathlike_ddir(self):
self.assertFalse(os.path.isfile(self.bc_path))
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path),
ddir=pathlib.Path('ddir_path'),
quiet=2))
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_path(self):
with test.test_importlib.util.import_state(path=[self.directory]):
self.assertTrue(compileall.compile_path(quiet=2))
with test.test_importlib.util.import_state(path=[self.directory]):
self.add_bad_source_file()
self.assertFalse(compileall.compile_path(skip_curdir=False,
force=True, quiet=2))
def test_no_pycache_in_non_package(self):
# Bug 8563 reported that __pycache__ directories got created by
# compile_file() for non-.py files.
data_dir = os.path.join(self.directory, 'data')
data_file = os.path.join(data_dir, 'file')
os.mkdir(data_dir)
# touch data/file
with open(data_file, 'w'):
pass
compileall.compile_file(data_file)
self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__')))
def test_optimize(self):
# make sure compiling with different optimization settings than the
# interpreter's creates the correct file names
optimize, opt = (1, 1) if __debug__ else (0, '')
compileall.compile_dir(self.directory, quiet=True, optimize=optimize)
cached = importlib.util.cache_from_source(self.source_path,
optimization=opt)
self.assertTrue(os.path.isfile(cached))
cached2 = importlib.util.cache_from_source(self.source_path2,
optimization=opt)
self.assertTrue(os.path.isfile(cached2))
cached3 = importlib.util.cache_from_source(self.source_path3,
optimization=opt)
self.assertTrue(os.path.isfile(cached3))
def test_compile_dir_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
with support.captured_stdout() as stdout:
compileall.compile_dir(pathlib.Path(self.directory))
line = stdout.getvalue().splitlines()[0]
self.assertRegex(line, r'Listing ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_pool_called(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(pool_mock.called)
def test_compile_workers_non_positive(self):
with self.assertRaisesRegex(ValueError,
"workers must be greater or equal to 0"):
compileall.compile_dir(self.directory, workers=-1)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_workers_cpu_count(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=0)
self.assertEqual(pool_mock.call_args[1]['max_workers'], None)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
@mock.patch('compileall.compile_file')
def test_compile_one_worker(self, compile_file_mock, pool_mock):
compileall.compile_dir(self.directory, quiet=True)
self.assertFalse(pool_mock.called)
self.assertTrue(compile_file_mock.called)
@mock.patch('concurrent.futures.ProcessPoolExecutor', new=None)
@mock.patch('compileall.compile_file')
def test_compile_missing_multiprocessing(self, compile_file_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(compile_file_mock.called)
class CompileallTestsWithSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class CompileallTestsWithoutSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
class EncodingTest(unittest.TestCase):
"""Issue 6716: compileall should escape source code when printing errors
to stdout."""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
with open(self.source_path, 'w', encoding='utf-8') as file:
file.write('# -*- coding: utf-8 -*-\n')
file.write('print u"\u20ac"\n')
def tearDown(self):
shutil.rmtree(self.directory)
def test_error(self):
try:
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(),encoding='ascii')
compileall.compile_dir(self.directory)
finally:
sys.stdout = orig_stdout
class CommandLineTestsBase:
"""Test compileall's CLI."""
@classmethod
def setUpClass(cls):
for path in filter(os.path.isdir, sys.path):
directory_created = False
directory = pathlib.Path(path) / '__pycache__'
path = directory / 'test.try'
try:
if not directory.is_dir():
directory.mkdir()
directory_created = True
with path.open('w') as file:
file.write('# for test_compileall')
except OSError:
sys_path_writable = False
break
finally:
support.unlink(str(path))
if directory_created:
directory.rmdir()
else:
sys_path_writable = True
cls._sys_path_writable = sys_path_writable
def _skip_if_sys_path_not_writable(self):
if not self._sys_path_writable:
raise unittest.SkipTest('not all entries on sys.path are writable')
def _get_run_args(self, args):
return [*support.optim_args_from_interpreter_flags(),
'-S', '-m', 'compileall',
*args]
def assertRunOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_ok(
*self._get_run_args(args), **env_vars)
self.assertEqual(b'', err)
return out
def assertRunNotOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_failure(
*self._get_run_args(args), **env_vars)
return rc, out, err
def assertCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertTrue(os.path.exists(path))
def assertNotCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertFalse(os.path.exists(path))
def setUp(self):
self.directory = tempfile.mkdtemp()
self.addCleanup(support.rmtree, self.directory)
self.pkgdir = os.path.join(self.directory, 'foo')
os.mkdir(self.pkgdir)
self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
# Create the __init__.py and a package module.
self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
def test_no_args_compiles_path(self):
# Note that -l is implied for the no args case.
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
self.assertCompiled(bazfn)
self.assertNotCompiled(self.initfn)
self.assertNotCompiled(self.barfn)
@without_source_date_epoch # timestamp invalidation test
def test_no_args_respects_force_flag(self):
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
pycpath = importlib.util.cache_from_source(bazfn)
# Set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# Without force, no recompilation
self.assertRunOK(PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# Now force it.
self.assertRunOK('-f', PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_no_args_respects_quiet_flag(self):
self._skip_if_sys_path_not_writable()
script_helper.make_script(self.directory, 'baz', '')
noisy = self.assertRunOK(PYTHONPATH=self.directory)
self.assertIn(b'Listing ', noisy)
quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
self.assertNotIn(b'Listing ', quiet)
# Ensure that the default behavior of compileall's CLI is to create
# PEP 3147/PEP 488 pyc files.
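    # The loop below generates one test method per pyc flavour: ext/switch are
    # frozen as default arguments and the closure is injected into the class
    # namespace via locals(), yielding test_pep3147_paths_normal,
    # test_pep3147_paths_optimize and test_pep3147_paths_doubleoptimize.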
for name, ext, switch in [
('normal', 'pyc', []),
('optimize', 'opt-1.pyc', ['-O']),
('doubleoptimize', 'opt-2.pyc', ['-OO']),
]:
def f(self, ext=ext, switch=switch):
script_helper.assert_python_ok(*(switch +
['-m', 'compileall', '-q', self.pkgdir]))
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
expected = sorted(base.format(sys.implementation.cache_tag, ext)
for base in ('__init__.{}.{}', 'bar.{}.{}'))
self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected)
# Make sure there are no .pyc files in the source directory.
self.assertFalse([fn for fn in os.listdir(self.pkgdir)
if fn.endswith(ext)])
locals()['test_pep3147_paths_' + name] = f
def test_legacy_paths(self):
# Ensure that with the proper switch, compileall leaves legacy
# pyc files, and no __pycache__ directory.
self.assertRunOK('-b', '-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertFalse(os.path.exists(self.pkgdir_cachedir))
expected = sorted(['__init__.py', '__init__.pyc', 'bar.py',
'bar.pyc'])
self.assertEqual(sorted(os.listdir(self.pkgdir)), expected)
def test_multiple_runs(self):
# Bug 8527 reported that multiple calls produced empty
# __pycache__/__pycache__ directories.
self.assertRunOK('-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__')
self.assertFalse(os.path.exists(cachecachedir))
# Call compileall again.
self.assertRunOK('-q', self.pkgdir)
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
self.assertFalse(os.path.exists(cachecachedir))
@without_source_date_epoch # timestamp invalidation test
def test_force(self):
self.assertRunOK('-q', self.pkgdir)
pycpath = importlib.util.cache_from_source(self.barfn)
# set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# without force, no recompilation
self.assertRunOK('-q', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# now force it.
self.assertRunOK('-q', '-f', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_recursion_control(self):
subpackage = os.path.join(self.pkgdir, 'spam')
os.mkdir(subpackage)
subinitfn = script_helper.make_script(subpackage, '__init__', '')
hamfn = script_helper.make_script(subpackage, 'ham', '')
self.assertRunOK('-q', '-l', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
def test_recursion_limit(self):
subpackage = os.path.join(self.pkgdir, 'spam')
subpackage2 = os.path.join(subpackage, 'ham')
subpackage3 = os.path.join(subpackage2, 'eggs')
for pkg in (subpackage, subpackage2, subpackage3):
script_helper.make_pkg(pkg)
subinitfn = os.path.join(subpackage, '__init__.py')
hamfn = script_helper.make_script(subpackage, 'ham', '')
spamfn = script_helper.make_script(subpackage2, 'spam', '')
eggfn = script_helper.make_script(subpackage3, 'egg', '')
self.assertRunOK('-q', '-r 0', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(
os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', '-r 1', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertNotCompiled(spamfn)
self.assertRunOK('-q', '-r 2', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertNotCompiled(eggfn)
self.assertRunOK('-q', '-r 5', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertCompiled(eggfn)
def test_quiet(self):
noisy = self.assertRunOK(self.pkgdir)
quiet = self.assertRunOK('-q', self.pkgdir)
self.assertNotEqual(b'', noisy)
self.assertEqual(b'', quiet)
def test_silent(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
_, quiet, _ = self.assertRunNotOK('-q', self.pkgdir)
_, silent, _ = self.assertRunNotOK('-qq', self.pkgdir)
self.assertNotEqual(b'', quiet)
self.assertEqual(b'', silent)
def test_regexp(self):
self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir)
self.assertNotCompiled(self.barfn)
self.assertCompiled(self.initfn)
def test_multiple_dirs(self):
pkgdir2 = os.path.join(self.directory, 'foo2')
os.mkdir(pkgdir2)
init2fn = script_helper.make_script(pkgdir2, '__init__', '')
bar2fn = script_helper.make_script(pkgdir2, 'bar2', '')
self.assertRunOK('-q', self.pkgdir, pkgdir2)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
self.assertCompiled(init2fn)
self.assertCompiled(bar2fn)
def test_d_compile_error(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir)
self.assertRegex(out, b'File "dinsdale')
def test_d_runtime_error(self):
bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception')
self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir)
fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz')
pyc = importlib.util.cache_from_source(bazfn)
os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc'))
os.remove(bazfn)
rc, out, err = script_helper.assert_python_failure(fn, __isolated=False)
self.assertRegex(err, b'File "dinsdale')
def test_include_bad_file(self):
rc, out, err = self.assertRunNotOK(
'-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir)
self.assertRegex(out, b'rror.*nosuchfile')
self.assertNotRegex(err, b'Traceback')
self.assertFalse(os.path.exists(importlib.util.cache_from_source(
self.pkgdir_cachedir)))
def test_include_file_with_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep)
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4)
self.assertCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertCompiled(f4)
def test_include_file_no_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'))
self.assertNotCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertNotCompiled(f4)
def test_include_on_stdin(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
p = script_helper.spawn_python(*(self._get_run_args(()) + ['-i', '-']))
p.stdin.write((f3+os.linesep).encode('ascii'))
script_helper.kill_python(p)
self.assertNotCompiled(f1)
self.assertNotCompiled(f2)
self.assertCompiled(f3)
self.assertNotCompiled(f4)
def test_compiles_as_much_as_possible(self):
bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error')
rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn,
bingfn, self.barfn)
self.assertRegex(out, b'rror')
self.assertNotCompiled(bingfn)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
def test_invalid_arg_produces_message(self):
out = self.assertRunOK('badfilename')
self.assertRegex(out, b"Can't list 'badfilename'")
def test_pyc_invalidation_mode(self):
script_helper.make_script(self.pkgdir, 'f1', '')
pyc = importlib.util.cache_from_source(
os.path.join(self.pkgdir, 'f1.py'))
self.assertRunOK('--invalidation-mode=checked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
self.assertRunOK('--invalidation-mode=unchecked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b01)
@skipUnless(_have_multiprocessing, "requires multiprocessing")
def test_workers(self):
bar2fn = script_helper.make_script(self.directory, 'bar2', '')
files = []
for suffix in range(5):
pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix))
os.mkdir(pkgdir)
fn = script_helper.make_script(pkgdir, '__init__', '')
files.append(script_helper.make_script(pkgdir, 'bar2', ''))
self.assertRunOK(self.directory, '-j', '0')
self.assertCompiled(bar2fn)
for file in files:
self.assertCompiled(file)
@mock.patch('compileall.compile_dir')
def test_workers_available_cores(self, compile_dir):
with mock.patch("sys.argv",
new=[sys.executable, self.directory, "-j0"]):
compileall.main()
self.assertTrue(compile_dir.called)
self.assertEqual(compile_dir.call_args[-1]['workers'], 0)
class CommmandLineTestsWithSourceEpoch(CommandLineTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class CommmandLineTestsNoSourceEpoch(CommandLineTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
if __name__ == "__main__":
unittest.main()
| 43.419732 | 87 | 0.611708 | [
"MIT"
] | jasam/ciclo_vida_datos_scraping | python/Lib/test/test_compileall.py | 25,965 | Python |
from PIL import Image
def img_to_binary(img: Image.Image, min_value=100) -> Image.Image:
    # Convert to grayscale, then threshold: pixels brighter than min_value map
    # to 255 and everything else to 0 (``p > min_value and 255`` evaluates to
    # 255 or False); the final convert('1') packs the result into a 1-bit image.
    return img.convert('L').point(lambda p: p > min_value and 255).convert('1')
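# Minimal usage sketch (not part of the original module): binarize a tiny
# 4-pixel grayscale strip.  The pixel values below are made up for illustration.
if __name__ == "__main__":
    sample = Image.new('L', (4, 1))
    sample.putdata([0, 90, 150, 255])      # two pixels below the default threshold of 100, two above
    binary = img_to_binary(sample)
    print(list(binary.getdata()))          # expected: [0, 0, 255, 255]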
| 26.5 | 79 | 0.704403 | [
"BSD-3-Clause"
] | Amjad50/Fyp | utils/image.py | 159 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:41:54 2020
@author: mahjaf
Automatic sleep scoring implemented for Zmax headband.
"""
#%% Reading EDF section
#####===================== Importing libraries ==========================#####
import mne
import numpy as np
from numpy import loadtxt
import h5py
import time
import os
from ssccoorriinngg import ssccoorriinngg
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report
import pandas as pd
import tensorflow as tf
from scipy import signal
#####==================== Defining required paths r=======================#####
Main_path = "P:/3013080.01/"
subject_Id_folder = Main_path + "Autoscoring/ssccoorriinngg/"
Data_folder = Main_path + "Zmax_Data/"
Hypnogram_folder = Main_path + "somno_scorings/Rathiga/"
#####===================== Reading EDF data files=========================#####
subject_ids = loadtxt(subject_Id_folder+"Zmax/Subject_ids_excluding 22_2.txt", dtype = 'str',delimiter='\n')
#####============= create an object of ssccoorriinngg class ==============#####
Object = ssccoorriinngg(filename='', channel='', fs = 256, T = 30)
#%% Load featureset and labels
path = "P:/3013080.01/Autoscoring/features/"
filename = "Zmax_Rathiga_scorings_ch-ch2+AccFeats_190620"
subjects_dic, hyp_dic = Object.load_dictionary(path, filename)
#%% ================================Training part==============================
# Training perentage
train_size = .7
n_train = round(train_size * len(subject_ids))
#######=== Randomly shuffle subjects to choose train and test splits ===#######
subject_ids = np.random.RandomState(seed=0).permutation(subject_ids)
#######=============== Initialize train and test arrays ================#######
sample_subject = "subjectP_12_night1_scoring.csv.spisop.new - Copy"
sample_hyp = "hypP_12_night1_scoring.csv.spisop.new - Copy"
X_train = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
X_test = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
y_train = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
y_test = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
########======= Picking the train subjetcs and concatenate them =======########
tic = time.time()
train_subjects_list = ["P_12_night1_scoring.csv.spisop.new - Copy",
"P_13_night2_scoring.csv.spisop.new - Copy",
"P_15_night2_scoring.csv.spisop.new - Copy",
"P_16_night1_scoring.csv.spisop.new - Copy",
"P_18_night1_scoring.csv.spisop.new - Copy",
"P_20_night1_scoring.csv.spisop.new - Copy",
"P_21_night1_scoring.csv.spisop.new - Copy",
"P_23_night1_scoring.csv.spisop.new - Copy"]
for c_subj in train_subjects_list:
# train hypnogram
str_train_hyp = 'hyp' + str(c_subj)
# train featureset
str_train_feat = 'subject' + str(c_subj)
# create template arrays for featurs and label
tmp_x = subjects_dic[str_train_feat]
tmp_y = hyp_dic[str_train_hyp]
# Concatenate features and labels
X_train = np.row_stack((X_train, tmp_x))
y_train = np.row_stack((y_train, tmp_y))
del tmp_x, tmp_y
print('Training set was successfully created in : {} secs'.format(time.time()-tic))
#%% ================================Test part==============================%%#
########======== Picking the test subjetcs and concatenate them =======########
tic = time.time()
test_subjects_list = []
tst_subj_list = ["P_12_night2_scoring.csv.spisop.new - Copy",
"P_12_night3_scoring.csv.spisop.new - Copy",
"P_13_night3_scoring.csv.spisop.new - Copy",
"P_14_night3_scoring.csv.spisop.new - Copy",
"P_15_night3_scoring.csv.spisop.new - Copy",
"P_16_night3_scoring.csv.spisop.new - Copy",
"P_18_night2_scoring.csv.spisop.new - Copy",
"P_18_night3_scoring.csv.spisop.new - Copy",
"P_20_night2_scoring.csv.spisop.new - Copy",
"P_20_night3_scoring.csv.spisop.new - Copy",
"P_21_night2_scoring.csv.spisop.new - Copy",
"P_21_night3_scoring.csv.spisop.new - Copy"]
for c_subj in tst_subj_list:
# test hypnogram
str_test_hyp = 'hyp' + str(c_subj)
# test featureset
str_test_feat = 'subject' + str(c_subj)
# create template arrays for featurs and label
tmp_x = subjects_dic[str_test_feat]
tmp_y = hyp_dic[str_test_hyp]
# Concatenate features and labels
X_test = np.row_stack((X_test, tmp_x))
y_test = np.row_stack((y_test, tmp_y))
# keep the subject id
test_subjects_list.append(str_test_feat)
# remove for next iteration
del tmp_x, tmp_y, str_test_feat, str_test_hyp
print('Test set was successfully created in : {} secs'.format(time.time()-tic))
print(f'Raw train and test data were created.')
########================== Replace any probable NaN ===================########
X_train = Object.replace_NaN_with_mean(X_train)
X_test = Object.replace_NaN_with_mean(X_test)
########================== Replace any probable inf ===================########
X_train = Object.replace_inf_with_mean(X_train)
X_test = Object.replace_inf_with_mean(X_test)
########==================== Z-score of features ======================########
X_train, X_test = Object.Standardadize_features(X_train, X_test)
########========== select features only on first iteration ============########
td = 5 # Time dependence: number of epochs of memory
X_train_td = Object.add_time_dependence_backward(X_train, n_time_dependence=td,
padding_type = 'sequential')
X_test_td = Object.add_time_dependence_backward(X_test, n_time_dependence=td,
padding_type = 'sequential')
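# Note (assumption, based on the argument names): add_time_dependence_backward
# is understood to append the feature vectors of the td preceding epochs to
# every epoch, so each row of X_train_td/X_test_td carries the current epoch
# plus its recent history; 'sequential' padding presumably handles the first
# epochs of each recording, which have fewer than td predecessors.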
########====================== Feature Selection ======================########
y_train_td = Object.binary_to_single_column_label(y_train)
########========== select features only on first iteration ============########
# =============================================================================
# ranks, Feat_selected, selected_feats_ind = Object.FeatSelect_Boruta(X_train_td,
# y_train_td[:,0], max_iter = 50, max_depth = 7)
#
# #######===================== Save selected feats =======================#######
#
# path = "P:/3013080.01/Autoscoring/features/"
# filename = "Selected_Features_BoturaNoTimeDependency_5_Backward_Zmax_ch1-ch2+Acc_200620"
# with open(path+filename+'.pickle',"wb") as f:
# pickle.dump(selected_feats_ind, f)
# =============================================================================
########################### Load selected feats ###############################
path = "P:/3013080.01/Autoscoring/features/"
filename = "Selected_Features_BoturaAfterTD=5_Backward_Zmax_ch1-ch2+Acc_200620"
#filename = "sleep_scoring_NoArousal_8channels_selected_feats_NEW"
with open(path + filename + '.pickle', "rb") as f:
selected_feats_ind = pickle.load(f)
########=================== Apply selected features ===================########
X_train = X_train_td[:, selected_feats_ind]
X_test = X_test_td[:, selected_feats_ind]
########============== Define classifier of interest ==================########
y_pred = Object.XGB_Modelling(X_train, y_train,X_test, y_test, n_estimators = 500)
#y_pred = Object.KernelSVM_Modelling(X_train, y_train,X_test, y_test, kernel='rbf')
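# Note: the ANN call below reassigns y_pred, so the XGBoost predictions above
# are discarded and all following metrics and plots are based on the ANN only.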
y_pred = Object.ANN_classifier(X_train, y_train, X_test, units_h1=600, units_h2 = 300, units_output = 5,
activation_out = 'softmax',
init = 'uniform', activation = 'relu', optimizer = 'adam',
loss = 'categorical_crossentropy', metrics=[tf.keras.metrics.Recall()],
h3_status = 'deactive', units_h3 = 50, epochs = 100, batch_size = 100)
########===== Metrics to assess the model performance on test data ====########
Acc, Recall, prec, f1_sc, kappa, mcm= Object.multi_label_confusion_matrix(y_test, y_pred)
########================= Creating subjective outputs =================########
Object.create_subjecive_results(y_true=y_test, y_pred=y_pred,
test_subjects_list = test_subjects_list,
subjects_data_dic = subjects_dic,
fname_save = "results")
########============= find number of epochs per stage =================########
Object.find_number_of_samples_per_class(y_test, including_artefact = False)
########================== Comparative hypnogram ======================########
hyp_true = Object.binary_to_single_column_label(y_test)
Object.plot_comparative_hyp(hyp_true = hyp_true, hyp_pred = y_pred, mark_REM = 'active')
########==================== Plot subjectve hypnos ====================########
Object.plot_subjective_hypno(y_true=y_test, y_pred=y_pred,
test_subjects_list=test_subjects_list,
subjects_data_dic=subjects_dic,
save_fig = False,
directory="P:/3013080.01/Autoscoring/ssccoorriinngg/")
########=================== Plot overall conf-mat =======================######
Object.plot_confusion_matrix(y_test,y_pred, target_names = ['Wake','N1','N2','SWS','REM'],
title='Confusion matrix of ssccoorriinngg algorithm',
cmap = None,
normalize=True)
########================== Plot subjective conf-mat ==================########
Object.plot_confusion_mat_subjective(y_true=y_test, y_pred=y_pred,
test_subjects_list=test_subjects_list,
subjects_data_dic=subjects_dic)
########========================== Save figure =======================#########
Object.save_figure(saving_format = '.png',
directory="P:/3013080.02/Mahdad/Github/ssccoorriinngg/",
saving_name = 'test_subject_all' + str(c_subj), dpi = 900,
full_screen = False)
| 41.956693 | 137 | 0.569016 | [
"MIT"
] | MahdadJafarzadeh/ssccoorriinngg | Zmax_autoscoring_controlled_train_test_split.py | 10,657 | Python |
import warnings
from mlprimitives.utils import import_object
_RESAMPLE_AGGS = [
'mean',
'median',
'prod',
'quantile',
'std',
'sum',
'var',
]
def resample(df, rule, on=None, groupby=(), aggregation='mean',
reset_index=True, time_index=None):
"""pd.DataFrame.resample adapter.
Call the `df.resample` method on the given time_index
and afterwards call the indicated aggregation.
Optionally group the dataframe by the indicated columns before
performing the resampling.
If groupby option is used, the result is a multi-index datagrame.
Args:
df (pandas.DataFrame):
DataFrame to resample.
rule (str or int):
The offset string or object representing target conversion or an
integer value that will be interpreted as the number of seconds.
on (str or None):
Name of the column to use as the time index. If ``None`` is given, the
DataFrame index is used.
groupby (list):
Optional list of columns to group by.
aggregation (callable or str):
Function or name of the function to use for the aggregation. If a name is given, it
can either be one of the standard pandas aggregation functions or the fully qualified
name of a python function that will be imported and used.
reset_index (bool):
Whether to reset the index after aggregating
time_index (str or None):
Deprecated: This has been renamed to `on`.
Name of the column to use as the time index. If ``None`` is given, the
            DataFrame index is used.
Returns:
pandas.Dataframe:
resampled dataframe
"""
if on is None and time_index is not None:
message = (
'resample `time_series` argument deprecated and will be removed'
' in future versions of MLPrimitives. Please use `on` instead.'
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
on = time_index
if groupby:
df = df.groupby(groupby)
if isinstance(rule, int):
rule = '{}s'.format(rule)
dtir = df.resample(rule, on=on)
if not callable(aggregation) and aggregation not in _RESAMPLE_AGGS:
try:
aggregation = import_object(aggregation)
except (AttributeError, ImportError, ValueError):
pass
df = dtir.aggregate(aggregation)
for name in df.index.names:
if name in df:
del df[name]
if reset_index:
df.reset_index(inplace=True)
return df
def _join_names(names):
"""Join the names of a multi-level index with an underscore."""
levels = (str(name) for name in names if name != '')
return '_'.join(levels)
def unstack(df, level=-1, reset_index=True):
"""pd.DataFrame.unstack adapter.
Call the `df.unstack` method using the indicated level and afterwards
join the column names using an underscore.
Args:
df (pandas.DataFrame): DataFrame to unstack.
level (str, int or list): Level(s) of index to unstack, can pass level name
reset_index (bool): Whether to reset the index after unstacking
Returns:
pandas.Dataframe: unstacked dataframe
"""
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
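# Minimal usage sketch (illustrative, not part of the library): resample a toy
# per-sensor series to daily means, then pivot one column per sensor with
# unstack.  The column names 'timestamp', 'sensor' and 'value' are made up.
if __name__ == '__main__':
    import pandas as pd

    toy = pd.DataFrame({
        'timestamp': pd.date_range('2020-01-01', periods=6, freq='12H'),
        'sensor': ['a', 'b'] * 3,
        'value': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    })

    # One row per (sensor, day) with the daily mean of 'value'.
    daily = resample(toy, rule='1D', on='timestamp', groupby=['sensor'],
                     aggregation='mean')
    print(daily)

    # One 'value_<sensor>' column per sensor, index levels joined with '_'.
    wide = unstack(daily.set_index(['timestamp', 'sensor']), level='sensor')
    print(wide)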
| 30.333333 | 97 | 0.633314 | [
"MIT"
] | AlexanderGeiger/MLPrimitives | mlprimitives/adapters/pandas.py | 3,458 | Python |
"""Rectify function"""
import torch
from torch.autograd import Function
from encoding import cpu
if torch.cuda.device_count() > 0:
from encoding import gpu
__all__ = ['rectify']
class _rectify(Function):
@staticmethod
def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):
ctx.save_for_backward(x)
        # effective kernel extent under dilation; with the assumed kernel_size
        # of 3 this is k + (k-1)*(d-1) = k + 2*(d-1)
        kernel_size = [k + 2 * (d - 1) for k, d in zip(kernel_size, dilation)]
ctx.kernel_size = kernel_size
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.average = average
if x.is_cuda:
gpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
else:
cpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
ctx.mark_dirty(y)
return y
@staticmethod
def backward(ctx, grad_y):
x, = ctx.saved_variables
if x.is_cuda:
gpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
else:
cpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
ctx.mark_dirty(grad_y)
return grad_y, None, None, None, None, None, None
rectify = _rectify.apply
| 32.023256 | 83 | 0.6122 | [
"MIT"
] | Womcos/SCARF | encoding/functions/rectify.py | 1,377 | Python |
from pyspark.sql import SparkSession
def get_spark():
return (SparkSession.builder
.appName("simpleapp")
.master("local")
.getOrCreate())
from pyspark import SparkConf, SparkContext
import sys
def main(sc, args):
print("SimpleApp Arguments")
for x in args:
        print(x)
simple_data = [
("Group A", "Section 1", 50),
("Group B", "Section 2", 75),
("Group A", "Section 1", 25),
("Group C", "section 2", 75)
]
simple_df = get_spark().createDataFrame(
simple_data,
["Group", "Section", "Amount"]
)
simple_df.show()
if __name__ == "__main__":
# Configure Spark
sc = get_spark()
# Execute Main functionality
main(sc, sys.argv)
| 20.891892 | 44 | 0.564036 | [
"Apache-2.0"
] | MediaIQ/databricks-client-java | src/test/resources/simpleapp.py | 773 | Python |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import product
from sklearn.preprocessing import LabelEncoder
# =============================================================================
# Data loading and preprocessing
# =============================================================================
def lag_feature(df, lags, col):
tmp = df[['date_block_num','shop_id','item_id',col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
return df
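# Illustration (comment only): with lags=[1] and col='item_cnt_month',
# lag_feature copies (date_block_num, shop_id, item_id, item_cnt_month),
# shifts date_block_num forward by 1 and left-joins it back, so the row for
# month m gains item_cnt_month_lag_1 holding month m-1's value for the same
# shop/item (NaN where that combination did not exist a month earlier).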
items = pd.read_csv(r'dataset\items.csv')
shops = pd.read_csv(r'dataset\shops.csv')
cats = pd.read_csv(r'dataset\item_categories.csv')
train = pd.read_csv(r'dataset\sales_train.csv')
test = pd.read_csv(r'dataset\test.csv').set_index('ID')
train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price<0, 'item_price'] = median
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11
shops['shop_name'] = shops['shop_name'].apply(lambda x: x.lower()).str.replace('[^\w\s]', '').str.replace('\d+','').str.strip()
shops['city'] = shops['shop_name'].str.partition(' ')[0]
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops['shop_type'] = shops['shop_name'].apply(lambda x: 'мтрц' if 'мтрц' in x else 'трц' if 'трц' in x else 'трк' if 'трк' in x else 'тц' if 'тц' in x else 'тк' if 'тк' in x else 'NO_DATA')
shops['shop_type'] = LabelEncoder().fit_transform(shops['shop_type'])
shops = shops[['shop_id','city_code','shop_type']]
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
# if subtype is nan then type
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
items.drop(['item_name'], axis=1, inplace=True)
matrix = []
cols = ['date_block_num','shop_id','item_id']
for i in range(34):
sales = train[train.date_block_num==i]
matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
train['revenue'] = train['item_price'] * train['item_cnt_day']
item_price_lag = train.groupby(['date_block_num','item_id']).agg({'item_price':['mean']})
item_price_lag.columns = ['average_item_price']
item_price_by_shop_lag = train.groupby(['date_block_num','shop_id', 'item_id']).agg({'item_price':['mean']})
item_price_by_shop_lag.columns = ['average_item_price_by_shop']
group = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16))
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True) # 34 month
matrix = pd.merge(matrix, item_price_lag, on=['date_block_num','item_id'], how='left')
matrix['average_item_price'] = matrix['average_item_price'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price')
matrix.drop(['average_item_price'], axis=1, inplace=True)
matrix = pd.merge(matrix, item_price_by_shop_lag, on=['date_block_num','shop_id','item_id'], how='left')
matrix['average_item_price_by_shop'] = matrix['average_item_price_by_shop'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price_by_shop')
matrix.drop(['average_item_price_by_shop'], axis=1, inplace=True)
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['shop_type'] = matrix['shop_type'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
shop_mean = matrix.groupby(['shop_id']).agg({'item_cnt_month': ['mean']})
shop_mean.columns = ['shop_mean']
shop_mean.reset_index(inplace=True)
shop_item_mean = matrix.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})
shop_item_mean.columns = ['shop_item_mean']
shop_item_mean.reset_index(inplace=True)
group = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, shop_mean, on=['shop_id'], how='left')
matrix = pd.merge(matrix, shop_item_mean, on=['item_id','shop_id'], how='left')
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
matrix = lag_feature(matrix, [1,2,3], 'item_cnt_month')
matrix_last = matrix[matrix.date_block_num > 2]
def fill_na(df):
for col in df.columns:
if ('_lag_' in col) & (df[col].isnull().any()):
if ('item_cnt' in col):
df[col].fillna(0, inplace=True)
if ('shop_mean' in col):
df[col].fillna(0, inplace=True)
if ('average_item_price' in col):
df[col].fillna(0, inplace=True)
return df
matrix = fill_na(matrix_last)
matrix_last.to_pickle('dataset/traintest.pkl')
# =============================================================================
# correlation Matrix
# =============================================================================
cor_data = matrix_last[['shop_item_mean','date_block_num','date_item_avg_item_cnt_lag_1','item_category_id','average_item_price_lag_2','average_item_price_lag_1','item_cnt_month_lag_1','item_cnt_month']]
corr = cor_data.corr()
mask = np.zeros_like(corr, dtype=bool)  # all-False mask, so the full matrix is shown
f,ax = plt.subplots(figsize=(15, 20))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5},annot=True)
plt.savefig('outputdata/correlation.png')
| 44.529412 | 204 | 0.660238 | [
"MIT"
] | enkaranfiles/bil476_predict-future-sales | preprocessing.py | 7,598 | Python |
from nepc import nepc
from nepc.util import util
import pandas as pd
import os
import pytest
import platform
# TODO: remove dependence on csv; put function in scraper that uses built-in
# readlines function
import csv
# TODO: test that all values in [nepc]/tests/data are in the nepc database
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_states_table_has_species_metadata(data_config, nepc_connect):
"""
check that the states table has a species_id column
"""
NEPC_DATA = data_config[0]
number_of_states = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert len(df_states) == number_of_states
assert 'species_id' in list(df_states.columns)
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_csdata_lines(data_config, nepc_connect):
DIR_NAMES = data_config[1]
cs_lines = 0
for directoryname in DIR_NAMES:
directory = os.fsencode(directoryname)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".met") or filename.endswith(".mod"):
continue
else:
# subtract 1 to account for header
cs_lines += util.wc_fxn(directoryname + filename) - 1
assert cs_lines == nepc.count_table_rows(nepc_connect[1], "csdata")
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_data_entered(data_config, nepc_connect, local):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
dat_file = row['filename']
df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\t',
usecols=['e_energy', 'sigma'])
e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
# assert e_energy == pytest.approx(df['e_energy'].tolist())
assert sigma == pytest.approx(df['sigma'].tolist())
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_meta_entered(data_config, nepc_connect, local, dbug):
NEPC_DATA = data_config[0]
if local is False or platform.node() == 'ppdadamsonlinux':
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
delimiter='\t')
else:
cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
delimiter='\t')
for index, row in cs_dat_files.iterrows():
cs_id = row['cs_id']
met_file = row['filename']
if dbug:
print(cs_id, met_file)
e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
meta_cols = ['cs_id', 'process', 'units_e',
'units_sigma', 'ref', 'lhsA',
'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',
'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',
'background', 'lpu', 'upu']
with open(NEPC_DATA + met_file + ".met", 'r', newline='') as f:
reader = csv.reader(f, delimiter='\t')
next(reader)
meta_disk = list(reader)[0]
meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]
for i in [0, 11, 12, 13, 14]:
meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
for i in [2, 3, 9, 10, 16, 17]:
meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\N'
else meta_disk[i])
meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]
for i in list(range(0, len(meta_cols)))]
if dbug:
print('meta_db: {}\t from {}'.format(meta_db, met_file))
for i in range(len(meta_cols)):
if dbug:
print('meta_db[{}]: {}\t from {}'.format(str(i), str(meta_db[i]), met_file))
if (type(meta_db[i]) is float):
assert (pytest.approx(meta_disk[i]) ==
pytest.approx(meta_db[i]))
elif meta_db[i] is None:
assert meta_disk[i] == '\\N'
else:
assert meta_disk[i] == meta_db[i]
| 39.552632 | 92 | 0.578399 | [
"CC0-1.0"
] | USNavalResearchLaboratory/nepc | tests/test_mysql_build.py | 4,509 | Python |
# encoding: utf-8
import unittest
from cool.core import constants
class IntConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (1, 'test1')
class IntStringCodeConstants(constants.Constants):
TEST = ('test', 'test0')
TEST1 = (1, 'test1')
class ConstantsTests(unittest.TestCase):
def test_unique(self):
with self.assertRaises(ValueError):
class TestUniqueConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (0, 'test1')
def test_code(self):
self.assertEqual(IntConstants.TEST0, 0)
self.assertEqual(IntConstants.TEST1, 1)
def test_desc(self):
class TestDescConstants(constants.Constants):
TEST0 = (0, 'test')
TEST1 = (1, 'test')
TEST2 = (2, 'test2')
self.assertEqual(TestDescConstants.TEST0.desc, 'test')
self.assertEqual(TestDescConstants.TEST1.desc, 'test')
self.assertEqual(TestDescConstants.TEST2.desc, 'test2')
def test_equal(self):
class TestEqualConstants(constants.Constants):
TEST = (0, 'test')
class TestEqualConstants2(constants.Constants):
TEST = (0, 'test')
self.assertEqual(TestEqualConstants.TEST, TestEqualConstants.TEST)
self.assertNotEqual(TestEqualConstants.TEST, TestEqualConstants2.TEST)
self.assertEqual(TestEqualConstants.TEST, 0)
self.assertEqual(TestEqualConstants2.TEST, 0)
def test_string_code(self):
self.assertEqual(IntStringCodeConstants.TEST, 'test')
self.assertEqual(IntStringCodeConstants.TEST.code, 'test')
def test_choices_list(self):
self.assertListEqual(IntStringCodeConstants.get_choices_list(), [('test', 'test0'), (1, 'test1')])
def test_desc_dict(self):
self.assertListEqual(IntStringCodeConstants.get_desc_dict(name_key='name'), [
{'name': 'TEST', 'code': 'test', 'desc': 'test0'},
{'name': 'TEST1', 'code': 1, 'desc': 'test1'},
])
| 33.114754 | 106 | 0.641584 | [
"BSD-3-Clause"
] | 007gzs/django-cool | tests/core/test_constants.py | 2,020 | Python |
from pomodoro import main
if __name__ == "__main__":
main()
| 11 | 26 | 0.666667 | [
"MIT"
] | Dev3XOR/pomodoro | src/pomodoro/__main__.py | 66 | Python |
import pytest
from icepyx.core.visualization import Visualize
import icepyx.core.visualization as vis
@pytest.mark.parametrize(
"n, exp",
[
(
1,
[
"ATL06_20200702014158_01020810_004_01.h5",
"ATL06_20200703011618_01170810_004_01.h5",
],
),
(
2,
[
"ATL06_20200612151119_11920712_004_01.h5",
"ATL06_20200616021517_12450710_004_01.h5",
"ATL06_20200702014158_01020810_004_01.h5",
"ATL06_20200703011618_01170810_004_01.h5",
],
),
(
3,
[
"ATL06_20200612151119_11920712_004_01.h5",
"ATL06_20200616021517_12450710_004_01.h5",
"ATL06_20200702014158_01020810_004_01.h5",
"ATL06_20200703011618_01170810_004_01.h5",
],
),
],
)
def test_files_in_latest_cycles(n, exp):
files = [
"ATL06_20190710071617_01860412_004_01.h5",
"ATL06_20190713182016_02390410_004_01.h5",
"ATL06_20200612151119_11920712_004_01.h5",
"ATL06_20200616021517_12450710_004_01.h5",
"ATL06_20200702014158_01020810_004_01.h5",
"ATL06_20200703011618_01170810_004_01.h5",
]
cycles = [8, 7, 4]
obs = vis.files_in_latest_n_cycles(files, cycles=cycles, n=n)
assert obs == exp
@pytest.mark.parametrize(
"filename, expect",
[
('ATL06_20190525202604_08790310_004_01.h5', [879, 3, '2019-05-25']),
('ATL06_20190614194425_11840310_004_01.h5', [1184, 3, '2019-06-14']),
('ATL07-02_20190624063616_13290301_004_01.h5', [1329, 3, '2019-06-24']),
('ATL07-02_20190602190916_10010301_004_01.h5', [1001, 3, '2019-06-02']),
('ATL10-02_20190611072656_11310301_004_01.h5', [1131, 3, '2019-06-11']),
('ATL10-02_20190731045538_05060401_004_01.h5', [506, 4, '2019-07-31']),
('ATL12_20190615023544_11890301_004_01.h5', [1189, 3, '2019-06-15']),
('ATL12_20190721170332_03610401_004_01.h5', [361, 4, '2019-07-21']),
],
)
def test_gran_paras(filename, expect):
para_list = vis.gran_paras(filename)
assert para_list == expect
@pytest.mark.parametrize(
"product, date_range, bbox, expect",
[
("ATL06", ["2019-6-15", "2019-7-1"], [-64.5, -66, -63.5, -65], 3240),
("ATL07", ["2019-7-1", "2019-8-1"], [-65, -66, -64.5, -65], 7160),
("ATL08", ["2019-6-15", "2019-7-1"], [-18, 63, -17, 64], 852),
("ATL10", ["2019-8-1", "2019-9-1"], [-64, -67, -60, -60], 7375),
("ATL12", ["2019-7-1", "2019-10-1"], [-65.5, -65.5, -64.5, -65], 95),
("ATL13", ["2019-6-1", "2019-12-1"], [-75, -51, -74, -50], 20),
],
)
def test_visualization_date_range(product, date_range, bbox, expect):
region_viz = Visualize(product=product, spatial_extent=bbox, date_range=date_range)
data_size = region_viz.parallel_request_OA().size
assert data_size == expect
@pytest.mark.parametrize(
"product, bbox, cycles, tracks, expect",
[
("ATL06", [-64.5, -66, -63.5, -65], ["03"], ["1306"], 3240),
("ATL07", [-65, -66, -64.5, -65], ["04"], ["0186"], 7130),
("ATL08", [-18, 63, -17, 64], ["03"], ["1320"], 852),
("ATL10", [-64, -67, -60, -60], ["04"], ["0681"], 6015),
("ATL12", [-65.5, -65.5, -64.5, -65], ["05"], ["0041"], 95),
("ATL13", [-75, -51, -74, -50], ["05"], ["0293"], 20),
],
)
def test_visualization_orbits(product, bbox, cycles, tracks, expect):
region_viz = Visualize(
product=product, spatial_extent=bbox, cycles=cycles, tracks=tracks
)
data_size = region_viz.parallel_request_OA().size
assert data_size == expect
| 34.504587 | 87 | 0.576442 | [
"BSD-3-Clause"
] | ICESat2-SlideRule/icepyx | icepyx/tests/test_visualization.py | 3,761 | Python |
# CODE1 --- Prepare the list of drug / side-effect relations from the SIDER database ---
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
path = '/home/16AT72P01/Excelra/SIDER1/output/adverse_effects.tsv'
files = glob.glob(path)
unique_sideeffect = set()
unique_drug = set()
unique_pair = set()
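# The TSV is assumed to contain one drug / adverse-effect pair per row, with a
# header providing at least the 'drug_name' and 'adverse_effect' columns used
# below, e.g. (values are made up):
#   drug_name<TAB>adverse_effect
#   aspirin<TAB>nausea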
with open(path) as f1:
reader = csv.DictReader(f1, quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
print(reader)
for row in reader:
unique_drug.add(row['drug_name'])
unique_sideeffect.add(row['adverse_effect'])
val = row['drug_name']+"|"+row['adverse_effect']
unique_pair.add(val)
f1.close()
print(len(unique_drug))
print(len(unique_sideeffect))
print(len(unique_pair))
| 25.551724 | 105 | 0.735493 | [
"MIT"
] | ankita094/BioIntMed | src/dictionaryCode/other/siderData1.py | 741 | Python |
import csv
def save_statistics(experiment_name, line_to_add):
with open("{}.csv".format(experiment_name), 'a') as f:
writer = csv.writer(f)
writer.writerow(line_to_add)
def load_statistics(experiment_name):
data_dict = dict()
with open("{}.csv".format(experiment_name), 'r') as f:
lines = f.readlines()
        data_labels = lines[0].strip().split(",")  # strip() also drops any '\r' left by the csv writer
del lines[0]
for label in data_labels:
data_dict[label] = []
for line in lines:
            data = line.strip().split(",")
for key, item in zip(data_labels, data):
data_dict[key].append(item)
return data_dict
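# Minimal usage sketch (not part of the original module): the first row written
# for an experiment acts as the header for load_statistics, so write the column
# names once before appending data rows.  'demo_run' is an arbitrary example
# name and creates demo_run.csv in the working directory.
if __name__ == "__main__":
    save_statistics("demo_run", ["epoch", "train_loss", "val_loss"])
    save_statistics("demo_run", [1, 0.93, 0.88])
    save_statistics("demo_run", [2, 0.71, 0.80])
    stats = load_statistics("demo_run")
    print(stats["val_loss"])  # values come back as strings: ['0.88', '0.80']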
| 25.62963 | 58 | 0.58237 | [
"MIT"
] | likesiwell/DL_Code_Repos | MetaLearning/MatchingNetworks/storage.py | 692 | Python |
"""
ASGI config for apiproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiproject.settings')
application = get_asgi_application()
| 23.352941 | 78 | 0.788413 | [
"MIT"
] | vasulimited123/Django-Repository | apiproject/apiproject/asgi.py | 397 | Python |
"""
This module provides an interface for reading and writing to a HackPSU RaspberryPi Scanner config file
Methods:
getProperty(configFile, prop)
Get a property from a config file by reading the config file until the desired property is found
setProperty(configFile, prop, value)
Set a property by updating the config file (requries a total rewrite of the config file)
getProperties(configFile)
Read all properties into a dictionary, which is returned to the user
setProperties(configFile, dict)
Overwrite the configFile with a new configFile generated from the dictionary provided
"""
def getProperties(configFile):
"""
dictionary getProperties(str)
This funciton reads the entire config file and builds a dictionary from the config file
Args:
configFile: The configuration file to read from
Returns:
dictionary: A list of key value pairs from the config file
"""
dict = {}
#For each line in the file
with open(configFile) as file:
for line in file:
#Remove leading and trailing whitespace
line = line.strip()
#If the line is a comment, skip
if line.startswith('#'):
continue
#Find the equals sign, if not present, skip the line
loc = line.find('=')
if loc == -1:
continue
#parse out the key and value
key = line[:loc]
value = line[loc+1:]
dict[key] = value
return dict
def setProperties(configFile, dict):
"""
void setProperties (str, dictionary)
This function iterates over the entire dictionary and saves each dictionary entry to the specified config file
Args:
configFile: The file to overwrite with the new configuration
dict: The dictionary to write
"""
#Overwrite the file
#Foreach key in dictionary write a new line
with open(configFile, 'w') as file:
for key in dict:
file.write(key + '=' + dict[key] + '\n')
def getProperty(configFile, prop):
"""
str getProperty(str, str)
This function searches a configFile for a specific property and returns its value
Args:
configFile: The configuration file to open
prop: The property to search for
Returns:
string: The property value if found or None for no value found
"""
retVal = None
#Foreach line in the file
with open(configFile) as file:
for line in file:
#Remove leading and trailing whitespace
line = line.strip()
#Ignore comment lines
if line.startswith('#'):
continue
#If the line is the desired property, parse and return
if line.startswith(prop):
retVal = line.replace(prop, '')
retVal = retVal.strip()
retVal = retVal[1:]
retVal = retVal.lstrip()
break
return retVal
def setProperty(configFile, prop, value):
"""
void setProperty(str, str, str)
This function searches a config file for the specified propery and updates its value if found.
If the specified property is not found, then a new line for the property will be created
Args:
configFile: The configuration file to open and update
prop: The property key to update
value: The new value for the property
"""
written = False
with open(configFile) as inFile:
#Create a temp file to copy into
tmpHandle, outPath = mkstemp()
with fdopen(tmpHandle, 'w') as outFile:
#Foreach line in the original file
for line in inFile:
#If it's the prop line, rewrite the prop line
if line.startswith(prop):
outFile.write(prop + '=' + value + '\n')
written = True
#Otherwise keep the line as is
else:
outFile.write(line)
#If no update was performed, then add a new line for the prop
if not written:
                outFile.write(prop + '=' + value + '\n')  # use '=' so getProperties can parse it
#Move from tmp to actual file
remove(configFile)
move(outPath, configFile)
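# Minimal usage sketch (not part of the original module): round-trip a couple
# of properties through a throwaway config file.  The file name and keys below
# are arbitrary examples.
if __name__ == '__main__':
    demo_cfg = 'demo_scanner.cfg'
    setProperties(demo_cfg, {'apiUrl': 'https://example.com', 'pin': '17'})
    print(getProperties(demo_cfg))       # {'apiUrl': 'https://example.com', 'pin': '17'}
    setProperty(demo_cfg, 'pin', '22')   # rewrite the file with the updated value
    print(getProperty(demo_cfg, 'pin'))  # 22
    remove(demo_cfg)                     # os.remove, imported above; clean up the example file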
| 27.676692 | 111 | 0.710676 | [
"MIT"
] | hackpsu-tech/hackPSUS2018-rfid | HackPSUconfig.py | 3,681 | Python |
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='[email protected]', password='testpass'):
'''Creating sample user'''
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = '[email protected]'
password = 'Password123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_normalize(self):
"""Testing weather email is in normalize form or not"""
email = "[email protected]"
user = get_user_model().objects.create_user(email, "test123")
self.assertEqual(user.email, email.lower())
def test_email_validation(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_superuser(self):
"""Test for creating super user"""
email = '[email protected]'
password = 'Password123'
user = get_user_model().objects.create_superuser(
email=email,
password=password
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
def test_tag_str(self):
tag = models.Tag.objects.create(user=sample_user(), name='vegan')
self.assertEqual(str(tag), tag.name)
| 33.102041 | 73 | 0.658446 | [
"MIT"
] | Rish1711/recipe-app-api | app/core/tests/test_models.py | 1,622 | Python |
import copy
import dask
import dask.array as da
from dask.distributed import Client
import datetime
import logging
import math
from multiprocessing.pool import ThreadPool
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Union, TypeVar, Tuple
import xarray as xr
import shutil
import warnings
import zarr
from .utils import infer_chunks
from .readers import DirectoryImageReader
Reader = TypeVar("Reader")
def write_transposed_dataset(
reader: Reader,
outfname: Union[Path, str],
start: datetime.datetime = None,
end: datetime.datetime = None,
chunks: dict = None,
memory: float = 2,
n_threads: int = 4,
zlib: bool = True,
complevel: int = 4,
distributed: Union[bool, Client] = False,
use_dask: bool = True,
):
"""
Creates a stacked and transposed netCDF file from a given reader.
WARNING: very experimental!
Parameters
----------
reader : XarrayImageReaderBase
Reader for the dataset.
outfname : str or Path
Output filename. Must end with ".nc" for netCDF output or with ".zarr"
for zarr output.
start : datetime.datetime, optional
If not given, start at first timestamp in dataset.
end : datetime.datetime, optional
If not given, end at last timestamp in dataset.
chunks : dictionary, optional
The chunk sizes that are used for the transposed file. If none are
given, chunks with a size of 1MB are used for netCDF, and chunks with a
        size of 100MB are used for zarr output.
memory : float, optional
The amount of memory to be used for buffering in GB. Default is 2.
Higher is faster.
n_threads : int, optional
The amount of threads to use. Default is 4.
zlib : bool, optional
Whether to use compression when storing the files. Reduces file size,
but strongly increases write time, and maybe also access time. Default
        is ``True``.
complevel : int, optional
Compression level to use. Default is 4. Range is from 1 (low) to 9
(high).
distributed : bool or Client, optional
Whether to use the local or the distributed dask scheduler. If a client
for a distributed scheduler is used, this is used instead.
use_dask : bool, optional
Whether to use dask for the transposing. Default is True, but sometimes
(especially with large datasets) this fails. If set to False, the data
is written to an intermediate zarr store.
"""
dask_config = {
"array.slicing.split_large_chunks": False,
}
args = (reader, outfname)
kwargs = {
"start": start,
"end": end,
"memory": memory,
"zlib": zlib,
"complevel": complevel,
"chunks": chunks,
}
if not use_dask:
_transpose_no_dask(*args, **kwargs)
elif isinstance(distributed, Client) or not distributed:
if not distributed:
dask_config.update(
{"scheduler": "threads", "pool": ThreadPool(n_threads)}
)
with dask.config.set(**dask_config):
_transpose(*args, **kwargs)
elif distributed:
with dask.config.set(**dask_config), Client(
n_workers=1,
threads_per_worker=n_threads,
memory_limit=f"{memory}GB",
) as client:
print("Dask dashboard accessible at:", client.dashboard_link)
_transpose(*args, **kwargs)
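# Minimal usage sketch (hypothetical reader and output path; any reader exposing the
# interface used above -- read_block, varnames, timename, global_attrs -- should work):
#   reader = DirectoryImageReader(...)  # constructor arguments omitted here
#   write_transposed_dataset(reader, "transposed.zarr", memory=4, n_threads=8)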
def _get_intermediate_chunks(array, chunks, new_last_dim, zarr_output, memory):
"""
Calculates chunk sizes for the given array for the intermediate output
files.
Parameters
----------
array : xr.DataArray
Array to rechunk and transpose
chunks : dict or None
Chunks passed to write_transposed_dataset, None if none were given.
new_last_dim : str
Name of the new last dimension, normally "time".
zarr_output : bool
Whether the final file will be a zarr file (True) or a netCDf (False).
memory : float
The amount of memory to be used for buffering in GB.
Returns
-------
tmp_chunks : dict
Chunks to be used for rechunking the array to a temporary file. The
order of keys corresponds to the order of dimensions in the transposed
array.
"""
dtype = array.dtype
dims = dict(zip(array.dims, array.shape))
transposed_shape = [
length for dim, length in dims.items() if dim != new_last_dim
]
transposed_shape.append(dims[new_last_dim])
# If the chunks argument was not given, we have to infer the spatial
# and temporal chunks for the intermediate file.
# The spatial chunks will be set such that for a continuous time
# dimension the chunk size is still reasonable.
if chunks is None:
if zarr_output:
chunksizes = infer_chunks(transposed_shape, 100, dtype)[:-1]
else:
chunksizes = infer_chunks(transposed_shape, 1, dtype)[:-1]
chunks = dict(
zip([dim for dim in dims if dim != new_last_dim], chunksizes)
)
chunks[new_last_dim] = -1
else:
chunks = copy.copy(chunks)
tmp_chunks = {dim: chunks[dim] for dim in dims if dim != new_last_dim}
# figure out temporary chunk sizes based on image size and available memory
size = dtype.itemsize
chunksizes = [size if size != -1 else dims[dim] for dim, size in chunks.items()]
chunksize_MB = np.prod(chunksizes) * size / 1024 ** 2
img_shape = transposed_shape[:-1]
len_time = transposed_shape[-1]
imagesize_GB = np.prod(img_shape) * size / 1024 ** 3
# we need to divide by two, because we need intermediate storage for
# the transposing
stepsize = int(math.floor(memory / imagesize_GB)) // 2
stepsize = min(stepsize, len_time)
tmp_chunks[new_last_dim] = stepsize
tmp_chunks_str = str(tuple(tmp_chunks.values()))
logging.info(
f"write_transposed_dataset: Creating chunks {tmp_chunks_str}"
f" with chunksize {chunksize_MB:.2f} MB"
)
return tmp_chunks
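# Worked example of the temporal chunking above (hypothetical sizes): a single float32
# image of shape (1800, 3600) occupies 1800 * 3600 * 4 / 1024**3 ~= 0.024 GB, so with
# memory=2 the temporary time chunk is floor(2 / 0.024) // 2 = 41 steps, capped at the
# length of the time axis.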
def _transpose(
reader: Reader,
outfname: Union[Path, str],
start: datetime.datetime = None,
end: datetime.datetime = None,
chunks: dict = None,
memory: float = 2,
zlib: bool = True,
complevel: int = 4,
):
zarr_output = str(outfname).endswith(".zarr")
new_last_dim = reader.timename
if isinstance(reader, DirectoryImageReader) and reader.chunks is None:
logging.info(
"You are using DirectoryImageReader without dask. If you run into"
" memory issues or have large datasets to transpose, consider"
" setting use_dask=True in the constructor of DirectoryImageReader."
)
ds = reader.read_block(start, end)
# We process each variable separately and store them as intermediately
# chunked temporary files. The chunk size in time dimension is inferred
# from the given memory.
variable_chunks = {}
variable_intermediate_fnames = {}
for var in reader.varnames:
tmp_outfname = str(outfname) + f".{var}.zarr"
variable_intermediate_fnames[var] = tmp_outfname
if Path(tmp_outfname).exists():
logging.info(
"Skipping generating intermediate file {tmp_outfname}"
" because it exists"
)
continue
tmp_chunks = _get_intermediate_chunks(
ds[var], chunks, new_last_dim, zarr_output, memory
)
# make sure that the time dimension will be continuous in the final
# output
chunks = copy.copy(tmp_chunks)
chunks[new_last_dim] = len(ds[var].time)
variable_chunks[var] = chunks
# now we can rechunk and transpose using xarray
rechunked_transposed = ds[var].chunk(tmp_chunks).transpose(
..., new_last_dim
)
rechunked_transposed.to_dataset().to_zarr(
tmp_outfname, consolidated=True
)
# Now we have to reassemble all variables to a single dataset and write the
# final chunks
variable_ds = []
variable_chunksizes = {}
for var in reader.varnames:
ds = xr.open_zarr(variable_intermediate_fnames[var], consolidated=True)
variable_ds.append(ds)
# for the encoding variable below we need the chunks as tuple in the
# right order, it's easier to get this here were we have easy access to
# the transposed DataArray
transposed_dims = ds[var].dims
variable_chunksizes[var] = tuple(
chunks[dim] for dim in transposed_dims
)
ds = xr.merge(
variable_ds,
compat="override",
join="override",
combine_attrs="override",
)
ds.attrs.update(reader.global_attrs)
encoding = {
var: {
"chunksizes": variable_chunksizes[var],
"zlib": zlib,
"complevel": complevel,
}
for var in reader.varnames
}
if not zarr_output:
ds.to_netcdf(outfname, encoding=encoding)
else:
for var in reader.varnames:
del ds[var].encoding["chunks"]
del ds[var].encoding["preferred_chunks"]
ds[var] = ds[var].chunk(variable_chunksizes[var])
ds.to_zarr(outfname, mode="w", consolidated=True)
for var in reader.varnames:
shutil.rmtree(variable_intermediate_fnames[var])
logging.info("write_transposed_dataset: Finished writing transposed file.")
def _transpose_no_dask(
reader: Reader,
outfname: Union[Path, str],
start: datetime.datetime = None,
end: datetime.datetime = None,
chunks: Tuple = None,
memory: float = 2,
zlib: bool = True,
complevel: int = 4,
):
warnings.warn(
"This is an experimental function and not yet ready for public use!"
)
zarr_output = str(outfname).endswith(".zarr")
new_last_dim = reader.timename
timestamps = reader.tstamps_for_daterange(start, end)
variable_fnames = {}
variable_dims = {}
for varname in reader.varnames:
tmp_outfname = str(outfname) + f".{varname}.zarr"
variable_fnames[varname] = tmp_outfname
# first, get some info about structure of the input file
first_img = reader.read_block(start=timestamps[0], end=timestamps[0])[
varname
]
tmp_chunks = _get_intermediate_chunks(
first_img, chunks, new_last_dim, zarr_output, memory
)
# get new dim names in the correct order
new_dim_names = list(tmp_chunks)
variable_dims[varname] = new_dim_names
# this happens this late because we need to set
# `variable_dims[varname]` in any case
if Path(tmp_outfname).exists():
logging.info(f"{str(tmp_outfname)} already exists, skipping.")
continue
logging.debug(
f"write_transposed_dataset: starting zarr array creation"
f" for {len(timestamps)} timestamps"
)
# get shape of transposed target array
dims = dict(zip(first_img.dims, first_img.shape))
transposed_shape = tuple(dims[dim] for dim in tmp_chunks.keys())
zarr_array = zarr.create(
            transposed_shape,
chunks=tuple(size for size in tmp_chunks.values()),
store=tmp_outfname,
overwrite=True,
fill_value=np.nan,
)
logging.debug(f"write_transposed_dataset: Writing {tmp_outfname}")
print(f"Constructing array stack for {varname}:")
        stepsize = tmp_chunks[new_last_dim]
        pbar = tqdm(range(0, len(timestamps), stepsize))
for start_idx in pbar:
pbar.set_description("Reading")
end_idx = min(start_idx + stepsize - 1, len(timestamps) - 1)
block = reader.read_block(
timestamps[start_idx], timestamps[end_idx]
)[varname]
block = block.transpose(..., new_last_dim)
pbar.set_description("Writing")
zarr_array[..., start_idx : end_idx + 1] = block.values
variable_arrays = {}
encoding = {}
for varname, fname in variable_fnames.items():
logging.debug(f"Reading {str(fname)}")
arr = da.from_zarr(fname)
dims = variable_dims[varname]
metadata = reader.array_attrs[varname]
if chunks is None:
if zarr_output:
                chunks = infer_chunks(arr.shape, 100, arr.dtype)
else:
# netCDF chunks should be about 1MB
                chunks = infer_chunks(arr.shape, 1, arr.dtype)
encoding[varname] = {
"chunksizes": chunks,
"zlib": zlib,
"complevel": complevel,
}
chunk_dict = dict(zip(dims, chunks))
arr = xr.DataArray(data=arr, dims=dims, attrs=metadata)
arr = arr.chunk(chunk_dict)
arr.encoding = encoding[varname]
# we're writing again to a temporary file, because otherwise the
# dataset creation fails because dask sucks
# arr.to_dataset(name=varname).to_zarr(fname + ".tmp", consolidated=True)
# variable_arrays[varname] = xr.open_zarr(fname + ".tmp", consolidated=True)
variable_arrays[varname] = arr
logging.debug("Reading test image")
test_img = reader.read_block(start=timestamps[0], end=timestamps[0])[
reader.varnames[0]
]
coords = {
c: test_img.coords[c] for c in test_img.coords if c != reader.timename
}
coords[reader.timename] = timestamps
logging.debug("Creating dataset")
ds = xr.Dataset(
variable_arrays,
coords=coords,
)
ds.attrs.update(reader.global_attrs)
logging.info(
f"write_transposed_dataset: Writing combined file to {str(outfname)}"
)
if not zarr_output:
ds.to_netcdf(outfname, encoding=encoding)
else:
ds.to_zarr(outfname, mode="w", consolidated=True)
for fname in variable_fnames.values():
shutil.rmtree(fname)
logging.info("write_transposed_dataset: Finished writing transposed file.")
| 34.681373 | 84 | 0.637597 | [
"MIT"
] | awst-austria/qa4sm-preprocessing | src/qa4sm_preprocessing/nc_image_reader/transpose.py | 14,150 | Python |
#!/usr/bin/python
import sys
from PIL import Image
import argparse
parser = argparse.ArgumentParser(description='Convert animated or static images to CampZone2019 badge code')
parser.add_argument('image', help='The path to an image to read from (e.g. .gif, .jpg, .png)')
parser.add_argument('--start_x', type=int, default=0, help='The X offset in the image to start reading from')
parser.add_argument('--start_y', type=int, default=0, help='The Y offset in the image to start reading from')
parser.add_argument('--length_x', type=int, default=32, help='The width to read from the image, starting at start_x')
parser.add_argument('--length_y', type=int, default=8, help='The height to read from the image, starting at start_y')
parser.add_argument('--start_at_frame', type=int, default=0, help='The frame to start from, if the image is animated')
parser.add_argument('--lim_frames', type=int, default=16, help='The number of frames to parse, if the image is animated')
parser.add_argument('--skip_frames', type=int, default=1, help='The number of frames to skip between parsed frames, if the image is animated')
parser.add_argument('--is_icon', type=bool, default=False, help='Set to "true" to output rgb.image() instead of rgb.gif()')
args = parser.parse_args()
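# Example invocation (hypothetical input file; flags as defined above):
#   python convert.py animation.gif --start_x 0 --start_y 0 --length_x 32 --length_y 8 --lim_frames 16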
start_x = args.start_x
start_y = args.start_y
length_x = args.length_x
length_y = args.length_y
start_at_frame = args.start_at_frame
lim_frames = args.lim_frames
skip_frames = args.skip_frames
is_icon = args.is_icon
frames = []
image = Image.open(args.image)
n_frames, width, height = image.n_frames if hasattr(image, 'n_frames') else 1, image.width, image.height
used_frames = min((n_frames - start_at_frame) // skip_frames, lim_frames)
used_width = min(length_x, image.width)
used_height = min(length_y, image.height)
for frame_no in range(start_at_frame, start_at_frame + used_frames):
image.seek(frame_no)
frame = list(image.convert('RGBA').getdata())
cut_frame = []
for y in range(start_y, start_y + used_height):
for x in range(start_x, start_x + used_width):
cut_frame.append(frame[x + width * y])
frames.append(cut_frame)
if is_icon:
print('icon = ([0x' +
', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
frames]) +
'], %d)' % used_frames)
else:
print('rgb.gif([0x' +
', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
frames]) +
'], %d, %d, %d, %d, %d)' % (0, 0, used_width, used_height, used_frames))
| 45.912281 | 123 | 0.680168 | [
"MIT"
] | tjclement/cz19-badge | tools/convert.py | 2,617 | Python |
import asyncio
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from ceres.types.blockchain_format.coin import Coin
from ceres.types.blockchain_format.program import Program, SerializedProgram
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.coin_spend import CoinSpend
from ceres.util.db_wrapper import DBWrapper
from ceres.util.ints import uint64
from ceres.wallet.wallet_pool_store import WalletPoolStore
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def make_child_solution(coin_spend: CoinSpend, new_coin: Optional[Coin] = None) -> CoinSpend:
new_puzzle_hash: bytes32 = token_bytes(32)
solution = "()"
puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
puzzle_prog = Program.to(binutils.assemble(puzzle))
solution_prog = Program.to(binutils.assemble(solution))
if new_coin is None:
new_coin = coin_spend.additions()[0]
sol: CoinSpend = CoinSpend(
new_coin,
SerializedProgram.from_program(puzzle_prog),
SerializedProgram.from_program(solution_prog),
)
return sol
class TestWalletPoolStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletPoolStore.create(db_wrapper)
try:
await db_wrapper.begin_transaction()
coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
solution_0: CoinSpend = make_child_solution(None, coin_0)
solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
solution_1: CoinSpend = make_child_solution(solution_0)
assert store.get_spends_for_wallet(0) == []
assert store.get_spends_for_wallet(1) == []
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
# Idempotent
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 101)
# Rebuild cache, no longer present
await db_wrapper.rollback_transaction()
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == []
await store.rebuild_cache()
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
with pytest.raises(ValueError):
await store.add_spend(1, solution_1_alt, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_2: CoinSpend = make_child_solution(solution_1)
await store.add_spend(1, solution_2, 100)
await store.rebuild_cache()
solution_3: CoinSpend = make_child_solution(solution_2)
await store.add_spend(1, solution_3, 100)
solution_4: CoinSpend = make_child_solution(solution_3)
with pytest.raises(ValueError):
await store.add_spend(1, solution_4, 99)
await store.rebuild_cache()
await store.add_spend(1, solution_4, 101)
await store.rebuild_cache()
await store.rollback(101, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
(101, solution_4),
]
await store.rebuild_cache()
await store.rollback(100, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 105)
await store.add_spend(1, solution_4, 105)
solution_5: CoinSpend = make_child_solution(solution_4)
await store.add_spend(1, solution_5, 105)
await store.rollback(99, 1)
assert store.get_spends_for_wallet(1) == []
finally:
await db_connection.close()
db_filename.unlink()
| 36.557252 | 93 | 0.640426 | [
"Apache-2.0"
] | ales/ceres-combineharvester | tests/pools/test_wallet_pool_store.py | 4,789 | Python |
import time
import torch
import numpy as np
from collections import deque
from graphnas.trainer import Trainer
class Evolution_Trainer(Trainer):
"""
This class implements the Asyncronous Aging Evolution,
proposed by Real et. al. on:
Regularized Evolution for Image Classifier Architecture Search
available on: https://arxiv.org/abs/1802.01548
"""
def __init__(self, args):
super(Evolution_Trainer, self).__init__(args)
self.args = args
self.random_seed = args.random_seed
self.population = deque()
self.accuracies = deque()
self.population_size = args.population_size
self.sample_size = args.sample_size
self.cycles = args.cycles
self.init_time = 0
print('initializing population on evolution_trainer init, maybe not the best strategy')
self.__initialize_population()
def derive_from_population(self):
population = self._construct_action(self.population)
best_score_index, _ = \
self._get_best_individual_accuracy(self.accuracies)
best_structure = self.form_gnn_info(population[best_score_index])
print("[DERIVE] Best Structure:", str(best_structure))
# train from scratch to get the final score
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed_all(self.random_seed)
test_scores_list = []
for i in range(10): # run 10 times to get Mean and Stddev
val_acc, test_acc = self.submodel_manager.evaluate(best_structure)
test_scores_list.append(test_acc)
print("[DERIVE] Best Results: ", best_structure, ": ",
np.mean(test_scores_list),
"+/-", np.std(test_scores_list))
def _mutate_individual(self, indiv):
# Choose a random position on the individual to mutate
position_to_mutate = np.random.randint(len(indiv))
# This position will receive a randomly chosen index
# of the search_spaces's list
# for the action corresponding to that position in the individual
sp_list = self.search_space[self.action_list[position_to_mutate]]
indiv[position_to_mutate] = \
np.random.randint(0, len(sp_list))
return indiv
def _get_best_individual_accuracy(self, accs):
max_acc_index = 0
max_acc = -1
for index, acc in enumerate(accs):
if acc > max_acc:
max_acc = acc
max_acc_index = index
return max_acc_index, max_acc
def __initialize_population(self):
print("\n\n===== Evaluating initial random population =====")
start_initial_population_time = time.time()
while len(self.population) < self.population_size:
# print('adding individual #:', len(population))
individual = self._generate_random_individual()
ind_actions = self._construct_action([individual])
gnn = self.form_gnn_info(ind_actions[0])
_, ind_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
print("individual:", individual, " val_score:", ind_acc)
self.accuracies.append(ind_acc)
self.population.append(individual)
end_initial_pop_time = time.time()
self.init_time = end_initial_pop_time - start_initial_population_time
print("Time elapsed initializing population: " +
str(self.init_time))
print("===== Evaluating initial random population DONE ====")
def train(self):
print("\n\n===== Evolution ====")
start_evolution_time = time.time()
while self.cycles > 0:
sample = [] # list with indexes to population individuals
sample_accs = [] # accuracies of the sampled individuals
while len(sample) < self.sample_size:
candidate = np.random.randint(0, len(self.population))
sample.append(self.population[candidate])
sample_accs.append(self.accuracies[candidate])
# Get best individual on sample to serve as parent
max_sample_acc_index, max_sample_acc = \
self._get_best_individual_accuracy(sample_accs)
parent = sample[max_sample_acc_index]
# print('parent: ', parent)
child = parent.copy()
child = self._mutate_individual(child)
# print('child: ', child)
child_actions = self._construct_action([child])
gnn = self.form_gnn_info(child_actions[0])
_, child_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
# print('child acc: ', child_acc)
print("parent: ", str(parent), " val_score: ", str(max_sample_acc),
"| child: ", str(child), ", val_score: ", str(child_acc))
self.accuracies.append(child_acc)
self.population.append(child)
if self.cycles % self.args.eval_cycle == 0:
self.derive_from_population()
# Remove oldest individual (Aging/Regularized evolution)
self.population.popleft()
self.accuracies.popleft()
print("[POPULATION STATS] Mean/Median/Best: ",
np.mean(self.accuracies),
np.median(self.accuracies),
np.max(self.accuracies))
self.cycles -= 1
end_evolution_time = time.time()
total_evolution_time = end_evolution_time - start_evolution_time
print('Time spent on evolution: ' +
str(total_evolution_time))
print('Total elapsed time: ' +
str(total_evolution_time + self.init_time))
print("===== Evolution DONE ====")
def derive(self, sample_num=None):
self.derive_from_population()
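# Minimal usage sketch (hypothetical `args` namespace; it must provide the fields read
# above -- e.g. random_seed, population_size, sample_size, cycles, eval_cycle, format --
# plus whatever the parent Trainer expects):
#   trainer = Evolution_Trainer(args)
#   trainer.train()    # runs the aging-evolution loop
#   trainer.derive()   # re-evaluates the best individual found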
| 43.622222 | 95 | 0.626083 | [
"Apache-2.0"
] | mhnnunes/nas_gnn | graphnas/evolution_trainer.py | 5,889 | Python |
# Generated by Django 2.1.4 on 2018-12-28 02:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.275862 | 120 | 0.637266 | [
"MIT"
] | uzzal71/Django_blog | mysite/blog/migrations/0001_initial.py | 907 | Python |
# Make SweetPea visible regardless of whether it's been installed.
import sys
sys.path.append("..")
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition
from sweetpea.constraints import no_more_than_k_in_a_row
from sweetpea import fully_cross_block, synthesize_trials_non_uniform, print_experiments
"""
Padmala & Pessoa (2011) design
***********************
factors (levels):
- reward (rewarded, non-rewarded)
- response (left, right)
  - response Transition (repetition, switch): a derived factor that depends on response
- congruency (congruent, incongruent, neutral)
- congruency Transition (congruent-congruent, congruent-incongruent, congruent-neutral, incongruent-congruent, incongruent-incongruent, incongruent-neutral, neutral-congruent, neutral-incongruent, neutral-neutral)
design:
- counterbalancing reward x response x response_transition x congruency_transition
"""
# DEFINE REWARD, RESPONSE and CONGRUENCY FACTORS
reward = Factor("reward", ["rewarded", "non-rewarded"])
response = Factor("response", ["building", "house"])
congruency = Factor("congruency", ["congruent", "incongruent", "neutral"])
# DEFINE CONGRUENCY TRANSITION FACTOR
def con_con(congruency):
return congruency[0] == "congruent" and congruency[1] == "congruent"
def con_inc(congruency):
return congruency[0] == "congruent" and congruency[1] == "incongruent"
def con_ntr(congruency):
return congruency[0] == "congruent" and congruency[1] == "neutral"
def inc_con(congruency):
return congruency[0] == "incongruent" and congruency[1] == "congruent"
def inc_inc(congruency):
return congruency[0] == "incongruent" and congruency[1] == "incongruent"
def inc_ntr(congruency):
return congruency[0] == "incongruent" and congruency[1] == "neutral"
def ntr_con(congruency):
return congruency[0] == "neutral" and congruency[1] == "congruent"
def ntr_inc(congruency):
return congruency[0] == "neutral" and congruency[1] == "incongruent"
def ntr_ntr(congruency):
return congruency[0] == "neutral" and congruency[1] == "neutral"
congruency_transition = Factor("congruency_transition", [
DerivedLevel("congruent-congruent", Transition(con_con, [congruency])),
DerivedLevel("congruent-incongruent", Transition(con_inc, [congruency])),
DerivedLevel("congruent-neutral", Transition(con_ntr, [congruency])),
DerivedLevel("incongruent-congruent", Transition(inc_con, [congruency])),
DerivedLevel("incongruent-incongruent", Transition(inc_inc, [congruency])),
DerivedLevel("incongruent-neutral", Transition(inc_ntr, [congruency])),
DerivedLevel("neutral-congruent", Transition(ntr_con, [congruency])),
DerivedLevel("neutral-incongruent", Transition(ntr_inc, [congruency])),
DerivedLevel("neutral-neutral", Transition(ntr_ntr, [congruency]))
])
# DEFINE RESPONSE TRANSITION FACTOR
def response_repeat(responses):
return responses[0] == responses[1]
def response_switch(responses):
return not response_repeat(responses)
response_transition = Factor("resp_transition", [
DerivedLevel("repeat", Transition(response_repeat, [response])),
DerivedLevel("switch", Transition(response_switch, [response]))
])
# DEFINE SEQUENCE CONSTRAINTS
constraints = []
# DEFINE EXPERIMENT
design = [congruency, reward, response, congruency_transition, response_transition]
crossing = [reward, response, congruency_transition, response_transition]
block = fully_cross_block(design, crossing, constraints)
# SOLVE
experiments = synthesize_trials_non_uniform(block, 5)
print_experiments(block, experiments)
| 38.634409 | 213 | 0.748678 | [
"MIT"
] | ahsanbutt95/sweetpea-py | example_programs/PadmalaPessoa2011.py | 3,593 | Python |
# coding: utf-8
import pprint
import re
import six
class KeyStatusInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key_id': 'str',
'key_state': 'str'
}
attribute_map = {
'key_id': 'key_id',
'key_state': 'key_state'
}
def __init__(self, key_id=None, key_state=None):
"""KeyStatusInfo - a model defined in huaweicloud sdk"""
self._key_id = None
self._key_state = None
self.discriminator = None
if key_id is not None:
self.key_id = key_id
if key_state is not None:
self.key_state = key_state
@property
def key_id(self):
"""Gets the key_id of this KeyStatusInfo.
        Key ID
:return: The key_id of this KeyStatusInfo.
:rtype: str
"""
return self._key_id
@key_id.setter
def key_id(self, key_id):
"""Sets the key_id of this KeyStatusInfo.
        Key ID
:param key_id: The key_id of this KeyStatusInfo.
:type: str
"""
self._key_id = key_id
@property
def key_state(self):
"""Gets the key_state of this KeyStatusInfo.
        Key state: 2 = enabled, 3 = disabled, 4 = scheduled for deletion, 5 = pending import, 7 = frozen
:return: The key_state of this KeyStatusInfo.
:rtype: str
"""
return self._key_state
@key_state.setter
def key_state(self, key_state):
"""Sets the key_state of this KeyStatusInfo.
        Key state: 2 = enabled, 3 = disabled, 4 = scheduled for deletion, 5 = pending import, 7 = frozen
:param key_state: The key_state of this KeyStatusInfo.
:type: str
"""
self._key_state = key_state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeyStatusInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
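# Example (hypothetical values):
#   info = KeyStatusInfo(key_id="example-key-id", key_state="2")
#   info.to_dict()  # -> {'key_id': 'example-key-id', 'key_state': '2'}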
| 24.884058 | 74 | 0.534945 | [
"Apache-2.0"
] | Adek06/huaweicloud-sdk-python-v3 | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py | 3,578 | Python |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : [email protected]
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0106_operations.py
@Version : v0.1
@Time : 2019-10-29 14:11
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook", Nick McClure, Sec0106, P110
@Desc    :  TensorFlow basics: declaring operations
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
# Set the precision and layout options used when printing numpy arrays
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seed so that randomly generated data is reproducible across runs
np.random.seed(42)
# Reset the default computation graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
show_values(tf.div(3, 4), "tf.div(3,4) = integer division")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = true (float) division")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = floor division on floats")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = modulo")
# Cross product -- compute the pairwise cross product
# The cross product of two vectors (also called the vector product) yields a vector, not a scalar.
# The cross product of two vectors is perpendicular to the plane spanned by those two vectors.
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
            "tf.cross([1., 0., 0.], [0., 1., 0.]) = cross product")
# The cross product is only defined for 3-dimensional vectors
# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),
#             "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = cross product")
# See: P11, list of mathematical functions
show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
"tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))")
test_nums = range(15)
# What should we get with list comprehension
expected_output = [3 * x * x - x + 10 for x in test_nums]
print('-' * 50)
print("[3 * x ^ 2 - x + 10 for x in test_nums] = ")
print(expected_output)
# Custom polynomial function
# 3x^2 - x + 10, at x = 11 => 3*121 - 11 + 10 = 362
def custom_polynomial(value):
# return tf.subtract(3 * tf.square(value), value) + 10
return 3 * tf.square(value) - value + 10
show_values(custom_polynomial(11), "custom_polynomial(11) = 3x^2-x+10,x=11=>")
for num in test_nums:
show_values(custom_polynomial(num), "custom_polynomial({})".format(num))
# -----------------------------------------------------------------
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
| 30.842697 | 99 | 0.637887 | [
"MIT"
] | zhuyuanxiang/tensorflow_cookbook | 01_Introduction/C0106_operations.py | 3,173 | Python |
import numpy
# FIXME: copy the functions here
from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp
def sample_gaussian2(means, cv, size, random_state, mins, maxes):
def once(size1):
g = random_state.multivariate_normal(means, cv, size1).T
g = g.reshape(len(means), -1)
mask = (g >= mins[:, None]).all(axis=0)
mask &= (g <= maxes[:, None]).all(axis=0)
return g[:, mask]
g = once(size)
generated = size
while g.shape[1] < size:
fac = 1.0 * g.shape[1] / size
togen = (size - g.shape[1]) * generated // g.shape[1]
g1 = once(togen)
generated = generated + togen
g = numpy.append(g, g1, axis=1)
return g[:, :size]
class GMM(object):
def __init__(self, weights, means, covs, lims):
self.weights = numpy.array(weights)
self.means = numpy.array(means)
self.covs = numpy.array(covs)
self.lims = numpy.array(lims)
[nc] = self.weights.shape
assert self.means.shape[0] == nc
[nc, nf] = self.means.shape
assert self.covs.shape[0] == nc
assert self.covs.shape[1] == nf
assert self.covs.shape[2] == nf
[nc, nf, nf] = self.covs.shape
assert self.lims.shape[0] == nf
assert self.lims.shape[1] == 2
def score(self, X, return_responsibilities=False):
nc = len(self.weights)
X = numpy.array(X)
if X.ndim == 1:
X = X[:, None]
if X.shape[1] != self.means.shape[1]:
raise ValueError('The shape of X is not compatible with self')
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
lpr = numpy.log(self.weights) + \
log_multivariate_normal_density(X,
self.means,
self.covs, 'full')
mask = (X >= mins[None, :]).all(axis=-1)
mask &= (X <= maxes[None, :]).all(axis=-1)
logprob = logsumexp(lpr, axis=1)
logprob[~mask] = -numpy.inf
if return_responsibilities:
responsibilities = numpy.exp(lpr - logprob[:, None])
responsibilities[~mask] = 0
return logprob, responsibilities
return logprob
def marginalize(self, axes):
return GMM(self.weights, self.means[..., axes], self.covs[..., axes][..., axes, :], self.lims[axes])
def sample(self, size, random_state=None):
"""Generate random samples from the model.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = numpy.random
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
X = numpy.empty(size, ('f8', (self.means.shape[1],)))
# decide which component to use for each sample
comps = random_state.choice(len(self.weights), p=self.weights, size=size)
# for each component, generate all needed samples
for comp in range(len(self.weights)):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
cv = self.covs[comp]
g = sample_gaussian2(
self.means[comp], cv,
num_comp_in_X, random_state, mins, maxes).T
X[comp_in_X] = g
return X
@classmethod
def fit(kls, nc, X, lims):
# FIXME: get rid of this and add weights support
from sklearn import mixture
# XXX: Do not use DPGMM because the normalization is buggy
# https://github.com/scikit-learn/scikit-learn/issues/7371
model = mixture.GMM(nc, covariance_type='full', n_iter=1000)
model.fit(X)
if not model.converged_:
raise ValueError("Your data is strange. Gaussian mixture failed to converge")
return kls(model.weights_, model.means_, model.covars_, lims)
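# Minimal usage sketch (hypothetical data): X is an (n_samples, n_features) array and
# lims an (n_features, 2) array of [min, max] bounds per feature.
#   model = GMM.fit(3, X, lims)        # fit a 3-component truncated mixture
#   logprob = model.score(X)           # log-density, -inf outside the bounds
#   draws = model.sample(1000)         # rejection-sampled draws within the bounds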
class Confidence(object):
def __init__(self, model, confidence_table):
self.model = model
self.confidence_table = confidence_table
def score(self, sc):
x, y = self.confidence_table
return numpy.interp(sc, x, y, left=1., right=0.)
@classmethod
def fit(kls, model, nsample=4*1024, vmin=-5, vmax=0, nb=100):
X = model.sample(nsample)
sc = model.score(X)
confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)
# FIXME: add weight support here
sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.)
confidence_table = numpy.array([sc_cl, confidence_levels])
return kls(model, confidence_table)
class CombinedModel(object):
def __init__(self, models):
self.models = models
def score(self, X):
return sum([model.score(X) for model in self.models])
def marginalize(self, axes):
return CombinedModel([
model.marginalize(axes) for model in self.models])
def sample(self, nsample, random_state=None):
if random_state is None:
random_state = numpy.random
def once(size):
X = self.models[0].sample(size, random_state)
nf = X.shape[-1]
lnprob = sum([model.score(X) for model in self.models[1:]])
prob = numpy.exp(lnprob)
prob /= prob.max()
keep = random_state.rand(len(X)) < prob
return X[keep].reshape(-1, nf)
g = once(nsample)
ng = nsample
while len(g) < nsample:
togen = (nsample - len(g)) * ng // len(g)
g1 = once(togen)
ng = ng + togen
g = numpy.append(g, g1, axis=0)
return g[:nsample]
| 33.859649 | 108 | 0.56943 | [
"Apache-2.0"
] | bccp/bananaplots | bananas/model.py | 5,790 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0020_add_index_on_page_first_published_at'),
('tests', '0013_iconsetting_notyetregisteredsetting_testsetting'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(unique=True, max_length=80)),
],
),
migrations.CreateModel(
name='BlogCategoryBlogPage',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('category', models.ForeignKey(to='tests.BlogCategory', related_name='+')),
],
),
migrations.CreateModel(
name='ManyToManyBlogPage',
fields=[
(
'page_ptr',
models.OneToOneField(
primary_key=True,
serialize=False,
parent_link=True,
auto_created=True,
to='wagtailcore.Page'
)
),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('adverts', models.ManyToManyField(to='tests.Advert', blank=True)),
(
'blog_categories',
models.ManyToManyField(
to='tests.BlogCategory',
through='tests.BlogCategoryBlogPage',
blank=True
)
),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='blogcategoryblogpage',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.ManyToManyBlogPage', related_name='categories'),
),
]
| 34.606061 | 114 | 0.510508 | [
"BSD-3-Clause"
] | razisayyed/wagtail | wagtail/tests/testapp/migrations/0014_m2m_blog_page.py | 2,284 | Python |
import loquis
import subprocess
@loquis.command
def run(query,*args):
try:
L=[query.lower()]+list(args)
print(L)
return [subprocess.check_output(L)]
except:
return ["Failed to run command"]
languages={'en':{'run':run}}
| 15.533333 | 37 | 0.686695 | [
"MIT"
] | Steve132/loquis | modules/process.py | 233 | Python |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='namelengthsrc',
parent_name='violin.hoverlabel',
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self,
plotly_name='namelength',
parent_name='violin.hoverlabel',
**kwargs
):
super(NamelengthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'none'),
min=kwargs.pop('min', -1),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='font', parent_name='violin.hoverlabel', **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Font'),
data_docs=kwargs.pop(
'data_docs', """
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
"""
),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='bordercolorsrc',
parent_name='violin.hoverlabel',
**kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='bordercolor',
parent_name='violin.hoverlabel',
**kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='bgcolorsrc',
parent_name='violin.hoverlabel',
**kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='bgcolor', parent_name='violin.hoverlabel', **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='alignsrc',
parent_name='violin.hoverlabel',
**kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='align', parent_name='violin.hoverlabel', **kwargs
):
super(AlignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop('values', ['left', 'right', 'auto']),
**kwargs
)
| 28.8 | 78 | 0.590112 | [
"MIT"
] | Jo-Con-El/plotly.py | plotly/validators/violin/hoverlabel/__init__.py | 6,048 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from pytest import raises, approx
def test():
import pytq_crawlib
pass
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 15.388889 | 48 | 0.638989 | [
"MIT"
] | MacHu-GWU/pytq_crawlib-project | tests/test_import.py | 277 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals,
absolute_import, with_statement)
import os
import sys
if __name__ == '__main__':
if __package__ is None:
dir_name = os.path.dirname(__file__)
sys.path.append(
os.path.abspath(
os.path.join(dir_name, '..')))
from excel2mysql.migrate import migrate
migrate()
| 20.363636 | 57 | 0.609375 | [
"Apache-2.0"
] | zxjsdp/excel2mysql | excel2mysql/__main__.py | 448 | Python |
import biathlonresults as api
def test_cups():
res = api.cups(1819)
assert isinstance(res, list)
assert len(res) == 37
def test_cup_results():
res = api.cup_results("BT1819SWRLCP__SMTS")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
assert res["Rows"][0]["Name"] == "BOE Johannes Thingnes"
def test_athletes():
res = api.athletes("boe", "johannes")
assert isinstance(res, dict)
assert isinstance(res["Athletes"], list)
assert "boe" in res["Athletes"][0]["FamilyName"].lower()
assert "johannes" in res["Athletes"][0]["GivenName"].lower()
def test_cisbios():
res = api.cisbios("BTNOR11605199301")
assert isinstance(res, dict)
assert res["FullName"] == "Johannes Thingnes BOE"
def test_all_results():
# Raphael Poiree
res = api.all_results("BTFRA10908197401")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert res["Results"][0]["SO"] == 2
assert len(res["Results"]) == 329
def test_events():
res = api.events(1819, 1)
assert isinstance(res, list)
assert len(res) == 10
assert res[0]["Level"] == 1
assert res[-1]["ShortDescription"] == "Oslo Holmenkollen"
def test_competitions():
# Pokljuka 1819
res = api.competitions("BT1819SWRLCP01")
assert isinstance(res, list)
assert len(res) == 8
assert res[-1]["ShortDescription"] == "Women 10km Pursuit"
def test_results():
# Pokljuka 1819 W PU
res = api.results("BT1819SWRLCP01SWPU")
assert isinstance(res, dict)
assert isinstance(res["Results"], list)
assert len(res["Results"]) == 60
assert res["Results"][0]["ResultOrder"] == 1
assert res["Results"][0]["Name"] == "MAKARAINEN Kaisa"
def test_stats():
# podiums men stat
res = api.stats("WCPOD_M", "WCPOD", "ATH", "M")
assert isinstance(res, dict)
assert isinstance(res["Rows"], list)
# in case someone breaks Bjoerndalen's record
assert int(res["Rows"][0]["Value"]) >= 199
| 27.479452 | 64 | 0.649053 | [
"MIT"
] | prtkv/biathlonresults | tests/test_api.py | 2,006 | Python |
from django.conf.urls.defaults import patterns
from satchmo_store.shop.satchmo_settings import get_satchmo_setting
ssl = get_satchmo_setting('SSL', default_value=False)
urlpatterns = patterns('',
(r'^$', 'payment.modules.cod.views.pay_ship_info', {'SSL':ssl}, 'COD_satchmo_checkout-step2'),
(r'^confirm/$', 'payment.modules.cod.views.confirm_info', {'SSL':ssl}, 'COD_satchmo_checkout-step3'),
(r'^success/$', 'payment.views.checkout.success', {'SSL':ssl}, 'COD_satchmo_checkout-success'),
)
| 46.181818 | 106 | 0.732283 | [
"BSD-3-Clause"
] | dokterbob/satchmo | satchmo/apps/payment/modules/cod/urls.py | 508 | Python |
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Run a job from hdf5.
"""
from pyiron.base.job.wrapper import job_wrapper_function
def register(parser):
parser.add_argument(
"-d", "--debug", action = "store_true",
help = "enable debug mode" # TODO: what's that mean?
)
parser.add_argument(
"-j", "--job-id",
help = "job id to run"
)
parser.add_argument(
"-p", "--project",
help = "directory where the HDF5 file of the job is located"
)
parser.add_argument(
"-f", "--file-path",
help = "path to the HDF5 file"
)
parser.add_argument(
"-s", "--submit", action = "store_true",
help = "submit to queuing system on remote host"
)
def main(args):
job_wrapper_function(
working_directory=args.project,
job_id=args.job_id,
file_path=args.file_path,
debug=args.debug,
submit_on_remote=args.submit
)
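# Minimal usage sketch (hypothetical values, mirroring the arguments registered above):
#   import argparse
#   ns = argparse.Namespace(project="/path/to/job/working_dir", job_id=42,
#                           file_path=None, debug=False, submit=False)
#   main(ns)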
| 29.179487 | 108 | 0.593146 | [
"BSD-3-Clause"
] | srmnitc/pyiron | pyiron/cli/wrapper.py | 1,139 | Python |
import numpy as np
import wmf
import batched_inv
import batched_inv_mp
import solve_mp
import solve_gpu
np.random.seed(123)
B = np.load("test_matrix.pkl")
S = wmf.log_surplus_confidence_matrix(B, alpha=2.0, epsilon=1e-6)
num_factors = 40 + 1
num_iterations = 1
batch_size = 1000
solve = batched_inv.solve_sequential
# solve = solve_mp.solve_mp
# solve = solve_gpu.solve_gpu
U, V = wmf.factorize(S, num_factors=num_factors, lambda_reg=1e-5, num_iterations=num_iterations, init_std=0.01, verbose=True, dtype='float32',
recompute_factors=batched_inv_mp.recompute_factors_bias_batched_mp, batch_size=batch_size, solve=solve)
| 24.346154 | 142 | 0.793049 | [
"MIT"
] | Phdntom/wmf | test_batched_inv_mp.py | 633 | Python |
"""
ASGI config for cryptocurrency project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptocurrency.settings')
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 | [
"Apache-2.0"
] | deepanshu-jain1999/cryptocurrencytracking | cryptocurrency/asgi.py | 405 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..util import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""Compute declaration for tensorcore"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
assert (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0) or \
(batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0) or \
(batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0), \
"The shape of (batch, in_channel, num_filter) "\
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w))
out_channel = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name='rc')
ry = te.reduce_axis((0, kernel_h), name='ry')
rx = te.reduce_axis((0, kernel_w), name='rx')
# convert data type of input feature maps and weights
TransPaddedInput = te.compute(
PaddedInput.shape,
lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))
TransFilter = te.compute(
Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
TransPaddedInput[nn, yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w, rc].astype(out_dtype) *
TransFilter[ry, rx, rc, ff].astype(out_dtype), axis=[ry, rx, rc]),
name="Conv2dOutput", tag="conv2d_nhwc_tensorcore")
return Output
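# Worked shape example (assumed values satisfying the (16, 16, 16) constraint above):
# a (16, 56, 56, 64) NHWC input with a (3, 3, 64, 64) HWIO filter, stride 1 and padding 1
# gives out_height = out_width = (56 - 3 + 1 + 1) // 1 + 1 = 56, i.e. an output of shape
# (16, 56, 56, 64).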
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
"""Schedule tensorcore template"""
kh, kw, ic = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
trans_paddata, kernel = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
batch, _, _, _ = get_const_tuple(Conv.shape)
_, _, _, out_channels = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
# inline the pad and dtype transform
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
# Designate the memory hierarchy
AS = s.cache_read(trans_paddata, 'shared', [Conv])
WS = s.cache_read(kernel, 'shared', [Conv])
AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
ConvF = s.cache_write(Conv, 'wmma.accumulator')
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, 'shared', [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope('shared')
OL = Conv
# Schedule for autotvm
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("vector_width", [1, 2, 4, 8])
if (batch % 16 == 0 and out_channels % 16 == 0):
cfg.define_knob("wmma_m", [16, 8, 32])
elif (batch % 8 == 0 and out_channels % 32 == 0):
cfg.define_knob("wmma_m", [8, 16, 32])
elif (batch % 32 == 0 and out_channels % 8 == 0):
cfg.define_knob("wmma_m", [32, 16, 8])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
wmma_m = cfg["wmma_m"].val
vector_width = cfg["vector_width"].val
wmma_k = 16
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
warp_size = 32
block_x = te.thread_axis('blockIdx.x')
block_y = te.thread_axis('blockIdx.y')
block_z = te.thread_axis('blockIdx.z')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
# Define the intrin strides
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
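    # For example, get_strides([1, 1, 16, 1]) -> [16, 16, 16, 1]: each entry is the
    # product of the extent at that position and all extents after it.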
AS_align = chunk * wmma_k + offset
WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
block_factor_n = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
CS_align = block_factor_o + offset
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
CS_strides = get_strides([1, 1, CS_align, 1])
# Schedule for output
nc, hc, wc, oc = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
block_i, nc = s[output].split(nc, factor=block_factor_n)
block_j, oc = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
t, ti = s[output].split(t, factor=vector_width)
t, tx = s[output].split(t, factor=warp_size)
t, ty = s[output].split(t, factor=block_row_warps)
t, tz = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
# Schedule wmma store
s[OL].compute_at(s[output], block_j)
nc, hc, wc, oc = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, CS_align - 1, CS_align)
oc, ooc = s[OL].split(oc, factor=wmma_n)
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nnc = s[OL].split(nc, factor=wmma_m)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
# Schedule wmma computation
s[ConvF].compute_at(s[OL], oc)
n, h, w, o = ConvF.op.axis
n, nnf = s[ConvF].split(n, factor=wmma_m)
o, oof = s[ConvF].split(o, factor=wmma_n)
ic, ii = s[ConvF].split(ic, factor=wmma_k)
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
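# The fragment loads (AF/WF) are attached at `ki` and the shared-memory copies
# (AS/WS) at `ko`, so the reduction axes stay outermost.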
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
# Schedule wmma load
n, h, w, i = AF.op.axis
n, nn = s[AF].split(n, factor=wmma_m)
i, ii = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
kh, kw, i, o = WF.op.axis
i, ii = s[WF].split(i, factor=wmma_k)
o, oo = s[WF].split(o, factor=wmma_n)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
# Schedule for data's shared memory
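# All threads in the block cooperatively copy the padded input tile, using
# `vector_width`-wide vectorized loads.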
n, h, w, i = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, AS_align - 1, AS_align)
t = s[AS].fuse(n, i)
t, ti = s[AS].split(t, factor=vector_width)
t, tx = s[AS].split(t, factor=warp_size)
t, ty = s[AS].split(t, factor=block_row_warps)
_, tz = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
# Schedule for kernel's shared memory
kh, kw, ic, o = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, WS_align - 1, WS_align)
t, ti = s[WS].split(t, factor=vector_width)
t, tx = s[WS].split(t, factor=warp_size)
t, ty = s[WS].split(t, factor=block_row_warps)
_, tz = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
# tensorize the wmma process
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(CL_shape, lambda ii, t0, t1, jj:
te.sum(AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * \
WL_gemm[k_gemm, jj].astype(out_dtype), axis=k_gemm),
name='C')
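# AL_gemm, WL_gemm and CL_compute only describe the scalar GEMM pattern; the
# tensorize calls below swap that pattern for the wmma intrinsics.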
s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape,
"row_major", AS_shape, AL_shape, in_dtype))
s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape,
"row_major", WS_shape, WL_shape, in_dtype))
s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides,
shape, out_dtype, CL_shape, CS_shape))
s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides,
WL_strides, CL_strides, shape))
N, OH, OW, CO = get_const_tuple(output.shape)
KH, KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
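# 2 FLOPs (one multiply, one add) per MAC, and each output element needs CI * KH * KW MACs.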
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with tensorcore for NCHW layout"""
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'conv2d_nhwc_tensorcore' in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| 40.068966 | 94 | 0.643561 | [
"Apache-2.0"
] | HatsuneMiku4/incubator-tvm | topi/python/topi/cuda/conv2d_nhwc_tensorcore.py | 12,782 | Python |
# Generated by Django 2.1.5 on 2019-05-03 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0035_post_video'),
]
operations = [
migrations.AlterField(
model_name='post',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/'),
),
]
| 21.157895 | 80 | 0.59204 | [
"MIT"
] | akindele214/181hub_2 | blog/migrations/0036_auto_20190503_1645.py | 402 | Python |
import asyncio
import os
import warnings
from datetime import date
from secedgar.cik_lookup import CIKLookup
from secedgar.client import NetworkClient
from secedgar.core._base import AbstractFiling
from secedgar.core.filing_types import FilingType
from secedgar.exceptions import FilingTypeError
from secedgar.utils import sanitize_date
class CompanyFilings(AbstractFiling):
"""Base class for receiving EDGAR filings.
Args:
cik_lookup (str): Central Index Key (CIK) for company of interest.
filing_type (Union[secedgar.core.filing_types.FilingType, None]): Valid filing type
enum. Defaults to None. If None, then all filing types for CIKs will be returned.
start_date (Union[str, datetime.datetime, datetime.date], optional): Date before
which not to fetch reports. Stands for "date after."
Defaults to None (will fetch all filings before ``end_date``).
end_date (Union[str, datetime.datetime, datetime.date], optional):
Date after which not to fetch reports.
Stands for "date before." Defaults to today.
count (int): Number of filings to fetch. Will fetch up to `count` if that many filings
are available. Defaults to all filings available.
ownership (str): Must be in {"include", "exclude"}. Whether or not to include ownership
filings.
match_format (str): Must be in {"EXACT", "AMEND", "ALL"}.
kwargs: See kwargs accepted for :class:`secedgar.client.network_client.NetworkClient`.
.. versionadded:: 0.1.5
"""
def __init__(self,
cik_lookup,
filing_type=None,
start_date=None,
end_date=date.today(),
client=None,
count=None,
ownership="include",
match_format="ALL",
**kwargs):
# Leave params before other setters
self._params = {
"action": "getcompany",
"output": "xml",
"owner": ownership,
"start": 0,
}
self.start_date = start_date
self.end_date = end_date
self.filing_type = filing_type
self.count = count
self.match_format = match_format
# Make default client NetworkClient and pass in kwargs
self._client = client if client is not None else NetworkClient(**kwargs)
# make CIKLookup object for users if not given
self.cik_lookup = cik_lookup
@property
def path(self):
"""str: Path added to client base."""
return "cgi-bin/browse-edgar"
@property
def params(self):
""":obj:`dict`: Parameters to include in requests."""
return self._params
@property
def client(self):
"""``secedgar.client._base``: Client to use to make requests."""
return self._client
@property
def start_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched."""
return self._start_date
@property
def match_format(self):
"""The match format to use when searching for filings."""
return self._match_format
@match_format.setter
def match_format(self, val):
if val in ["EXACT", "AMEND", "ALL"]:
self._match_format = val
else:
raise ValueError("Format must be one of EXACT,AMEND,ALL")
@start_date.setter
def start_date(self, val):
if val is not None:
self._params["datea"] = sanitize_date(val)
self._start_date = val
else:
self._start_date = None
@property
def end_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched."""
return self._end_date
@end_date.setter
def end_date(self, val):
self._params["dateb"] = sanitize_date(val)
self._end_date = val
@property
def filing_type(self):
"""``secedgar.core.FilingType``: FilingType enum of filing."""
return self._filing_type
@filing_type.setter
def filing_type(self, filing_type):
if isinstance(filing_type, FilingType):
self._params["type"] = filing_type.value
elif filing_type is not None:
raise FilingTypeError
self._filing_type = filing_type
@property
def count(self):
"""Number of filings to fetch."""
return self._count
@count.setter
def count(self, val):
if val is None:
self._count = None
elif not isinstance(val, int):
raise TypeError("Count must be positive integer or None.")
elif val < 1:
raise ValueError("Count must be positive integer or None.")
else:
self._count = val
self._params["count"] = val
@property
def cik_lookup(self):
"""``secedgar.cik_lookup.CIKLookup``: CIKLookup object."""
return self._cik_lookup
@cik_lookup.setter
def cik_lookup(self, val):
if not isinstance(val, CIKLookup):
val = CIKLookup(val, client=self.client)
self._cik_lookup = val
def get_urls(self, **kwargs):
"""Get urls for all CIKs given to Filing object.
Args:
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
urls (list): List of urls for txt files to download.
"""
return {
key: self._get_urls_for_cik(cik, **kwargs)
for key, cik in self.cik_lookup.lookup_dict.items()
}
# TODO: Change this to return accession numbers that are turned into URLs later
def _get_urls_for_cik(self, cik, **kwargs):
"""Get all urls for specific company according to CIK.
Must match start date, end date, filing_type, and count parameters.
Args:
cik (str): CIK for company.
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
txt_urls (list of str): Up to the desired number of URLs for that specific company
if available.
"""
self.params["CIK"] = cik
links = []
self.params["start"] = 0 # set start back to 0 before paginating
while self.count is None or len(links) < self.count:
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all("filinghref")])
self.params["start"] += self.client.batch_size
if len(data.find_all("filinghref")) == 0: # no more filings
break
txt_urls = [link[:link.rfind("-")].strip() + ".txt" for link in links]
if isinstance(self.count, int) and len(txt_urls) < self.count:
warnings.warn(
"Only {num} of {count} filings were found for {cik}.".format(
num=len(txt_urls), count=self.count, cik=cik))
# Takes `count` filings at most
return txt_urls[:self.count]
def save(self, directory, dir_pattern=None, file_pattern=None):
"""Save files in specified directory.
Each txt url looks something like:
https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt
Args:
directory (str): Path to directory where files should be saved.
dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".
Valid options are {cik} and/or {type}.
file_pattern (str): Format string for files. Default is "{accession_number}".
Valid options are {accession_number}.
Returns:
None
Raises:
ValueError: If no text urls are available for given filing object.
"""
urls = self.get_urls_safely()
if dir_pattern is None:
dir_pattern = os.path.join("{cik}", "{type}")
if file_pattern is None:
file_pattern = "{accession_number}"
inputs = []
for cik, links in urls.items():
formatted_dir = dir_pattern.format(cik=cik,
type=self.filing_type.value)
for link in links:
formatted_file = file_pattern.format(
accession_number=self.get_accession_number(link))
path = os.path.join(directory, formatted_dir, formatted_file)
inputs.append((link, path))
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.wait_for_download_async(inputs))
| 36.359184 | 99 | 0.60586 | [
"Apache-2.0"
] | Ahrvo-Trading-Systems/sec-edgar | secedgar/core/company.py | 8,908 | Python |
from asolut.main import *
| 13 | 25 | 0.769231 | [
"MIT"
] | Marios-Mamalis/asolut | asolut/__init__.py | 26 | Python |