max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M) |
---|---|---|---|---|
code/python/src/vm/lua_type.py | ShakeM/luago-book | 723 | 11171327 |
from enum import Enum, unique
@unique
class LuaType(Enum):
NONE = -1
NIL = 0
BOOLEAN = 1
LIGHT_USER_DATA = 2
NUMBER = 3
STRING = 4
TABLE = 5
FUNCTION = 6
USER_DATA = 7
THREAD = 8
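# Added usage sketch (not part of the original module): enum members can be
# recovered from their integer codes, mirroring Lua's C API type constants.
if __name__ == '__main__':
    assert LuaType(0) is LuaType.NIL
    assert LuaType.BOOLEAN.value == 1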
|
examples/fetchDebianDependencyGraph.py | woehrer12/pyArango | 126 | 11171335 | #!/usr/bin/python
import sys
from pyArango.connection import *
from pyArango.graph import *
from asciitree import *
conn = Connection(username="USERNAME", password="<PASSWORD>")
db = conn["ddependencyGrahp"]
if not db.hasGraph('debian_dependency_graph'):
raise Exception("didn't find the debian dependency graph, please import first!")
ddGraph = db.graphs['debian_dependency_graph']
graphQuery = '''
FOR package, depends, path IN
1..2 ANY
@startPackage Depends RETURN path
'''
startNode = sys.argv[1]
bindVars = { "startPackage": "packages/" + startNode }
queryResult = db.AQLQuery(graphQuery, bindVars=bindVars, rawResults=True)
# A small iterable node object used to build up the tree for draw_tree:
class Node(object):
def __init__(self, name, children):
self.name = name
self.children = children
def getChild(self, searchName):
for child in self.children:
if child.name == searchName:
return child
return None
def __str__(self):
return self.name
def iteratePath(path, depth, currentNode):
pname = path[depth]['name']
subNode = currentNode.getChild(pname)
    if subNode is None:
subNode = Node(pname, [])
currentNode.children.append(subNode)
if len(path) > depth + 1:
iteratePath(path, depth + 1, subNode)
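# Added illustration: for a path whose edges are [{'name': 'a'}, {'name': 'b'}],
# iteratePath(path, 0, root) ensures the chain root -> a -> b exists, reusing any
# nodes already created by earlier paths.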
# Now we fold the paths substructure into the tree:
rootNode = Node(startNode, [])
for path in queryResult:
p = path['edges']
iteratePath(p, 0, rootNode)
print(draw_tree(rootNode))
|
casper4/griefing_one_third_offline_simulator.py | kevaundray/research | 1,351 | 11171361 | import math
# Length of an epoch in seconds
epoch_len = 1400
# In-protocol penalization parameter
increment = 0.00002
# Parameters
NFP = 0
NCP = 3
NCCP = 3
NPP = 3
NPCP = 3
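# Added note (inferred, hedged): these are most likely the Casper FFG griefing
# penalty factors (NFP: non-finality penalty, NCP/NCCP: non-commit individual/
# collective penalties, NPP/NPCP: non-prepare individual/collective penalties);
# only NFP, NPP and NPCP enter the loss formulas below.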
def sim_offline(p):
online, offline = 1-p, p
for i in range(1, 999999):
# Lost by offline validators
offline_loss = NFP + NPP + NPCP * (offline / (online + offline))
# Lost by online validators
online_loss = NFP + NPCP * (offline / (online + offline))
online *= 1 - increment * math.log(i) * online_loss
offline *= 1 - increment * math.log(i) * offline_loss
if i % 100 == 0 or online >= 2 * offline:
print("%d epochs (%.2f days): online %.4f offline %.4f" %
(i, epoch_len * i / 86400, online, offline))
# If the remaining validators can commit, break
if online >= 2 * offline:
return (1-p, online, epoch_len * i / 86400)
sim_offline(0.4)
#results = [sim_offline(i * 0.01) for i in range(34, 100)]
#for col in results:
# print("%.4f, %.4f, %.4f" % col)
|
SuperReads_RNA/global-1/jellyfish/swig/python/test_string_mers.py | wrf/stringtie | 255 | 11171394 | import unittest
import sys
import random
import jellyfish
class TestStringMers(unittest.TestCase):
def setUp(self):
bases = "ACGTacgt"
self.str = ''.join(random.choice(bases) for _ in range(1000))
self.k = random.randint(10, 110)
jellyfish.MerDNA.k(self.k)
def test_all_mers(self):
count = 0
good = True
mers = jellyfish.string_mers(self.str)
for m in mers:
m2 = jellyfish.MerDNA(self.str[count:count+self.k])
good = good and m == m2
count += 1
self.assertTrue(good)
self.assertEqual(len(self.str) - self.k + 1, count)
def test_canonical_mers(self):
good = True
mers = jellyfish.string_canonicals(self.str)
for count, m in enumerate(mers):
m2 = jellyfish.MerDNA(self.str[count:count+self.k])
rm2 = m2.get_reverse_complement()
good = good and (m == m2 or m == rm2)
good = good and (not (m > m2)) and (not (m > rm2))
# count += 1
self.assertTrue(good)
self.assertEqual(len(self.str) - self.k + 0, count)
if __name__ == '__main__':
data = sys.argv.pop(1)
unittest.main()
|
tests/test_memory_merge.py | r4b3rt/angr | 6,132 | 11171421 | # pylint:disable=isinstance-second-argument-not-valid-type
import claripy
from angr.storage.memory_mixins import (
DataNormalizationMixin,
SizeNormalizationMixin,
AddressConcretizationMixin,
UltraPagesMixin,
ListPagesMixin,
PagedMemoryMixin,
SymbolicMergerMixin,
ConvenientMappingsMixin,
)
from angr import SimState
class UltraPageMemory(
DataNormalizationMixin,
SizeNormalizationMixin,
AddressConcretizationMixin,
SymbolicMergerMixin,
ConvenientMappingsMixin,
UltraPagesMixin,
PagedMemoryMixin,
):
pass
class ListPageMemory(
DataNormalizationMixin,
SizeNormalizationMixin,
AddressConcretizationMixin,
SymbolicMergerMixin,
ConvenientMappingsMixin,
ListPagesMixin,
PagedMemoryMixin,
):
pass
def test_merge_memory_object_endness():
for memcls in [UltraPageMemory, ListPageMemory]:
state0 = SimState(arch='AMD64', mode='symbolic', plugins={'memory': memcls()})
state0.memory.store(0x20000, claripy.BVS("x", 64), endness="Iend_LE")
state1 = SimState(arch="AMD64", mode="symbolic", plugins={'memory': memcls()})
state1.memory.store(0x20000, claripy.BVS("y", 64), endness="Iend_LE")
state, _, _ = state0.merge(state1)
obj = state.memory.load(0x20000, size=8, endness="Iend_LE")
assert isinstance(obj, claripy.ast.Base)
# the original endness should be respected, and obj.op should not be Reverse
assert obj.op == "If"
def test_merge_seq():
state1 = SimState(arch='AMD64', mode='symbolic', plugins={'memory': UltraPageMemory()})
state2 = SimState(arch='AMD64', mode='symbolic', plugins={'memory': UltraPageMemory()})
state1.regs.rsp = 0x80000000
state2.regs.rsp = 0x80000000
state1.memory.store(state1.regs.rsp, 0x11, 1)
state1.memory.store(state1.regs.rsp + 1, 0x22, 1)
state2.memory.store(state2.regs.rsp, 0xAA, 1)
state2.memory.store(state2.regs.rsp + 1, 0xBB, 1)
state3, _, __ = state1.merge(state2)
vals = [v for v in state3.solver.eval_upto(state3.memory.load(state3.regs.rsp, 2), 10)]
assert set([0x1122, 0xaabb]) == set(vals)
if __name__ == '__main__':
test_merge_seq()
test_merge_memory_object_endness()
|
venv/Lib/site-packages/statsmodels/base/tests/test_transform.py | EkremBayar/bayar | 6,931 | 11171423 | import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises)
from statsmodels.base.transform import (BoxCox)
from statsmodels.datasets import macrodata
class TestTransform:
@classmethod
def setup_class(cls):
data = macrodata.load_pandas()
cls.x = data.data['realgdp'].values
cls.bc = BoxCox()
def test_nonpositive(self):
# Testing negative values
y = [1, -1, 1]
assert_raises(ValueError, self.bc.transform_boxcox, y)
# Testing nonzero
y = [1, 0, 1]
assert_raises(ValueError, self.bc.transform_boxcox, y)
def test_invalid_bounds(self):
# more than two bounds
assert_raises(ValueError, self.bc._est_lambda, self.x, (-3, 2, 3))
# upper bound <= lower bound
assert_raises(ValueError, self.bc._est_lambda, self.x, (2, -1))
def test_unclear_methods(self):
# Both _est_lambda and untransform have a method argument that should
# be tested.
assert_raises(ValueError, self.bc._est_lambda,
self.x, (-1, 2), 'test')
assert_raises(ValueError, self.bc.untransform_boxcox,
self.x, 1, 'test')
def test_unclear_scale_parameter(self):
# bc.guerrero allows for 'mad' and 'sd', for the MAD and Standard
# Deviation, respectively
assert_raises(ValueError, self.bc._est_lambda,
self.x, scale='test')
# Next, check if mad/sd work:
self.bc._est_lambda(self.x, scale='mad')
self.bc._est_lambda(self.x, scale='MAD')
self.bc._est_lambda(self.x, scale='sd')
self.bc._est_lambda(self.x, scale='SD')
def test_valid_guerrero(self):
# `l <- BoxCox.lambda(x, method="guerrero")` on a ts object
# with frequency 4 (BoxCox.lambda defaults to 2, but we use
# Guerrero and Perera (2004) as a guideline)
lmbda = self.bc._est_lambda(self.x, method='guerrero', window_length=4)
assert_almost_equal(lmbda, 0.507624, 4)
# `l <- BoxCox.lambda(x, method="guerrero")` with the default grouping
# parameter (namely, window_length=2).
lmbda = self.bc._est_lambda(self.x, method='guerrero', window_length=2)
assert_almost_equal(lmbda, 0.513893, 4)
def test_guerrero_robust_scale(self):
# The lambda is derived from a manual check of the values for the MAD.
# Compare also the result for the standard deviation on R=4: 0.5076,
# i.e. almost the same value.
lmbda = self.bc._est_lambda(self.x, scale='mad')
assert_almost_equal(lmbda, 0.488621, 4)
def test_loglik_lambda_estimation(self):
# 0.2 is the value returned by `BoxCox.lambda(x, method="loglik")`
lmbda = self.bc._est_lambda(self.x, method='loglik')
assert_almost_equal(lmbda, 0.2, 1)
def test_boxcox_transformation_methods(self):
# testing estimated lambda vs. provided. Should result in almost
# the same transformed data. Value taken from R.
y_transformed_no_lambda = self.bc.transform_boxcox(self.x)
y_transformed_lambda = self.bc.transform_boxcox(self.x, 0.507624)
assert_almost_equal(y_transformed_no_lambda[0],
y_transformed_lambda[0], 3)
# a perfectly increasing set has a constant variance over the entire
# series, hence stabilising should result in the same scale: lmbda = 1.
y, lmbda = self.bc.transform_boxcox(np.arange(1, 100))
assert_almost_equal(lmbda, 1., 5)
def test_zero_lambda(self):
# zero lambda should be a log transform.
y_transform_zero_lambda, lmbda = self.bc.transform_boxcox(self.x, 0.)
assert_equal(lmbda, 0.)
assert_almost_equal(y_transform_zero_lambda, np.log(self.x), 5)
def test_naive_back_transformation(self):
# test both transformations functions -> 0. and .5
y_zero_lambda = self.bc.transform_boxcox(self.x, 0.)
y_half_lambda = self.bc.transform_boxcox(self.x, .5)
y_zero_lambda_un = self.bc.untransform_boxcox(*y_zero_lambda,
method='naive')
y_half_lambda_un = self.bc.untransform_boxcox(*y_half_lambda,
method='naive')
assert_almost_equal(self.x, y_zero_lambda_un, 5)
assert_almost_equal(self.x, y_half_lambda_un, 5)
|
chatbotv2/my_seq2seq_v2.py | drpreetyrai/ChatBotCourse | 5,087 | 11171437 |
# -*- coding: utf-8 -*-
import sys
import math
import tflearn
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn
import chardet
import numpy as np
import struct
question_seqs = []
answer_seqs = []
max_w = 50
float_size = 4
word_vector_dict = {}
word_vec_dim = 200
max_seq_len = 8
word_set = {}
def load_word_set():
file_object = open('./segment_result_lined.3000000.pair.less', 'r')
while True:
line = file_object.readline()
if line:
line_pair = line.split('|')
line_question = line_pair[0]
line_answer = line_pair[1]
for word in line_question.decode('utf-8').split(' '):
word_set[word] = 1
for word in line_answer.decode('utf-8').split(' '):
word_set[word] = 1
else:
break
file_object.close()
def load_vectors(input):
"""从vectors.bin加载词向量,返回一个word_vector_dict的词典,key是词,value是200维的向量
"""
print "begin load vectors"
input_file = open(input, "rb")
    # Read the vocabulary size and the vector dimension
words_and_size = input_file.readline()
words_and_size = words_and_size.strip()
words = long(words_and_size.split(' ')[0])
size = long(words_and_size.split(' ')[1])
print "words =", words
print "size =", size
for b in range(0, words):
a = 0
word = ''
        # Read one word, byte by byte, up to the next space
while True:
c = input_file.read(1)
word = word + c
if False == c or c == ' ':
break
if a < max_w and c != '\n':
a = a + 1
word = word.strip()
vector = []
for index in range(0, size):
m = input_file.read(float_size)
(weight,) = struct.unpack('f', m)
vector.append(float(weight))
        # Store the word and its corresponding vector in the dict
if word_set.has_key(word.decode('utf-8')):
word_vector_dict[word.decode('utf-8')] = vector[0:word_vec_dim]
input_file.close()
print "load vectors finish"
def init_seq(input_file):
"""读取切好词的文本文件,加载全部词序列
"""
file_object = open(input_file, 'r')
vocab_dict = {}
while True:
question_seq = []
answer_seq = []
line = file_object.readline()
if line:
line_pair = line.split('|')
line_question = line_pair[0]
line_answer = line_pair[1]
for word in line_question.decode('utf-8').split(' '):
if word_vector_dict.has_key(word):
question_seq.append(word_vector_dict[word])
for word in line_answer.decode('utf-8').split(' '):
if word_vector_dict.has_key(word):
answer_seq.append(word_vector_dict[word])
else:
break
question_seqs.append(question_seq)
answer_seqs.append(answer_seq)
file_object.close()
def vector_sqrtlen(vector):
len = 0
for item in vector:
len += item * item
len = math.sqrt(len)
return len
def vector_cosine(v1, v2):
if len(v1) != len(v2):
sys.exit(1)
sqrtlen1 = vector_sqrtlen(v1)
sqrtlen2 = vector_sqrtlen(v2)
value = 0
for item1, item2 in zip(v1, v2):
value += item1 * item2
return value / (sqrtlen1*sqrtlen2)
def vector2word(vector):
max_cos = -10000
match_word = ''
for word in word_vector_dict:
v = word_vector_dict[word]
cosine = vector_cosine(vector, v)
if cosine > max_cos:
max_cos = cosine
match_word = word
return (match_word, max_cos)
class MySeq2Seq(object):
"""
    Approach: the input and output sequences are fed in together as a single input and then split apart with slice and unpack.
    The encoder-decoder is built exactly as described in the paper.
    On output, the decoder outputs are flattened along the 200-dim word vectors, so the output has shape (?, seq_len*200).
    Regression can then be used as the loss; the target y is flattened the same way to stay consistent.
"""
def __init__(self, max_seq_len = 16, word_vec_dim = 200, input_file='./segment_result_lined.3000000.pair.less'):
self.max_seq_len = max_seq_len
self.word_vec_dim = word_vec_dim
self.input_file = input_file
def generate_trainig_data(self):
load_word_set()
load_vectors("./vectors.bin")
init_seq(self.input_file)
xy_data = []
y_data = []
for i in range(len(question_seqs)):
#for i in range(100):
question_seq = question_seqs[i]
answer_seq = answer_seqs[i]
if len(question_seq) < self.max_seq_len and len(answer_seq) < self.max_seq_len:
sequence_xy = [np.zeros(self.word_vec_dim)] * (self.max_seq_len-len(question_seq)) + list(reversed(question_seq))
sequence_y = answer_seq + [np.zeros(self.word_vec_dim)] * (self.max_seq_len-len(answer_seq))
sequence_xy = sequence_xy + sequence_y
sequence_y = [np.ones(self.word_vec_dim)] + sequence_y
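                # Added illustration: sequence_xy = [PAD..., reversed question, answer, ...PAD]
                # and sequence_y = [GO (all-ones vector), answer, ...PAD], so the target runs
                # one step ahead of the decoder half of the input.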
xy_data.append(sequence_xy)
y_data.append(sequence_y)
#print "right answer"
#for w in answer_seq:
# (match_word, max_cos) = vector2word(w)
# if len(match_word)>0:
# print match_word, vector_sqrtlen(w)
return np.array(xy_data), np.array(y_data)
def model(self, feed_previous=False):
        # Build encoder_inputs and the GO-prefixed decoder_inputs from the combined XY input
input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim], dtype=tf.float32, name = "XY")
encoder_inputs = tf.slice(input_data, [0, 0, 0], [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0], [-1, self.max_seq_len-1, self.word_vec_dim], name="dec_in_tmp")
go_inputs = tf.ones_like(decoder_inputs_tmp)
go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")
        # Encoder
        # Feed encoder_inputs to the encoder; it returns one output (the first value of the predicted sequence) and a state (passed on to the decoder)
(encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim, return_state=True, scope='encoder_lstm')
encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)
        # Decoder
        # During prediction, the output of the previous time step is used as the input of the next time step
        # The encoder's last output is used as the first decoder input
if feed_previous:
first_dec_input = go_inputs
else:
first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim, initial_state=states, return_seq=False, reuse=False, scope='decoder_lstm')
decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
decoder_output_sequence_list = [decoder_output_tensor]
        # Then feed the decoder output back in as the input for the next time step
for i in range(self.max_seq_len-1):
if feed_previous:
next_dec_input = decoder_output_sequence_single
else:
next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0], [-1, 1, self.word_vec_dim])
decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim, return_seq=False, reuse=True, scope='decoder_lstm')
decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
decoder_output_sequence_list.append(decoder_output_tensor)
decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
real_output_sequence = tf.concat(1, [encoder_output_sequence, decoder_output_sequence])
net = tflearn.regression(real_output_sequence, optimizer='sgd', learning_rate=0.1, loss='mean_square')
model = tflearn.DNN(net)
return model
def train(self):
trainXY, trainY = self.generate_trainig_data()
model = self.model(feed_previous=False)
model.fit(trainXY, trainY, n_epoch=1000, snapshot_epoch=False, batch_size=1)
model.save('./model/model')
return model
def load(self):
model = self.model(feed_previous=True)
model.load('./model/model')
return model
if __name__ == '__main__':
phrase = sys.argv[1]
if 3 == len(sys.argv):
my_seq2seq = MySeq2Seq(word_vec_dim=word_vec_dim, max_seq_len=max_seq_len, input_file=sys.argv[2])
else:
my_seq2seq = MySeq2Seq(word_vec_dim=word_vec_dim, max_seq_len=max_seq_len)
if phrase == 'train':
my_seq2seq.train()
else:
model = my_seq2seq.load()
trainXY, trainY = my_seq2seq.generate_trainig_data()
predict = model.predict(trainXY)
for sample in predict:
print "predict answer"
for w in sample[1:]:
(match_word, max_cos) = vector2word(w)
#if vector_sqrtlen(w) < 1:
# break
print match_word, max_cos, vector_sqrtlen(w)
|
backend/app/app/schemas/reponse.py | jimorsm/vue-element-admin-fastapi | 137 | 11171448 | from typing import Optional, Any
from pydantic import BaseModel
# Shared properties
class Response(BaseModel):
code: Optional[int] = None
data : Optional[Any] = None
message: Optional[str] = None
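# Added usage sketch (values are illustrative only):
# Response(code=20000, data={"items": []}, message="success")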
|
ciphey/basemods/Resources/__init__.py | emadfuel/Ciphey | 9,908 | 11171449 | from . import cipheydists, files
|
utils/logger.py | bfortuner/VOCdetect | 336 | 11171462 |
import os
import logging
import imp
import time
def get_logger(log_path='',
logger_name='logger',
ch_log_level=logging.ERROR,
fh_log_level=logging.INFO):
logging.shutdown()
imp.reload(logging)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
# Console Handler
if ch_log_level:
ch = logging.StreamHandler()
ch.setLevel(ch_log_level)
ch.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(ch)
# File Handler
if fh_log_level:
fh = logging.FileHandler(os.path.join(log_path,logger_name+'.log'))
fh.setLevel(fh_log_level)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
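# Added usage sketch (path and name are illustrative only):
# logger = get_logger(log_path='/tmp', logger_name='train', ch_log_level=logging.INFO)
# logger.info('starting run')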
def get_time_msg(start_time):
time_elapsed = time.time() - start_time
msg = 'Time {:.1f}m {:.2f}s'.format(
time_elapsed // 60, time_elapsed % 60)
return msg |
tests/__init__.py | rexyeah/jira-cli | 125 | 11171463 | """
"""
import types
import unittest
if not hasattr(unittest.TestCase, "assertIsNotNone"):
def assertIsNotNone(self, value, message=""):
self.assertNotEqual(value, None, message)
unittest.TestCase.assertIsNotNone = types.MethodType(assertIsNotNone, None, unittest.TestCase)
|
Chapter5/ex_5_19.py | zxjzxj9/PyTorchIntroduction | 205 | 11171534 | """ This code only demonstrates the function signatures and the methods used; it cannot actually be run.
"""
class torch.nn.TransformerEncoder(encoder_layer, num_layers, norm=None)
# Definition of the forward method for TransformerEncoder
forward(src, mask=None, src_key_padding_mask=None)
class torch.nn.TransformerDecoder(decoder_layer, num_layers, norm=None)
# Definition of the forward method for TransformerDecoder
forward(tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None)
class torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
custom_encoder=None, custom_decoder=None)
# Definition of the forward method for Transformer
forward(src, tgt, src_mask=None, tgt_mask=None, memory_mask=None,
src_key_padding_mask=None, tgt_key_padding_mask=None,
memory_key_padding_mask=None)
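# Added minimal usage sketch (illustrative; assumes the default batch_first=False
# layout of (seq_len, batch, d_model)):
# model = torch.nn.Transformer(d_model=512, nhead=8)
# src = torch.rand(10, 32, 512)  # source: length 10, batch 32
# tgt = torch.rand(20, 32, 512)  # target: length 20, batch 32
# out = model(src, tgt)          # -> shape (20, 32, 512)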
|
Benchmarks/scripts/bench_batch_streaming_ingest.py | Pahandrovich/omniscidb | 868 | 11171552 |
import sys
import pymapd
import pyarrow as pa
import pandas as pd
import numpy as np
import time
import argparse
def getOptions(args=None):
parser = argparse.ArgumentParser(description='Benchmark OmniSci batch and streaming table ingest')
parser.add_argument('-s','--host', help='OmniSci server address', default='localhost')
parser.add_argument('-p','--port', help='OmniSci server port', default='6273')
parser.add_argument('-d','--db', help='OmniSci database name', default='omnisci')
parser.add_argument('-u','--user', help='OmniSci user name', default='admin')
parser.add_argument('-w','--password', help='OmniSci password', default='<PASSWORD>')
parser.add_argument('-e','--max_rollback_epochs', help='Max Rollback Epochs', type=int, default=-1)
parser.add_argument('-t','--temp_table', help='Use temporary table', type=bool, default=False)
parser.add_argument('-r','--num_rows', help='Number of rows to benchmark with', type=int, default=10000)
return parser.parse_args(args)
class OmniCon:
def __init__(self, user, pw, dbname):
self.con = pymapd.connect(user=user, password=pw, dbname=dbname, host="localhost")
self.cursor = self.con.cursor()
def query(self, sql):
return self.cursor.execute(sql)
def create_table(omni_con, table_name, is_temporary=False, max_rollback_epochs=-1):
drop_sql = "DROP TABLE IF EXISTS " + table_name
optional_temp_stmt = "TEMPORARY" if is_temporary else ""
optional_max_rollback_stmt = "WITH (max_rollback_epochs={max_rollback_epochs})".format(max_rollback_epochs=max_rollback_epochs) if max_rollback_epochs >= 0 else ""
create_sql = "CREATE {optional_temp_stmt} TABLE {table_name} (a INTEGER, b INTEGER, c INTEGER, d INTEGER) {optional_max_rollback_stmt}".format(optional_temp_stmt = optional_temp_stmt, table_name=table_name, optional_max_rollback_stmt=optional_max_rollback_stmt)
omni_con.query(drop_sql)
omni_con.query(create_sql)
def gen_data(num_rows):
df = pd.DataFrame(np.random.randint(0,100,size=(num_rows, 4)), columns=['a','b','c','d'])
df = df.astype(np.int32)
return df
def bench_streaming_sql_inserts(omni_con, table_name, data):
num_rows = len(data.index)
base_insert_sql = "INSERT INTO " + table_name + "(a, b, c, d) VALUES ({0}, {1}, {2}, {3})"
insert_statements = []
for r in range(num_rows):
insert_statements.append(base_insert_sql.format(data.iat[r,0], data.iat[r,1], data.iat[r,2], data.iat[r,3]))
start_time = time.perf_counter()
for r in range(num_rows):
omni_con.query(insert_statements[r])
end_time = time.perf_counter()
time_diff = end_time - start_time
rows_per_second = num_rows / time_diff
print("Streaming – SQL Inserts: {0} rows in {1} seconds at {2} rows/sec".format(num_rows, time_diff, rows_per_second))
def bench_bulk_columnar(omni_con, table_name, data):
num_rows = len(data.index)
start_time = time.perf_counter()
omni_con.con.load_table_columnar(table_name, data, preserve_index=False)
end_time = time.perf_counter()
time_diff = end_time - start_time
rows_per_second = num_rows / time_diff
print("Bulk load – Columnar: {0} rows in {1} seconds at {2} rows/sec".format(num_rows, time_diff, rows_per_second))
def bench_bulk_arrow(omni_con, table_name, data):
num_rows = len(data.index)
arrow_data = pa.Table.from_pandas(data)
start_time = time.perf_counter()
omni_con.con.load_table_arrow(table_name, arrow_data, preserve_index=False)
end_time = time.perf_counter()
time_diff = end_time - start_time
rows_per_second = num_rows / time_diff
print("Bulk load – Arrow: {0} rows in {1} seconds at {2} rows/sec".format(num_rows, time_diff, rows_per_second))
def bench_streaming_columnar(omni_con, table_name, data):
num_rows = len(data.index)
start_time = time.perf_counter()
for r in range(num_rows):
row_df = data.iloc[r:r+1]
omni_con.con.load_table_columnar(table_name, row_df, preserve_index=False)
end_time = time.perf_counter()
time_diff = end_time - start_time
rows_per_second = num_rows / time_diff
print("Streaming – Columnar: {0} rows in {1} seconds at {2} rows/sec".format(num_rows, time_diff, rows_per_second))
def main(argv):
options = getOptions(argv)
omni_con = OmniCon(options.user, options.password, options.db)
data = gen_data(options.num_rows)
table_name = "stream_insert_sql"
create_table(omni_con, table_name, options.temp_table, options.max_rollback_epochs)
bench_streaming_sql_inserts(omni_con, table_name, data)
#Below is too slow to bench at any real scale
#table_name = "stream_columnar"
#create_table(omni_con, table_name, options.temp_table, options.max_rollback_epochs)
#bench_streaming_columnar(omni_con, table_name, data)
table_name = "bulk_columnar"
create_table(omni_con, table_name, options.temp_table, options.max_rollback_epochs)
bench_bulk_columnar(omni_con, table_name, data)
table_name = "bulk_arrow"
create_table(omni_con, table_name, options.temp_table, options.max_rollback_epochs)
bench_bulk_arrow(omni_con, table_name, data)
if __name__ == "__main__":
main(sys.argv[1:]) |
samples/aci-create-vmw-domain.py | richardstrnad/acitoolkit | 351 | 11171569 | #!/usr/bin/env python
################################################################################
# _ ____ ___ _____ _ _ _ _ #
# / \ / ___|_ _| |_ _|__ ___ | | | _(_) |_ #
# / _ \| | | | | |/ _ \ / _ \| | |/ / | __| #
# / ___ \ |___ | | | | (_) | (_) | | <| | |_ #
# ____ /_/ \_\____|___|___|_|\___/ \___/|_|_|\_\_|\__| #
# / ___|___ __| | ___ / ___| __ _ _ __ ___ _ __ | | ___ ___ #
# | | / _ \ / _` |/ _ \ \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __| #
# | |__| (_) | (_| | __/ ___) | (_| | | | | | | |_) | | __/\__ \ #
# \____\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|\___||___/ #
# |_| #
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
This script logs in to the APIC and creates a VMM Domain.
NOTE: additional configuration is likely required; the newly created VMM domain will need to be associated
with an AEP for the ESX hosts, and the ESX hosts will need to be attached to the DVS.
"""
import acitoolkit.acitoolkit as aci
# Define static values to pass (edit these for your environment)
VMM_TYPE = 'VMware'
DVS_NAME = 'aci-test-dvs'
ACI_USER = 'admin'
ACI_PASS = 'password'
APIC_IP = 'http://apic'
DATACENTER_NAME = 'DATACENTER' # Must match the data center name in vCenter
VCENTER_IP = '1.1.1.1'
VCENTER_USER = 'administrator'
VCENTER_CREDS = VCENTER_USER
VCENTER_PASS = '<PASSWORD>'
POOL_NAME = 'dvs-vlans'
ENCAP_TYPE = 'vlan'
VLAN_START = '3150'
VLAN_END = '3200'
POOL_MODE = 'dynamic'
def main():
"""
Main create VMM routine
:return: None
"""
# Get all the arguments
description = 'Create VMM Domain'
creds = aci.Credentials('apic', description)
args = creds.get()
# Login to the APIC
session = aci.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
# Define dynamic vlan range
vlans = aci.NetworkPool(POOL_NAME, ENCAP_TYPE, VLAN_START, VLAN_END, POOL_MODE)
# Commit VLAN Range
vlanresp = session.push_to_apic(vlans.get_url(), vlans.get_json())
if not vlanresp.ok:
print('%% Error: Could not push configuration to APIC')
print(vlanresp.text)
# Create Credentials object
vcenter_creds = aci.VMMCredentials(VCENTER_CREDS, VCENTER_USER, VCENTER_PASS)
# Vswitch Info object
vswitch_info = aci.VMMvSwitchInfo(VMM_TYPE, DATACENTER_NAME, DVS_NAME)
# Create VMM object
vmm = aci.VMM(DVS_NAME, VCENTER_IP, vcenter_creds, vswitch_info, vlans)
# Commit Changes
resp = session.push_to_apic(vmm.get_url(), vmm.get_json())
if not resp.ok:
print('%% Error: Could not push configuration to APIC')
print(resp.text)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
tests/generators/merkle/main.py | jacobkaufmann/consensus-specs | 2,161 | 11171588 |
from eth2spec.test.helpers.constants import ALTAIR, MERGE
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
if __name__ == "__main__":
altair_mods = {key: 'eth2spec.test.altair.merkle.test_' + key for key in [
'single_proof',
]}
merge_mods = altair_mods
all_mods = {
ALTAIR: altair_mods,
MERGE: merge_mods,
}
run_state_test_generators(runner_name="merkle", all_mods=all_mods)
|
serieswatcher/sqlobject/tests/test_comparison.py | lightcode/SeriesWatcher | 303 | 11171603 |
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComparison(SQLObject):
pass
def test_eq():
setupClass(TestComparison)
t1 = TestComparison()
t2 = TestComparison()
TestComparison._connection.cache.clear()
t3 = TestComparison.get(1)
t4 = TestComparison.get(2)
assert t1.id == t3.id
assert t2.id == t4.id
assert t1 is not t3
assert t2 is not t4
assert t1 == t3
assert t2 == t4
assert t1 <> t2
|
tests/trac/trac-0033/tread.py | eLBati/pyxb | 123 | 11171626 | # -*- coding: utf-8 -*-
from __future__ import print_function
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import time
import pyxb.binding.generate
import pyxb.utils.domutils
from pyxb.utils.six.moves import xrange
max_reps = 20
def buildTest (num_reps, constraint='minOccurs="0" maxOccurs="1"'):
edefs = []
cdefs = []
duse = []
for r in xrange(num_reps):
edefs.append('<xs:element name="rep%d" type="xs:string"/>' % (r,))
cdefs.append('<xs:element ref="rep%d" %s/>' % (r, constraint))
duse.append('<rep%d>text_%d</rep%d>' % (r, r, r))
schema = ''.join([ '''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">''',
"\n".join(edefs),
'''<xs:element name="collection">
<xs:complexType><xs:sequence>''',
"\n".join(cdefs),
'''</xs:sequence></xs:complexType>
</xs:element>
</xs:schema>''' ])
xmls = '<collection>' + ''.join(duse) + '</collection>'
return (schema, xmls)
for size in xrange(1, max_reps):
(schema, xmls) = buildTest(size)
t0 = time.time()
code = pyxb.binding.generate.GeneratePython(schema_text=schema)
t1 = time.time()
rv = compile(code, 'test', 'exec')
t2 = time.time()
eval(rv)
t3 = time.time()
#open('code.py', 'w').write(code)
#print xmls
ct0 = time.time()
doc = CreateFromDocument(xmls)
ct1 = time.time()
print("%d gen=%g cpl=%g ld=%g prs=%g" % (size, t1 - t0, t2 - t1, t3 - t2, ct1 - ct0))
# Should not take more than a second (really, less than 10ms)
assert (ct1 - ct0) < 1.0
#open('code.py', 'w').write(code)
|
Python3/68.py | rakhi2001/ecom7 | 854 | 11171632 | __________________________________________________________________________________________________
sample 20 ms submission
class Solution:
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
n = len(words)
if n == 0:
return []
res = []
cur, num_letters = [], 0
for w in words:
if len(cur) + len(w) + num_letters > maxWidth:
if len(cur) == 1:
res.append(cur[0] + ' '*(maxWidth - num_letters))
else:
num_space = maxWidth - num_letters
gap, remain_gap = divmod(num_space, len(cur) - 1)
for i in range(remain_gap):
cur[i] += ' '
res.append((' '*gap).join(cur))
cur, num_letters = [], 0
cur += [w]
num_letters += len(w)
res.append(' '.join(cur) + ' '*(maxWidth - num_letters - len(cur) + 1))
return res
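    # Added worked example (the standard LeetCode 68 case):
    # fullJustify(["This","is","an","example","of","text","justification."], 16)
    # -> ["This    is    an", "example  of text", "justification.  "]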
__________________________________________________________________________________________________
sample 13064 kb submission
class Solution:
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
curLine = [];
curLineCharacterLength = 0;
lines = [];
output = [];
for word in words:
if len(curLine) == 0:
curLine.append(word);
curLineCharacterLength = len(word);
else:
if curLineCharacterLength + len(word) + 1 <= maxWidth:
curLine.append(" " + word);
curLineCharacterLength += len(word) + 1;
else:
lines.append(curLine);
curLine = [word];
curLineCharacterLength = len(word);
if len(curLine) > 0:
lines.append(curLine);
for i in range(len(lines)):
line = lines[i];
if i < len(lines) - 1: #not last line
lineWordLength = 0;
for word in line:
lineWordLength += len(word);
remainingSpace = maxWidth - lineWordLength;
remainingSpacePer = remainingSpace//(len(line) - 1) if len(line)-1 > 0 else remainingSpace;
remainingPercentSpace = remainingSpace % (len(line) - 1) if len(line)-1 > 0 else 0;
lineString = "";
for j in range(len(line)):
word = line[j];
lineString += word;
if j < len(line) - 1 or len(line) == 1:
lineString += "".join(remainingSpacePer * [" "]);
if j < remainingPercentSpace:
lineString += " ";
output.append(lineString);
else: #last line, handle differently
lineWordLength = 0;
for word in line:
lineWordLength += len(word);
remainingSpace = maxWidth - lineWordLength;
lineString = "".join(line) + "".join(remainingSpace * [" "]);
output.append(lineString);
return output;
__________________________________________________________________________________________________
|
notebooks/Ch03_Processing_Wrangling_and_Visualizing_Data/matplotlib_viz.py | baoqt2/practical-machine-learning-with-python | 1,989 | 11171634 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 11 09:56:39 2017
@author: <NAME>
"""
"""
This script visualizes data using matplotlib
``Execute``
$ python matplotlib_viz.py
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__=='__main__':
# sample plot
x = np.linspace(-10, 10, 50)
y=np.sin(x)
plt.plot(x,y)
plt.title('Sine Curve using matplotlib')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
# figure
plt.figure(1)
plt.plot(x,y)
plt.title('Fig1: Sine Curve')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
plt.figure(2)
y=np.cos(x)
plt.plot(x,y)
plt.title('Fig2: Cosine Curve')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
### subplot
# fig.add_subplot
y = np.sin(x)
figure_obj = plt.figure()
ax1 = figure_obj.add_subplot(2,2,1)
ax1.plot(x,y)
ax2 = figure_obj.add_subplot(2,2,2)
ax3 = figure_obj.add_subplot(2,2,3)
ax4 = figure_obj.add_subplot(2,2,4)
ax4.plot(x+10,y)
plt.show()
# plt.subplots
fig, ax_list = plt.subplots(2,1,sharex=True)
y= np.sin(x)
ax_list[0].plot(x,y)
y= np.cos(x)
ax_list[1].plot(x,y)
plt.show()
# plt.subplot (creates figure and axes objects automatically)
plt.subplot(2,2,1)
y = np.sin(x)
plt.plot(x,y)
plt.subplot(2,2,2)
y = np.cos(x)
plt.plot(x,y)
plt.subplot(2,1,2)
y = np.tan(x)
plt.plot(x,y)
plt.show()
# subplot2grid
y = np.abs(x)
z = x**2
plt.subplot2grid((4,3), (0, 0), rowspan=4, colspan=2)
plt.plot(x, y,'b',x,z,'r')
ax2 = plt.subplot2grid((4,3), (0, 2),rowspan=2)
plt.plot(x, y,'b')
plt.setp(ax2.get_xticklabels(), visible=False)
plt.subplot2grid((4,3), (2, 2), rowspan=2)
plt.plot(x, z,'r')
plt.show()
### formatting
y = x
# color
ax1 = plt.subplot(611)
plt.plot(x,y,color='green')
ax1.set_title('Line Color')
plt.setp(ax1.get_xticklabels(), visible=False)
# linestyle
# linestyles -> '-','--','-.', ':', 'steps'
ax2 = plt.subplot(612,sharex=ax1)
plt.plot(x,y,linestyle='--')
ax2.set_title('Line Style')
plt.setp(ax2.get_xticklabels(), visible=False)
# marker
# markers -> '+', 'o', '*', 's', ',', '.', etc
ax3 = plt.subplot(613,sharex=ax1)
plt.plot(x,y,marker='*')
ax3.set_title('Point Marker')
plt.setp(ax3.get_xticklabels(), visible=False)
# line width
ax4 = plt.subplot(614,sharex=ax1)
line = plt.plot(x,y)
line[0].set_linewidth(3.0)
ax4.set_title('Line Width')
plt.setp(ax4.get_xticklabels(), visible=False)
# alpha
ax5 = plt.subplot(615,sharex=ax1)
alpha = plt.plot(x,y)
alpha[0].set_alpha(0.3)
ax5.set_title('Line Alpha')
plt.setp(ax5.get_xticklabels(), visible=False)
# combine linestyle
ax6 = plt.subplot(616,sharex=ax1)
plt.plot(x,y,'b^')
ax6.set_title('Styling Shorthand')
fig = plt.gcf()
fig.set_figheight(15)
plt.show()
# legends
y = x**2
z = x
plt.plot(x,y,'g',label='y=x^2')
plt.plot(x,z,'b:',label='y=x')
plt.legend(loc="best")
plt.title('Legend Sample')
plt.show()
# legend with latex formatting
plt.plot(x,y,'g',label='$y = x^2$')
    plt.plot(x,z,'b:',linewidth=3,label='$y = x$')
plt.legend(loc="best",fontsize='x-large')
plt.title('Legend with LaTEX formatting')
plt.show()
## axis controls
# secondary y-axis
fig, ax1 = plt.subplots()
ax1.plot(x,y,'g')
ax1.set_ylabel(r"primary y-axis", color="green")
ax2 = ax1.twinx()
ax2.plot(x,z,'b:',linewidth=3)
ax2.set_ylabel(r"secondary y-axis", color="blue")
plt.title('Secondary Y Axis')
plt.show()
# ticks
y = np.log(x)
z = np.log2(x)
w = np.log10(x)
plt.plot(x,y,'r',x,z,'g',x,w,'b')
plt.title('Default Axis Ticks')
plt.show()
# axis-controls
plt.plot(x,y,'r',x,z,'g',x,w,'b')
# values: tight, scaled, equal,auto
plt.axis('tight')
plt.title('Tight Axis')
plt.show()
# manual
plt.plot(x,y,'r',x,z,'g',x,w,'b')
plt.axis([0,2,-1,2])
plt.title('Manual Axis Range')
plt.show()
# Manual ticks
plt.plot(x, y)
ax = plt.gca()
ax.xaxis.set_ticks(np.arange(-2, 2, 1))
plt.grid(True)
plt.title("Manual ticks on the x-axis")
plt.show()
# minor ticks
plt.plot(x, z)
plt.minorticks_on()
ax = plt.gca()
ax.yaxis.set_ticks(np.arange(0, 5))
ax.yaxis.set_ticklabels(["min", 2, 4, "max"])
plt.title("Minor ticks on the y-axis")
plt.show()
# scaling
plt.plot(x, y)
ax = plt.gca()
# values: log, logit, symlog
ax.set_yscale("log")
plt.grid(True)
plt.title("Log Scaled Axis")
plt.show()
# annotations
y = x**2
min_x = 0
min_y = min_x**2
plt.plot(x, y, "b-", min_x, min_y, "ro")
plt.axis([-10,10,-25,100])
plt.text(0, 60, "Parabola\n$y = x^2$", fontsize=15, ha="center")
plt.text(min_x, min_y+2, "Minima", ha="center")
plt.text(min_x, min_y-6, "(%0.1f, %0.1f)"%(min_x, min_y), ha='center',color='gray')
plt.title("Annotated Plot")
plt.show()
# global formatting params
params = {'legend.fontsize': 'large',
'figure.figsize': (10, 10),
'axes.labelsize': 'large',
'axes.titlesize':'large',
'xtick.labelsize':'large',
'ytick.labelsize':'large'}
plt.rcParams.update(params)
# saving
#plt.savefig("sample_plot.png", transparent=True) |
corehq/apps/accounting/migrations/0051_hubspot_restrictions.py | dimagilg/commcare-hq | 471 | 11171655 |
# Generated by Django 2.2.16 on 2020-10-15 17:48
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting', '0050_app_user_profiles'),
]
operations = [
migrations.AddField(
model_name='billingaccount',
name='block_email_domains_from_hubspot',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=253, null=True), default=list, blank=True, null=True, size=None),
),
migrations.AddField(
model_name='billingaccount',
name='block_hubspot_data_for_all_users',
field=models.BooleanField(default=False),
),
]
|
draw_pic.py | wu546300070/weiboanalysis | 685 | 11171713 | # SVM accuracy
from pylab import mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.sans-serif'] = ['SimHei']
fig, ax = plt.subplots()
x = [i for i in range(0, 21)]
array = [
0.850000, 0.825000, 0.818750, 0.800000, 0.843750, 0.812500, 0.775000, 0.831250, 0.850000, 0.850000, 0.850000,
0.806250, 0.875000, 0.850000, 0.831250, 0.850000, 0.875000, 0.856250, 0.837500, 0.862500, 0.842300
]
plt.plot(x, array, "x-", label="正确率")
plt.plot(x, [0.8375] * 21,"+-", label="平均正确率")
plt.xlabel("次数")
plt.ylabel("准确率")
plt.legend(bbox_to_anchor=(1.0, 1), loc=1, borderaxespad=0.)
plt.title("SVC")
plt.show()
# Three classes
# 1. Plot: naive Bayes, comparing the error rate of our own implementation against sklearn's built-in MultinomialNB
# from pylab import mpl
# import matplotlib.pyplot as plt
# import numpy as np
# mpl.rcParams['font.sans-serif'] = ['SimHei']
# migtime = [0.37, 0.33, 0.32, 0.33, 0.35, 0.3, 0.32, 0.32, 0.35, 0.29]
# mid=[1-np.sum(migtime)/10]
# # delay = [0.44, 0.48, 0.38, 0.35, 0.36, 0.34, 0.35, 0.39, 0.40, 0.35]
# fig, ax = plt.subplots()
# plt.xlabel('次数')
# plt.ylabel('准确率')
# x = [i for i in range(1, 11)]
# plt.plot(x, [1-t for t in migtime], "x-", label="正确率")
# plt.plot(x,10*mid,label="平均正确率")
# plt.title("多项式朴素贝叶斯")
# # plt.plot(x, delay, "+-", label="实现代码")
# plt.grid(True)
# plt.legend(bbox_to_anchor=(1.0, 1), loc=1, borderaxespad=0.)
# plt.show()
# 2. Error rate across the 50 AdaBoost iterations
# from pylab import mpl
# import matplotlib.pyplot as plt
# import numpy as np
# mpl.rcParams['font.sans-serif'] = ['SimHei']
# x = [i for i in range(1, 51)]
# array = [0.270000, 0.200000, 0.170000, 0.190000, 0.170000, 0.160000, 0.190000, 0.160000, 0.180000, 0.180000, 0.170000,
# 0.160000, 0.180000, 0.170000, 0.180000, 0.170000, 0.170000, 0.160000, 0.190000, 0.170000, 0.170000, 0.170000,
# 0.170000, 0.170000, 0.190000, 0.160000, 0.170000, 0.170000, 0.180000, 0.170000, 0.180000, 0.160000, 0.170000,
# 0.180000, 0.180000, 0.160000, 0.180000, 0.160000, 0.180000, 0.180000, 0.170000, 0.160000, 0.180000, 0.170000,
# 0.180000, 0.170000, 0.170000, 0.160000, 0.190000, 0.170000]
# plt.plot(x, array,label="错误率")
# plt.xlabel("次数")
# plt.ylabel("错误率")
# plt.xlim(0.5,50)
# plt.title("AdaBoost二分类错误率")
# plt.show()
|
library/oci_file_system_facts.py | slmjy/oci-ansible-modules | 106 | 11171727 |
#!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_file_system_facts
short_description: Fetches details of the OCI File System instances
description:
- Fetches details of the OCI File System instances.
version_added: "2.5"
options:
compartment_id:
description: Identifier of the compartment from which details of all OCI File System instances
must be fetched
required: false
availability_domain:
description: Availability domain from which details of all OCI File System instances
must be fetched.
required: false
file_system_id:
description: Identifier of the File System whose details needs to be fetched.
required: false
aliases: ['id']
lifecycle_state:
description: A filter to only return resources that match the given lifecycle state. The state value is
case-insensitive.
required: false
choices: ['CREATING', 'ACTIVE', 'DELETING', 'DELETED', 'FAILED']
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_display_name_option ]
"""
EXAMPLES = """
# Fetch File System
- name: List all File System in a compartment and availability domain
oci_file_system_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
availability_domain: 'IwGV:US-EXAMPLE-AD-1'
# Fetch File System, filtered by Display Name
- name: List all File System in a compartment and availability domain, filtered by Display Name
oci_file_system_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
availability_domain: 'IwGV:US-EXAMPLE-AD-1'
display_name: 'ansible-mount-target'
# Fetch File System, filtered by lifecycle state
- name: List all File System in a compartment and availability domain, filtered by lifecycle state
oci_file_system_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
availability_domain: 'IwGV:US-EXAMPLE-AD-1'
lifecycle_state: 'CREATING'
# Fetch specific File System
- name: List a specific File System
oci_file_system_facts:
file_system_id: 'ocid1.filesystem..xxxxxEXAMPLExxxxx'
"""
RETURN = """
file_systems:
    description: Attributes of the fetched File System.
returned: success
type: complex
contains:
compartment_id:
description: The identifier of the compartment containing the File System
returned: always
type: string
sample: ocid1.compartment.oc1.xzvf..xxxxxEXAMPLExxxxx
availability_domain:
description: The availability domain the File System is in.
returned: always
type: string
sample: IwGV:US-EXAMPLE-AD-1
display_name:
description: The user-friendly name for the File System.
returned: always
type: string
sample: ansible-file-system
defined_tags:
description: Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see
U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
returned: always
type: dict
sample: {"Department": "Finance"}
freeform_tags:
description: Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see
U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
returned: always
type: dict
sample: {"Operations": {"CostCenter": "42"}}
id:
description: The identifier of the File System
returned: always
type: string
sample: ocid1.filesystem.oc1.xzvf..xxxxxEXAMPLExxxxx
lifecycle_state:
description: The current state of the File System.
returned: always
type: string
sample: ACTIVE
metered_bytes:
description: The number of bytes consumed by the file system, including any snapshots. This number
reflects the metered size of the file system and is updated asynchronously with respect
to updates to the file system.
returned: always
type: int
sample: 582
time_created:
description: Date and time when the File System was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2018-10-16T09:43:00.051000+00:00
sample: [{
"availability_domain":"IwGV:US-EXAMPLE-AD-1",
"compartment_id":"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name":"ansible_file_system",
"defined_tags":{
"ansible_tag_namespace_integration_test_1":{
"ansible_tag_1":"initial"
}
},
"freeform_tags":{
"system_license":"trial"
},
"id":"ocid1.filesystem.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_state":"ACTIVE",
"metered_bytes":100,
"time_created":"2018-10-16T09:43:00.051000+00:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.file_storage.file_storage_client import FileStorageClient
from oci.exceptions import ServiceError
from oci.util import to_dict
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
def list_file_systems(file_storage_client, module):
result = dict(file_systems="")
compartment_id = module.params.get("compartment_id")
availability_domain = module.params.get("availability_domain")
file_system_id = module.params.get("file_system_id")
try:
if compartment_id and availability_domain:
get_logger().debug(
"Listing all File Systems under compartment %s and availability domain %s",
compartment_id,
availability_domain,
)
optional_list_method_params = ["display_name", "lifecycle_state"]
optional_kwargs = dict(
(param, module.params[param])
for param in optional_list_method_params
if module.params.get(param) is not None
)
existing_file_systems_summary = to_dict(
oci_utils.list_all_resources(
file_storage_client.list_file_systems,
compartment_id=compartment_id,
availability_domain=availability_domain,
**optional_kwargs
)
)
existing_file_systems = [
oci_utils.call_with_backoff(
file_storage_client.get_file_system,
file_system_id=file_system["id"],
).data
for file_system in existing_file_systems_summary
]
elif file_system_id:
get_logger().debug("Listing File System %s", file_system_id)
response = oci_utils.call_with_backoff(
file_storage_client.get_file_system, file_system_id=file_system_id
)
existing_file_systems = [response.data]
else:
module.fail_json(
msg="No value provided for either compartment_id and availability_domain"
+ "or file_system_id"
)
except ServiceError as ex:
get_logger().error("Unable to list File Systems due to %s", ex.message)
module.fail_json(msg=ex.message)
result["file_systems"] = to_dict(existing_file_systems)
return result
def set_logger(input_logger):
global logger
logger = input_logger
def get_logger():
return logger
def main():
logger = oci_utils.get_logger("oci_file_system_facts")
set_logger(logger)
module_args = oci_utils.get_facts_module_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=False),
availability_domain=dict(type="str", required=False),
file_system_id=dict(type="str", required=False, aliases=["id"]),
lifecycle_state=dict(
type="str",
required=False,
choices=["CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED"],
),
)
)
module = AnsibleModule(
argument_spec=module_args,
mutually_exclusive=[
["compartment_id", "file_system_id"],
["availability_domain", "file_system_id"],
],
)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
file_storage_client = oci_utils.create_service_client(module, FileStorageClient)
result = list_file_systems(file_storage_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
test/test_timer.py | Scartography/mapchete | 161 | 11171734 | from mapchete import Timer
def test_timer():
timer1 = Timer(elapsed=1000)
timer2 = Timer(elapsed=2000)
timer3 = Timer(elapsed=1000)
assert timer1 < timer2
assert timer1 <= timer2
assert timer2 > timer3
assert timer2 >= timer3
assert timer1 == timer3
assert timer1 != timer2
assert timer1 + timer3 == timer2
assert timer2 - timer3 == timer1
timer = Timer(elapsed=60)
assert str(timer) == "1m 0s"
timer = Timer(elapsed=60)
assert str(timer) == "1m 0s"
timer = Timer(elapsed=3700)
assert str(timer) == "1h 1m 40s"
assert "Timer" in timer.__repr__()
|
backend/category/ssh/ssh_operation.py | zerlee/open-cmdb | 126 | 11171748 | <filename>backend/category/ssh/ssh_operation.py
import json
import os
from django.conf import settings
from rest_framework.exceptions import ParseError
from category.models import Server
from .ssh_connection import SSHConnection
class SSHOperation(object):
def __init__(self, host, port, user):
self.host = host
self.port = port
self.user = user
self.cron_dir = '/var/spool/cron/'
@staticmethod
def __operate(action):
try:
return action
except Exception as e:
raise ParseError(e)
def __conn(self):
ssh_conn = SSHConnection(host=self.host, port=self.port, user=self.user, key_file=settings.KEY_FILE)
return self.__operate(ssh_conn)
def __upload(self, path_local, path_remote):
conn = self.__conn()
return self.__operate(conn.upload(path_local, path_remote))
def __download(self, path_local, path_remote):
conn = self.__conn()
return self.__operate(conn.download(path_local, path_remote))
def __cmd(self, command):
conn = self.__conn()
return self.__operate(conn.cmd(command))
def fetch_host_info(self, source_data, ssh_user):
path_local = 'scripts/sys_info'
path_remote = '/tmp/sys_info'
self.__upload(path_local, path_remote)
command = "chmod +x {} && sudo {} {}".format(path_remote, path_remote, ssh_user)
data = self.__cmd(command)
if not data:
raise ParseError('未获取到客户机数据')
data = json.loads(data)
source_data['uuid'] = data['uuid']
source_data['system_product'] = data['system_product']
source_data['disk'] = data['disk']
source_data['cpu'] = data['cpu']['version']
source_data['memory'] = data['memory']
source_data['name'] = data['name']
return source_data
def fetch_cron_content(self, filename):
file_path = '{}{}'.format(self.cron_dir, filename)
command = "sudo cat {}".format(file_path)
data = self.__cmd(command)
return data
def fetch_cron_log(self, user, count=100):
command = "sudo tail -{} /var/log/cron|grep CROND|grep {}".format(count, user)
data = self.__cmd(command)
return data
def update_cron_file(self, filename, content):
tmp_file = '/tmp/{}'.format(filename)
remote_path = '{}{}'.format(self.cron_dir, filename)
with open(tmp_file, 'w') as f:
f.write(content)
self.__upload(tmp_file, tmp_file)
command = "sudo mv {} {} && sudo chmod 644 {}".format(tmp_file, remote_path, remote_path)
self.__cmd(command)
os.remove(tmp_file)
def sync_cron_file(self, filename, servers):
content = self.fetch_cron_content(filename)
servers = servers.get('servers')
ips = []
for pk in servers:
instance = Server.objects.get(pk=pk)
username = instance.ssh_user.name
ssh_operation = SSHOperation(instance.ssh_ip, instance.ssh_port, username)
ssh_operation.update_cron_file(username, content)
ips.append(instance.ssh_ip)
return ips
|
climlab/process/implicit.py | nfeldl/climlab | 160 | 11171764 | from __future__ import division
from climlab.process.time_dependent_process import TimeDependentProcess
class ImplicitProcess(TimeDependentProcess):
"""A parent class for modules that use implicit time discretization.
During initialization following attributes are intitialized:
:ivar time_type: is set to ``'implicit'``
:vartype time_type: str
:ivar adjustment: the model state adjustments due to this implicit
subprocess
:vartype adjustment: dict
"""
def __init__(self, **kwargs):
super(ImplicitProcess, self).__init__(**kwargs)
self.time_type = 'implicit'
self.adjustment = {}
def _compute(self):
"""Computes the state variable tendencies in time for implicit processes.
To calculate the new state the :func:`_implicit_solver()` method is
called for daughter classes. This however returns the new state of the
variables, not just the tendencies. Therefore, the adjustment is
calculated which is the difference between the new and the old state
and stored in the object's attribute adjustment.
Calculating the new model states through solving the matrix problem
already includes the multiplication with the timestep. The derived
adjustment is divided by the timestep to calculate the implicit
subprocess tendencies, which can be handeled by the
:func:`~climlab.process.time_dependent_process.TimeDependentProcess.compute`
method of the parent
:class:`~climlab.process.time_dependent_process.TimeDependentProcess` class.
:ivar dict adjustment: holding all state variables' adjustments
of the implicit process which are the
differences between the new states (which have
been solved through matrix inversion) and the
old states.
"""
newstate = self._implicit_solver()
adjustment = {}
tendencies = {}
for name, var in self.state.items():
adjustment[name] = newstate[name] - var
tendencies[name] = adjustment[name] / self.timestep
# express the adjustment (already accounting for the finite time step)
        # as a tendency per unit time, so that it can be applied alongside the explicit tendencies
self.adjustment = adjustment
self._update_diagnostics(newstate)
return tendencies
def _update_diagnostics(self, newstate):
'''This method is called each timestep after the new state is computed
with the implicit solver. Daughter classes can implement this method to
compute any diagnostic quantities using the new state.'''
pass
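# A minimal sketch (added for illustration, not part of climlab) of what a daughter
# class provides: _implicit_solver() returns the *new* state, and the parent class
# turns it into adjustments and tendencies as described above.
class _ToyRelaxation(ImplicitProcess):
    """Hypothetical example: backward-Euler relaxation of each state variable toward a target."""
    def __init__(self, target=280., tau=10.*86400., **kwargs):
        super(_ToyRelaxation, self).__init__(**kwargs)
        self.target = target
        self.tau = tau
    def _implicit_solver(self):
        # Solve T_new = T_old - dt*(T_new - target)/tau for T_new
        newstate = {}
        for name, var in self.state.items():
            newstate[name] = ((var + self.timestep / self.tau * self.target)
                              / (1. + self.timestep / self.tau))
        return newstate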
|
tests/r/test_sp500.py | hajime9652/observations | 199 | 11171778 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.sp500 import sp500
def test_sp500():
"""Test module sp500.py by downloading
sp500.csv and testing shape of
extracted data has 2783 rows and 1 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = sp500(test_path)
try:
assert x_train.shape == (2783, 1)
except:
shutil.rmtree(test_path)
    raise  # re-raise the original exception
|
.github/CI_MISC/pre_qt_universal_build.py | Croden1999/Play- | 1,720 | 11171811 |
import sys
import os
import shutil
import glob
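# Added summary comment (inferred from the code below): this script merges an Intel
# and an arm64 Homebrew Qt build into universal binaries by lipo-ing frameworks,
# static libraries and plugins together, after rewriting Homebrew-specific install
# names and rpaths with install_name_tool.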
def fix_id(lib_name, is_fw):
if(lib_name.count("HOMEBREW_PREFIX") == 0):
return ""
if(is_fw):
lib_name = "/".join(lib_name.split(" (c")[0].strip().split("/")[4:])
new_lib_path = f"@rpath/{lib_name}"
return f"-add_rpath @loader_path/../../../ -id {new_lib_path}"
else:
lib_name = lib_name.split(" (c")[0].strip().split("/")[-1]
return f"-add_rpath @loader_path/../../lib -id {lib_name}"
def fix_framework(data):
data = [lib_name.strip().split(" (c")[0].strip() for lib_name in data if lib_name.count("HOMEBREW_CELLAR")]
l = []
for lib_name in data:
new_lib_name = "/".join(lib_name.split("/")[4:])
new_lib_path = f"@rpath/{new_lib_name}"
l.append(f"-change {lib_name} {new_lib_path}")
return " ".join(l).strip()
def fix_library(fw_path, is_fw):
stream = os.popen(f"otool -L {fw_path}")
output = stream.read()
data = output.split('\n')[1:]
change_name = fix_framework(data)
change_id = fix_id(data[0], is_fw)
args = f" {change_name} {change_id} {fw_path}"
if(change_id or change_name):
os.system(f"install_name_tool {args}")
print(f"install_name_tool {args}")
def process_framework():
framework_list = glob.glob(os.path.join(qt_base, "lib_orig", "Qt*"))
for orig_fw_path in framework_list:
other_fw_path = orig_fw_path.replace("lib_orig", "lib").replace(qt_base, qt_base_2)
if(os.path.isdir(other_fw_path)):
fw_name = orig_fw_path.split("/")[-1].split(".")[0]
if(os.path.isfile(os.path.join(orig_fw_path, "Versions", "5", fw_name))):
new_path = shutil.copytree(orig_fw_path, orig_fw_path.replace("lib_orig", "lib"), symlinks=True)
os.remove(os.path.join(new_path, "Versions", "5", fw_name))
print(f"Processing {fw_name}.framework")
fix_library(f"{orig_fw_path}/Versions/5/{fw_name}", True)
fix_library(f"{other_fw_path}/Versions/5/{fw_name}", True)
os.system(f"lipo -create -output {new_path}/Versions/5/{fw_name} {orig_fw_path}/Versions/5/{fw_name} {other_fw_path}/Versions/5/{fw_name}")
else:
src = os.path.join(qt_base, "lib_orig", f"{fw_name}.framework")
dst = os.path.join(qt_base, "lib", f"{fw_name}.framework")
if(os.path.isdir(src)):
print(f"Processing {fw_name}.framework (Header Only)")
shutil.copytree(src, dst)
def process_static_libs():
static_lib_list = glob.glob(os.path.join(qt_base, "lib_orig", "lib*.a"))
for orig_lib_path in static_lib_list:
other_lib_path = orig_lib_path.replace("lib_orig", "lib").replace(qt_base, qt_base_2)
if(os.path.isfile(other_lib_path)):
new_lib_path = orig_lib_path.replace("lib_orig", "lib")
lib_name = orig_lib_path.split("/")[-1]
print(f"Processing {lib_name}")
prl_src = orig_lib_path[:-1] + "prl"
prl_dst = prl_src.replace("lib_orig", "lib")
shutil.copy(prl_src, prl_dst)
os.system(f"lipo -create -output {new_lib_path} {orig_lib_path} {other_lib_path}")
def process_plugins():
plugin_list = glob.glob(os.path.join(qt_base, "plugins_orig", "*", "*.dylib"))
for orig_plugin_path in plugin_list:
other_plugin_path = orig_plugin_path.replace("plugins_orig", "plugins").replace(qt_base, qt_base_2)
if(os.path.isfile(other_plugin_path)):
plugin_name = orig_plugin_path.split("/")[-1]
new_plugin_path = orig_plugin_path.replace("plugins_orig", "plugins")
os.makedirs(os.path.dirname(new_plugin_path), exist_ok=True)
print(f"Processing {plugin_name}")
fix_library(orig_plugin_path, False)
fix_library(other_plugin_path, False)
os.system(f"lipo -create -output {new_plugin_path} {orig_plugin_path} {other_plugin_path}")
def prepare_folders():
if(not os.path.exists(lib_orig)):
shutil.move(os.path.join(qt_base, "lib"), lib_orig)
if(os.path.exists(os.path.join(qt_base, "lib"))):
shutil.rmtree(os.path.join(qt_base, "lib"))
if(not os.path.exists(plugins_orig)):
shutil.move(os.path.join(qt_base, "plugins"), plugins_orig)
if(os.path.exists(os.path.join(qt_base, "plugins"))):
shutil.rmtree(os.path.join(qt_base, "plugins"))
os.makedirs(os.path.join(qt_base, "lib"))
os.makedirs(os.path.join(qt_base, "plugins"))
for name in ["cmake", "metatypes", "pkgconfig"]:
shutil.copytree( os.path.join(lib_orig, name), os.path.join(qt_base, "lib", name))
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) != 2:
print("Usage: {} QT_BASE QT_2ND_BASE\n\n".format(sys.argv[0]))
print("e.g: {} ./qt_intel/5.15.2/ ./qt_arm64/5.12.2/".format(sys.argv[0]))
exit(0)
qt_base = os.path.abspath(args[0])
qt_base_2 = os.path.abspath(args[1])
lib_orig = os.path.join(qt_base, "lib_orig")
plugins_orig = os.path.join(qt_base, "plugins_orig")
prepare_folders()
process_framework()
process_static_libs()
process_plugins()
|
monitoring/grafana/grr_grafanalib_dashboards/config.py | tsehori/grr | 4,238 | 11171812 | from grr_grafanalib_dashboards import reusable_panels
# The data source names are specified after Grafana is set up
# and it can be visited at localhost:3000.
# In GRR Monitoring docs, we suggest naming the data sources "grr-server"
# and "grrafana" respectively, but if it's not the case, change it here.
# For reference, take a look at the docs here:
# https://grr-doc.readthedocs.io/en/latest/maintaining-and-tuning/monitoring.html#example-visualization-and-alerting-setup
GRAFANA_DATA_SOURCE = "grr-server"
CLIENT_LOAD_STATS_DATA_SOURCE = "grrafana"
# An alert will be fired if the number of active processes (of any
# GRR server component) is below this number.
# This alert will be triggered once this condition holds for 10 seconds.
ACTIVE_PROCESSES_ALERTING_CONDITION = 1
|
setup.py | thavelick/summarize | 175 | 11171820 | #!/usr/bin/env python
# http://docs.python.org/2/distutils/setupscript.html
from distutils.core import setup
setup(
name='summarize',
version='20121029',
# ...
py_modules=['summarize'],
# ...
)
|
beautifultable/beautifultable.py | onmeac/beautifultable | 137 | 11171847 | <gh_stars>100-1000
"""This module provides BeautifulTable class
It is intended for printing Tabular data to terminals.
Example
-------
>>> from beautifultable import BeautifulTable
>>> table = BeautifulTable()
>>> table.columns.header = ['1st column', '2nd column']
>>> for i in range(5):
... table.rows.append([i, i*i])
...
>>> print(table)
+------------+------------+
| 1st column | 2nd column |
+------------+------------+
| 0 | 0 |
+------------+------------+
| 1 | 1 |
+------------+------------+
| 2 | 4 |
+------------+------------+
| 3 | 9 |
+------------+------------+
| 4 | 16 |
+------------+------------+
"""
from __future__ import division, unicode_literals
import copy
import csv
import warnings
from . import enums
from .utils import (
pre_process,
termwidth,
deprecated,
deprecated_param,
deprecation_message,
ensure_type,
)
from .compat import basestring, Iterable, to_unicode
from .base import BTBaseList
from .helpers import (
BTRowCollection,
BTColumnCollection,
BTRowHeader,
BTColumnHeader,
)
__all__ = [
"BeautifulTable",
"BTRowCollection",
"BTColumnCollection",
"BTRowHeader",
"BTColumnHeader",
"BTBorder",
]
class BTBorder(object):
"""Class to control how each section of the table's border is rendered.
To disable a behaviour, just set its corresponding attribute
to an empty string
Attributes
----------
top : str
Character used to draw the top border.
left : str
Character used to draw the left border.
bottom : str
Character used to draw the bottom border.
right : str
Character used to draw the right border.
top_left : str
Left most character of the top border.
bottom_left : str
Left most character of the bottom border.
bottom_right : str
Right most character of the bottom border.
top_right : str
Right most character of the top border.
header_left : str
Left most character of the header separator.
header_right : str
Right most character of the header separator.
top_junction : str
Junction character for top border.
left_junction : str
Junction character for left border.
bottom_junction : str
Junction character for bottom border.
right_junction : str
Junction character for right border.
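Example
-------
A minimal usage sketch (assuming an existing :class:`BeautifulTable`
instance named ``table``); each attribute above can be overridden
individually through this object.
>>> table.border.left = '|'
>>> table.border.right = '|'
>>> table.border.top_left = '+'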
"""
def __init__(
self,
top,
left,
bottom,
right,
top_left,
bottom_left,
bottom_right,
top_right,
header_left,
header_right,
top_junction,
left_junction,
bottom_junction,
right_junction,
):
self.top = top
self.left = left
self.bottom = bottom
self.right = right
self.top_left = top_left
self.bottom_left = bottom_left
self.bottom_right = bottom_right
self.top_right = top_right
self.header_left = header_left
self.header_right = header_right
self.top_junction = top_junction
self.left_junction = left_junction
self.bottom_junction = bottom_junction
self.right_junction = right_junction
def _make_getter(attr):
return lambda self: getattr(self, attr)
def _make_setter(attr):
return lambda self, value: setattr(
self, attr, ensure_type(value, basestring)
)
for prop, attr in [
(x, "_{}".format(x))
for x in (
"top",
"left",
"bottom",
"right",
"top_left",
"bottom_left",
"bottom_right",
"top_right",
"header_left",
"header_right",
"top_junction",
"left_junction",
"bottom_junction",
"right_junction",
)
]:
setattr(BTBorder, prop, property(_make_getter(attr), _make_setter(attr)))
class BTTableData(BTBaseList):
def __init__(self, table, value=None):
if value is None:
value = []
self._table = table
self._value = value
def _get_canonical_key(self, key):
return self._table.rows._canonical_key(key)
def _get_ideal_length(self):
pass
class BeautifulTable(object):
"""Utility Class to print data in tabular format to terminal.
Parameters
----------
maxwidth: int, optional
maximum width of the table in number of characters. this is ignored
when manually setting the width of the columns. if this value is too
low with respect to the number of columns and width of padding, the
resulting table may override it(default 80).
default_alignment : int, optional
Default alignment for new columns(default beautifultable.ALIGN_CENTER).
default_padding : int, optional
Default width of the left and right padding for new columns(default 1).
precision : int, optional
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
serialno : bool, optional
If true, a column will be rendered with serial numbers(**DEPRECATED**).
serialno_header: str, optional
The header of the serial number column if rendered(**DEPRECATED**).
detect_numerics : bool, optional
Whether numeric strings should be automatically detected(Default True).
sign : SignMode, optional
Parameter to control how signs in numeric data are displayed.
(default beautifultable.SM_MINUS).
Attributes
----------
precision : int
All float values will have maximum number of digits after the decimal,
capped by this value(Default 3).
detect_numerics : bool
Whether numeric strings should be automatically detected(Default True).
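Example
-------
A minimal construction sketch; the keyword arguments used here are the
parameters documented above.
>>> from beautifultable import BeautifulTable
>>> table = BeautifulTable(maxwidth=60, precision=2)
>>> table.columns.header = ['name', 'value']
>>> table.rows.append(['pi', 3.14159])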
"""
@deprecated_param("1.0.0", "1.2.0", "sign_mode", "sign")
@deprecated_param("1.0.0", "1.2.0", "numeric_precision", "precision")
@deprecated_param("1.0.0", "1.2.0", "max_width", "maxwidth")
@deprecated_param("1.0.0", "1.2.0", "serialno")
@deprecated_param("1.0.0", "1.2.0", "serialno_header")
def __init__(
self,
maxwidth=80,
default_alignment=enums.ALIGN_CENTER,
default_padding=1,
precision=3,
serialno=False,
serialno_header="SN",
detect_numerics=True,
sign=enums.SM_MINUS,
**kwargs
):
kwargs.setdefault("max_width", None)
if kwargs["max_width"] is not None:
maxwidth = kwargs["max_width"]
kwargs.setdefault("numeric_precision", None)
if kwargs["numeric_precision"] is not None:
precision = kwargs["numeric_precision"]
kwargs.setdefault("sign_mode", None)
if kwargs["sign_mode"] is not None:
sign = kwargs["sign_mode"]
self.precision = precision
self._serialno = serialno
self._serialno_header = serialno_header
self.detect_numerics = detect_numerics
self._sign = sign
self.maxwidth = maxwidth
self._ncol = 0
self._data = BTTableData(self)
self.rows = BTRowCollection(self)
self.columns = BTColumnCollection(
self, default_alignment, default_padding
)
self._header_separator = ""
self._header_junction = ""
self._column_separator = ""
self._row_separator = ""
self.border = ""
self.set_style(enums.STYLE_DEFAULT)
def __copy__(self):
obj = type(self)()
obj.__dict__.update(
{k: copy.copy(v) for k, v in self.__dict__.items()}
)
obj.rows._table = obj
obj.rows.header._table = obj
obj.columns._table = obj
obj.columns.header._table = obj
obj.columns.alignment._table = obj
obj.columns.width._table = obj
obj.columns.padding_left._table = obj
obj.columns.padding_right._table = obj
obj._data._table = obj
for row in obj._data:
row._table = obj
return obj
def __deepcopy__(self, memo):
obj = type(self)()
obj.__dict__.update(
{k: copy.deepcopy(v, memo) for k, v in self.__dict__.items()}
)
obj.rows._table = obj
obj.rows.header._table = obj
obj.columns._table = obj
obj.columns.header._table = obj
obj.columns.alignment._table = obj
obj.columns.width._table = obj
obj.columns.padding_left._table = obj
obj.columns.padding_right._table = obj
obj._data._table = obj
for row in obj._data:
row._table = obj
return obj
def __setattr__(self, name, value):
attrs = (
"left_border_char",
"right_border_char",
"top_border_char",
"bottom_border_char",
"header_separator_char",
"column_separator_char",
"row_separator_char",
"intersect_top_left",
"intersect_top_mid",
"intersect_top_right",
"intersect_header_left",
"intersect_header_mid",
"intersect_header_right",
"intersect_row_left",
"intersect_row_mid",
"intersect_row_right",
"intersect_bottom_left",
"intersect_bottom_mid",
"intersect_bottom_right",
)
if to_unicode(name) in attrs:
warnings.warn(
deprecation_message(name, "1.0.0", "1.2.0", None),
FutureWarning,
)
value = ensure_type(value, basestring, name)
super(BeautifulTable, self).__setattr__(name, value)
@deprecated(
"1.0.0",
"1.2.0",
BTRowCollection.__len__,
details="Use len(BeautifulTable.rows)' instead.",
)
def __len__(self): # pragma: no cover
return len(self.rows)
@deprecated(
"1.0.0" "1.2.0",
BTRowCollection.__iter__,
details="Use iter(BeautifulTable.rows)' instead.",
)
def __iter__(self): # pragma: no cover
return iter(self.rows)
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__contains__,
details="Use ''value' in BeautifulTable.{columns|rows}' instead.",
)
def __contains__(self, key): # pragma: no cover
if isinstance(key, basestring):
return key in self.columns
elif isinstance(key, Iterable):
return key in self.rows
else:
raise TypeError(
("'key' must be str or Iterable, " "not {}").format(
type(key).__name__
)
)
def __repr__(self):
return repr(self._data)
def __str__(self):
if len(self.rows) == 0 or len(self.columns) == 0:
return ""
string_ = []
for line in self._get_string([], append=False):
string_.append(line)
return "\n".join(string_)
# ************************Properties Begin Here************************
@property
def shape(self):
"""Read only attribute which returns the shape of the table."""
return (len(self.rows), len(self.columns))
@property
def sign(self):
"""Attribute to control how signs are displayed for numerical data.
It can be one of the following:
======================== =============================================
Option Meaning
======================== =============================================
beautifultable.SM_PLUS A sign should be used for both +ve and -ve
numbers.
beautifultable.SM_MINUS A sign should only be used for -ve numbers.
beautifultable.SM_SPACE A leading space should be used for +ve
numbers and a minus sign for -ve numbers.
======================== =============================================
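For example (a sketch; ``table`` is an existing table holding
numeric data):
>>> import beautifultable
>>> table.sign = beautifultable.SM_PLUS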
"""
return self._sign
@sign.setter
def sign(self, value):
if not isinstance(value, enums.SignMode):
allowed = (
"{}.{}".format(type(self).__name__, i.name)
for i in enums.SignMode
)
error_msg = "allowed values for sign are: " + ", ".join(allowed)
raise ValueError(error_msg)
self._sign = value
@property
def border(self):
"""Characters used to draw the border of the table.
You can set this directly to a character or use its several attributes
to control how each section of the table is rendered.
It is an instance of :class:`~.BTBorder`
"""
return self._border
@border.setter
def border(self, value):
self._border = BTBorder(
top=value,
left=value,
bottom=value,
right=value,
top_left=value,
bottom_left=value,
bottom_right=value,
top_right=value,
header_left=value,
header_right=value,
top_junction=value,
left_junction=value,
bottom_junction=value,
right_junction=value,
)
@property
def junction(self):
"""Character used to draw junctions in the row separator."""
return self._junction
@junction.setter
def junction(self, value):
self._junction = ensure_type(value, basestring)
@property
@deprecated("1.0.0", "1.2.0", BTRowCollection.header.fget)
def serialno(self): # pragma: no cover
return self._serialno
@serialno.setter
@deprecated("1.0.0", "1.2.0", BTRowCollection.header.fget)
def serialno(self, value): # pragma: no cover
self._serialno = value
@property
@deprecated("1.0.0", "1.2.0")
def serialno_header(self): # pragma: no cover
return self._serialno_header
@serialno_header.setter
@deprecated("1.0.0", "1.2.0")
def serialno_header(self, value): # pragma: no cover
self._serialno_header = value
@property
@deprecated("1.0.0", "1.2.0", sign.fget)
def sign_mode(self): # pragma: no cover
return self.sign
@sign_mode.setter
@deprecated("1.0.0", "1.2.0", sign.fget)
def sign_mode(self, value): # pragma: no cover
self.sign = value
@property
def maxwidth(self):
"""get/set the maximum width of the table.
The width of the table is guaranteed to not exceed this value. If it
is not possible to print a given table with the width provided, this
value will automatically adjust.
"""
offset = (len(self.columns) - 1) * termwidth(self.columns.separator)
offset += termwidth(self.border.left)
offset += termwidth(self.border.right)
self._maxwidth = max(self._maxwidth, offset + len(self.columns))
return self._maxwidth
@maxwidth.setter
def maxwidth(self, value):
self._maxwidth = value
@property
@deprecated("1.0.0", "1.2.0", maxwidth.fget)
def max_table_width(self): # pragma: no cover
return self.maxwidth
@max_table_width.setter
@deprecated("1.0.0", "1.2.0", maxwidth.fget)
def max_table_width(self, value): # pragma: no cover
self.maxwidth = value
@property
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__len__,
details="Use 'len(self.columns)' instead.",
)
def column_count(self): # pragma: no cover
return len(self.columns)
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.width_exceed_policy.fget)
def width_exceed_policy(self): # pragma: no cover
return self.columns.width_exceed_policy
@width_exceed_policy.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.width_exceed_policy.fget)
def width_exceed_policy(self, value): # pragma: no cover
self.columns.width_exceed_policy = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.default_alignment.fget)
def default_alignment(self): # pragma: no cover
return self.columns.default_alignment
@default_alignment.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.default_alignment.fget)
def default_alignment(self, value): # pragma: no cover
self.columns.default_alignment = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.default_padding.fget)
def default_padding(self): # pragma: no cover
return self.columns.default_padding
@default_padding.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.default_padding.fget)
def default_padding(self, value): # pragma: no cover
self.columns.default_padding = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.width.fget)
def column_widths(self): # pragma: no cover
return self.columns.width
@column_widths.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.width.fget)
def column_widths(self, value): # pragma: no cover
self.columns.width = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.header.fget)
def column_headers(self): # pragma: no cover
return self.columns.header
@column_headers.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.header.fget)
def column_headers(self, value): # pragma: no cover
self.columns.header = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.alignment.fget)
def column_alignments(self): # pragma: no cover
return self.columns.alignment
@column_alignments.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.alignment.fget)
def column_alignments(self, value): # pragma: no cover
self.columns.alignment = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.padding_left.fget)
def left_padding_widths(self): # pragma: no cover
return self.columns.padding_left
@left_padding_widths.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.padding_left.fget)
def left_padding_widths(self, value): # pragma: no cover
self.columns.padding_left = value
@property
@deprecated("1.0.0", "1.2.0", BTColumnCollection.padding_right.fget)
def right_padding_widths(self): # pragma: no cover
return self.columns.padding_right
@right_padding_widths.setter
@deprecated("1.0.0", "1.2.0", BTColumnCollection.padding_right.fget)
def right_padding_widths(self, value): # pragma: no cover
self.columns.padding_right = value
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__getitem__,
details="Use 'BeautifulTable.{columns|rows}[key]' instead.",
)
def __getitem__(self, key): # pragma: no cover
if isinstance(key, basestring):
return self.columns[key]
return self.rows[key]
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__setitem__,
details="Use 'BeautifulTable.{columns|rows}[key]' instead.",
)
def __setitem__(self, key, value): # pragma: no cover
if isinstance(key, basestring):
self.columns[key] = value
else:
self.rows[key] = value
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__delitem__,
details="Use 'BeautifulTable.{columns|rows}[key]' instead.",
)
def __delitem__(self, key): # pragma: no cover
if isinstance(key, basestring):
del self.columns[key]
else:
del self.rows[key]
# *************************Properties End Here*************************
@deprecated(
"1.0.0",
"1.2.0",
BTColumnCollection.__getitem__,
details="Use 'BeautifulTable.columns[key]' instead.",
)
def get_column(self, key): # pragma: no cover
return self.columns[key]
@deprecated(
"1.0.0",
"1.2.0",
BTColumnHeader.__getitem__,
details="Use 'BeautifulTable.columns.header[key]' instead.",
)
def get_column_header(self, index): # pragma: no cover
return self.columns.header[index]
@deprecated(
"1.0.0",
"1.2.0",
BTColumnHeader.__getitem__,
details="Use 'BeautifulTable.columns.header.index(header)' instead.",
)
def get_column_index(self, header): # pragma: no cover
return self.columns.header.index(header)
@deprecated("1.0.0", "1.2.0", BTRowCollection.filter)
def filter(self, key): # pragma: no cover
return self.rows.filter(key)
@deprecated("1.0.0", "1.2.0", BTRowCollection.sort)
def sort(self, key, reverse=False): # pragma: no cover
self.rows.sort(key, reverse=reverse)
@deprecated("1.0.0", "1.2.0", BTRowCollection.reverse)
def reverse(self, value): # pragma: no cover
self.rows.reverse()
@deprecated("1.0.0", "1.2.0", BTRowCollection.pop)
def pop_row(self, index=-1): # pragma: no cover
return self.rows.pop(index)
@deprecated("1.0.0", "1.2.0", BTRowCollection.insert)
def insert_row(self, index, row): # pragma: no cover
return self.rows.insert(index, row)
@deprecated("1.0.0", "1.2.0", BTRowCollection.append)
def append_row(self, value): # pragma: no cover
self.rows.append(value)
@deprecated("1.0.0", "1.2.0", BTRowCollection.update)
def update_row(self, key, value): # pragma: no cover
self.rows.update(key, value)
@deprecated("1.0.0", "1.2.0", BTColumnCollection.pop)
def pop_column(self, index=-1): # pragma: no cover
return self.columns.pop(index)
@deprecated("1.0.0", "1.2.0", BTColumnCollection.insert)
def insert_column(self, index, header, column): # pragma: no cover
self.columns.insert(index, column, header)
@deprecated("1.0.0", "1.2.0", BTColumnCollection.append)
def append_column(self, header, column): # pragma: no cover
self.columns.append(column, header)
@deprecated("1.0.0", "1.2.0", BTColumnCollection.update)
def update_column(self, header, column): # pragma: no cover
self.columns.update(header, column)
def set_style(self, style):
"""Set the style of the table from a predefined set of styles.
Parameters
----------
style: Style
It can be one of the following:
* beautifultable.STYLE_DEFAULT
* beautifultable.STYLE_NONE
* beautifultable.STYLE_DOTTED
* beautifultable.STYLE_MYSQL
* beautifultable.STYLE_SEPARATED
* beautifultable.STYLE_COMPACT
* beautifultable.STYLE_MARKDOWN
* beautifultable.STYLE_RESTRUCTURED_TEXT
* beautifultable.STYLE_BOX
* beautifultable.STYLE_BOX_DOUBLED
* beautifultable.STYLE_BOX_ROUNDED
* beautifultable.STYLE_GRID
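For example (a sketch using one of the styles listed above):
>>> import beautifultable
>>> table.set_style(beautifultable.STYLE_MARKDOWN)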
"""
if not isinstance(style, enums.Style):
allowed = (
"{}.{}".format(type(self).__name__, i.name)
for i in enums.Style
)
error_msg = "allowed values for style are: " + ", ".join(allowed)
raise ValueError(error_msg)
style_template = style.value
self.border.left = style_template.left_border_char
self.border.right = style_template.right_border_char
self.border.top = style_template.top_border_char
self.border.bottom = style_template.bottom_border_char
self.border.top_left = style_template.intersect_top_left
self.border.bottom_left = style_template.intersect_bottom_left
self.border.bottom_right = style_template.intersect_bottom_right
self.border.top_right = style_template.intersect_top_right
self.border.header_left = style_template.intersect_header_left
self.border.header_right = style_template.intersect_header_right
self.columns.header.separator = style_template.header_separator_char
self.columns.separator = style_template.column_separator_char
self.rows.separator = style_template.row_separator_char
self.border.top_junction = style_template.intersect_top_mid
self.border.left_junction = style_template.intersect_row_left
self.border.bottom_junction = style_template.intersect_bottom_mid
self.border.right_junction = style_template.intersect_row_right
self.columns.header.junction = style_template.intersect_header_mid
self.junction = style_template.intersect_row_mid
def _compute_width(self):
"""Calculate width of column automatically based on data."""
table_width = self._width
lpw, rpw = self.columns.padding_left, self.columns.padding_right
pad_widths = [(lpw[i] + rpw[i]) for i in range(len(self.columns))]
maxwidths = [0 for index in range(len(self.columns))]
offset = table_width - sum(self.columns.width) + sum(pad_widths)
self._maxwidth = max(self._maxwidth, offset + len(self.columns))
for index, header in enumerate(self.columns.header):
max_length = 0
for i in pre_process(
header, self.detect_numerics, self.precision, self.sign.value
).split("\n"):
output_str = pre_process(
i,
self.detect_numerics,
self.precision,
self.sign.value,
)
max_length = max(max_length, termwidth(output_str))
maxwidths[index] += max_length
for index, column in enumerate(zip(*self._data)):
max_length = maxwidths[index]
for i in column:
for j in pre_process(
i, self.detect_numerics, self.precision, self.sign.value
).split("\n"):
output_str = pre_process(
j,
self.detect_numerics,
self.precision,
self.sign.value,
)
max_length = max(max_length, termwidth(output_str))
maxwidths[index] = max_length
sum_ = sum(maxwidths)
desired_sum = self._maxwidth - offset
# Set flag for columns who are within their fair share
temp_sum = 0
flag = [0] * len(maxwidths)
for i, width in enumerate(maxwidths):
if width <= int(desired_sum / len(self.columns)):
temp_sum += width
flag[i] = 1
else:
# Allocate at least 1 character width to the column
temp_sum += 1
avail_space = desired_sum - temp_sum
actual_space = sum_ - temp_sum
shrinked_columns = {}
# Columns which exceed their fair share should be shrinked based on
# how much space is left for the table
for i, width in enumerate(maxwidths):
self.columns.width[i] = width
if not flag[i]:
new_width = 1 + int((width - 1) * avail_space / actual_space)
if new_width < width:
self.columns.width[i] = new_width
shrinked_columns[new_width] = i
# Divide any remaining space among shrinked columns
if shrinked_columns:
extra = self._maxwidth - offset - sum(self.columns.width)
actual_space = sum(shrinked_columns)
if extra > 0:
for i, width in enumerate(sorted(shrinked_columns)):
index = shrinked_columns[width]
extra_width = int(width * extra / actual_space)
self.columns.width[index] += extra_width
if i == (len(shrinked_columns) - 1):
extra = (
self._maxwidth - offset - sum(self.columns.width)
)
self.columns.width[index] += extra
for i in range(len(self.columns)):
self.columns.width[i] += pad_widths[i]
@deprecated("1.0.0", "1.2.0", BTColumnCollection.padding.fget)
def set_padding_widths(self, pad_width): # pragma: no cover
self.columns.padding_left = pad_width
self.columns.padding_right = pad_width
@deprecated("1.0.0", "1.2.0")
def copy(self):
return copy.copy(self)
@deprecated_param("1.0.0", "1.2.0", "clear_metadata", "reset_columns")
def clear(self, reset_columns=False, **kwargs): # pragma: no cover
"""Clear the contents of the table.
Clear all rows of the table, and if specified clears all column
specific data.
Parameters
----------
reset_columns : bool, optional
If it is true(default False), all metadata of columns such as their
alignment, padding, width, etc. are also cleared and number of
columns is set to 0.
"""
kwargs.setdefault("clear_metadata", None)
if kwargs["clear_metadata"]:
reset_columns = kwargs["clear_metadata"]
self.rows.clear()
if reset_columns:
self.columns.clear()
def _get_horizontal_line(
self, char, intersect_left, intersect_mid, intersect_right, mask=None
):
"""Get a horizontal line for the table.
Internal method used to draw all horizontal lines in the table.
Column width should be set prior to calling this method. This method
detects intersection and handles it according to the values of
`intersect_*_*` attributes.
Parameters
----------
char : str
Character used to draw the line.
Returns
-------
str
String which will be printed as a line in the table.
"""
width = self._width
if mask is None:
mask = [True] * len(self.columns)
try:
line = list(char * (int(width / termwidth(char)) + 1))[:width]
except ZeroDivisionError:
line = [" "] * width
if len(line) == 0:
return ""
# Only if Special Intersection is enabled and horizontal line is
# visible
if not char.isspace():
# If left border is enabled and it is visible
visible_junc = not intersect_left.isspace()
if termwidth(self.border.left) > 0:
if not (self.border.left.isspace() and visible_junc):
length = min(
termwidth(self.border.left),
termwidth(intersect_left),
)
for i in range(length):
line[i] = intersect_left[i] if mask[0] else " "
visible_junc = not intersect_right.isspace()
# If right border is enabled and it is visible
if termwidth(self.border.right) > 0:
if not (self.border.right.isspace() and visible_junc):
length = min(
termwidth(self.border.right),
termwidth(intersect_right),
)
for i in range(length):
line[-i - 1] = (
intersect_right[-i - 1] if mask[-1] else " "
)
visible_junc = not intersect_mid.isspace()
# If column separator is enabled and it is visible
if termwidth(self.columns.separator):
if not (self.columns.separator.isspace() and visible_junc):
index = termwidth(self.border.left)
for i in range(len(self.columns) - 1):
if not mask[i]:
for j in range(self.columns.width[i]):
line[index + j] = " "
index += self.columns.width[i]
length = min(
termwidth(self.columns.separator),
termwidth(intersect_mid),
)
for j in range(length):
# TODO: we should also hide junctions based on mask
line[index + j] = (
intersect_mid[j]
if (mask[i] or mask[i + 1])
else " "
)
index += termwidth(self.columns.separator)
return "".join(line)
def _get_top_border(self, *args, **kwargs):
return self._get_horizontal_line(
self.border.top,
self.border.top_left,
self.border.top_junction,
self.border.top_right,
*args,
**kwargs
)
def _get_header_separator(self, *args, **kwargs):
return self._get_horizontal_line(
self.columns.header.separator,
self.border.header_left,
self.columns.header.junction,
self.border.header_right,
*args,
**kwargs
)
def _get_row_separator(self, *args, **kwargs):
return self._get_horizontal_line(
self.rows.separator,
self.border.left_junction,
self.junction,
self.border.right_junction,
*args,
**kwargs
)
def _get_bottom_border(self, *args, **kwargs):
return self._get_horizontal_line(
self.border.bottom,
self.border.bottom_left,
self.border.bottom_junction,
self.border.bottom_right,
*args,
**kwargs
)
@property
def _width(self):
"""Get the actual width of the table as number of characters.
Column width should be set prior to calling this method.
Returns
-------
int
Width of the table as number of characters.
"""
if len(self.columns) == 0:
return 0
width = sum(self.columns.width)
width += (len(self.columns) - 1) * termwidth(self.columns.separator)
width += termwidth(self.border.left)
width += termwidth(self.border.right)
return width
@deprecated("1.0.0", "1.2.0", _width.fget)
def get_table_width(self): # pragma: no cover
return self._width
def _get_string(self, rows=None, append=False, recalculate_width=True):
row_header_visible = bool(
"".join(
x if x is not None else "" for x in self.rows.header
).strip()
) and (len(self.columns) > 0)
column_header_visible = bool(
"".join(
x if x is not None else "" for x in self.columns.header
).strip()
) and (len(self.rows) > 0 or rows is not None)
# Preparing table for printing serialno, row headers and column headers
if len(self.columns) > 0:
if self._serialno:
self.columns.insert(
0, range(1, len(self.rows) + 1), self._serialno_header
)
if row_header_visible:
self.columns.insert(0, self.rows.header)
if column_header_visible:
self.rows.insert(0, self.columns.header)
if (self.columns._auto_width and recalculate_width) or sum(
self.columns.width
) == 0:
self._compute_width()
try:
# Rendering the top border
if self.border.top:
yield self._get_top_border()
# Print column headers if not empty or only spaces
row_iterator = iter(self.rows)
if column_header_visible:
yield next(row_iterator)._get_string(
align=self.columns.header.alignment
)
if self.columns.header.separator:
yield self._get_header_separator()
# Printing rows
first_row_encountered = False
for i, row in enumerate(row_iterator):
if first_row_encountered and self.rows.separator:
yield self._get_row_separator()
first_row_encountered = True
content = to_unicode(row)
yield content
if rows is not None:
# Printing additional rows
prev_length = len(self.rows)
for i, row in enumerate(rows, start=1):
if first_row_encountered and self.rows.separator:
yield self._get_row_separator()
first_row_encountered = True
if self._serialno:
row.insert(0, prev_length + i)
if row_header_visible:
self.rows.append([None] + list(row))
else:
self.rows.append(row)
content = to_unicode(self.rows[-1])
if not append:
self.rows.pop()
yield content
# Rendering the bottom border
if self.border.bottom:
yield self._get_bottom_border()
except Exception:
raise
finally:
# Cleanup
if column_header_visible:
self.rows.pop(0)
if row_header_visible:
self.columns.pop(0)
if len(self.columns) > 0:
if self._serialno:
self.columns.pop(0)
return
def stream(self, rows, append=False):
"""Get a generator for the table.
This should be used in cases where data takes time to retrieve and it
is required to be displayed as soon as possible. Any existing rows in
the table shall also be returned. It is essential that at least one of
column headers, widths or existing rows is set before calling this method.
Parameters
----------
rows : iterable
A generator which yields one row at a time.
append : bool, optional
If rows should also be appended to the table.(Default False)
Returns
-------
iterable:
string representation of the table as a generator
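Example
-------
A usage sketch; ``slow_source()`` stands in for any generator that
yields one row at a time (hypothetical name).
>>> for line in table.stream(slow_source(), append=True):
...     print(line)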
"""
for line in self._get_string(
rows, append=append, recalculate_width=False
):
yield line
@deprecated("1.0.0", "1.2.0", str)
def get_string(self):
return str(self)
def to_csv(self, file_name, *args, **kwargs):
"""Export table to CSV format.
Parameters
----------
file_name : str
Path to CSV file.
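Example
-------
A minimal sketch; the file name is arbitrary and any extra arguments
are passed through to :func:`csv.writer`.
>>> table.to_csv('table.csv')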
"""
if not isinstance(file_name, str):
raise ValueError(
("Expected 'file_name' to be string, got {}").format(
type(file_name).__name__
)
)
with open(file_name, mode="wt", newline="") as csv_file:
csv_writer = csv.writer(csv_file, *args, **kwargs)
if bool(
"".join(
x if x is not None else "" for x in self.columns.header
).strip()
):
csv_writer.writerow(self.columns.header)
csv_writer.writerows(self.rows)
def from_csv(self, file_name, header=True, **kwargs):
"""Create table from CSV file.
Parameters
----------
file_name : str
Path to CSV file.
header : bool, optional
Whether First row in CSV file should be parsed as table header.
Raises
------
ValueError
If `file_name` is not str type.
FileNotFoundError
If `file_name` is not valid path to file.
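Example
-------
A minimal sketch; extra keyword arguments are passed through to
:func:`csv.reader`.
>>> table = BeautifulTable().from_csv('table.csv', header=True)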
"""
if not isinstance(file_name, str):
raise ValueError(
("Expected 'file_name' to be string, got {}").format(
type(file_name).__name__
)
)
with open(file_name, mode="rt", newline="") as csv_file:
csv_reader = csv.reader(csv_file, **kwargs)
if header:
self.columns.header = next(csv_reader)
for row in csv_reader:
self.rows.append(row)
return self
|
libtbx/queuing_system_utils/__init__.py | rimmartin/cctbx_project | 155 | 11171849 | <filename>libtbx/queuing_system_utils/__init__.py<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import sys
from libtbx import Auto
class chunk_manager(object):
def __init__(self, n, i):
assert n > 0
assert i >= 0
assert i < n
self.n = n
self.i = i
self.queuing_system_info = None
def easy_all(self, log_format=Auto, out=Auto):
self.queuing_system_overrides_chunk()
self.redirect_chunk_stdout_and_stderr(log_format=log_format, out=out)
return self
def skip_iteration(self, i):
return (i % self.n != self.i)
def queuing_system_overrides_chunk(self):
from libtbx.queuing_system_utils import pbs_utils, sge_utils
pbs_info = pbs_utils.chunk_info()
sge_info = sge_utils.info()
assert [pbs_info, sge_info].count(None) <= 1
if pbs_info.have_array():
self.queuing_system_info = pbs_info
n, i = pbs_info.as_n_i_pair()
self.n = max(self.n, n)
self.i = i
elif sge_info.have_array():
self.queuing_system_info = sge_info
self.n = max(self.n, sge_info.last)
self.i = sge_info.id - 1
return self
def redirect_chunk_stdout_and_stderr(self,
log_format=Auto,
out=Auto,
have_array=False):
if self.n == 1: return
log_name = None
if not have_array:
i = self.queuing_system_info
if i is not None and i.have_array():
have_array = True
if have_array:
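# 'log%%0%dd' expands in two steps: first to e.g. 'log%03d' (zero padding
# width chosen from the chunk count), then to the log name for this chunk
# index, e.g. 'log007'.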
if log_format is Auto: log_format="log%%0%dd"
fmt = log_format % max(3, len("%d" % (self.n-1)))
log_name = fmt % self.i
log = open(log_name, "w")
sys.stdout = log
sys.stderr = log
from libtbx.utils import host_and_user
if out is Auto: out = sys.stdout
if out is not None:
host_and_user().show(out=out)
print("chunk.n:", self.n, file=out)
print("chunk.i:", self.i, file=out)
if log_name:
print("log_name:", log_name, file=out)
print(file=out)
return self
# XXX tested on SGE only so far (2012-12-19)
def qdel(job_id, platform):
"""
Stop a queue job. Supports the same platforms as 'processing' sub-module,
but primarily used by the Phenix GUI.
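For example, ``qdel("12345", "sge")`` runs ``qdel 12345`` on an SGE
cluster (illustrative job ID).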
"""
from libtbx import easy_run
assert platform in ("sge", "lsf", "pbs", "condor", "pbspro", "slurm")
cmd = None
if platform in ("sge", "pbs", "pbspro", "slurm"):
cmd = "qdel %s" % job_id
elif platform == "lsf":
cmd = "bkill %s" % job_id
elif platform == "condor":
cmd = "condor_rm %s" % job_id
assert cmd
qdel_out = easy_run.fully_buffered(
command=cmd).raise_if_errors().stdout_lines
print("\n".join(qdel_out))
# XXX this is specific to SGE - need error handling for other systems too
for line in qdel_out:
if "denied" in line:
if "does not exist" in line: # SGE job does not exist
pass
else:
raise RuntimeError("\n".join(qdel_out))
return True
|
electrumsv/keystore.py | electrumsv/electrumsv | 136 | 11171923 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import json
from typing import Any, cast, Dict, List, Optional, Sequence, Set, Tuple, TYPE_CHECKING, Union
from bitcoinx import (
PrivateKey, PublicKey, BIP32PrivateKey,
int_to_be_bytes, be_bytes_to_int, CURVE_ORDER,
bip32_key_from_string, bip32_decompose_chain_string, Base58Error, hash160,
bip32_build_chain_string, BIP39Mnemonic, ElectrumMnemonic
)
from .i18n import _
from .app_state import app_state
from .constants import DerivationType, DerivationPath, KeystoreTextType, KeystoreType
from .crypto import sha256d, pw_encode, pw_decode
from .exceptions import InvalidPassword, OverloadedMultisigKeystore, IncompatibleWalletError
from .logs import logs
from .networks import Net
from .transaction import Transaction, TransactionContext, XPublicKey, XPublicKeyKind
from .types import MasterKeyDataBIP32, MasterKeyDataElectrumOld, MasterKeyDataHardware, \
MasterKeyDataMultiSignature, MasterKeyDataTypes, DatabaseKeyDerivationData
from .wallet_database.types import KeyInstanceRow, MasterKeyRow
if TYPE_CHECKING:
from .devices.hw_wallet.plugin import HW_PluginBase
from .devices.hw_wallet.qt import QtHandlerBase, QtPluginBase
logger = logs.get_logger("keystore")
class KeyStore:
derivation_type = DerivationType.NONE
label: Optional[str] = None
def __init__(self, row: Optional[MasterKeyRow]=None) -> None:
self.set_row(row)
def clean_up(self) -> None:
pass
def set_row(self, row: Optional[MasterKeyRow]=None) -> None:
self._row = row
def type(self) -> KeystoreType:
return KeystoreType.UNSPECIFIED
def subtype(self) -> Optional[str]:
return None
def get_label(self) -> Optional[str]:
return self.label
def set_label(self, label: Optional[str]) -> None:
self.label = label
def debug_name(self) -> str:
name = self.type().value
sub_type = self.subtype() # pylint: disable=assignment-from-none
if sub_type is not None:
name += "/"+ sub_type
return name
def get_id(self) -> int:
"""
Get the database id for the masterkey record for this keystore.
Will raise an AssertionError for imported keystores, as they do not have masterkeys.
"""
assert self._row is not None
return self._row.masterkey_id
def get_fingerprint(self) -> bytes:
raise NotImplementedError
def has_masterkey(self) -> bool:
return self._row is not None
def has_seed(self) -> bool:
return False
def is_deterministic(self) -> bool:
return False
def can_change_password(self) -> bool:
raise NotImplementedError
def to_derivation_data(self) -> MasterKeyDataTypes:
raise NotImplementedError
def to_masterkey_row(self) -> MasterKeyRow:
"""
The initial database row (with placeholder id) for this new keystore.
"""
raise NotImplementedError
def is_watching_only(self) -> bool:
return False
def can_import(self) -> bool:
return False
def can_export(self) -> bool:
return False
def get_master_public_key(self) -> Optional[str]:
raise NotImplementedError
def get_private_key(self, key_data: Any, password: str) -> Tuple[bytes, bool]:
raise NotImplementedError
def get_private_key_from_xpubkey(self, x_pubkey: XPublicKey,
password: str) -> Tuple[bytes, bool]:
raise NotImplementedError
def is_signature_candidate(self, x_pubkey: XPublicKey) -> bool:
raise NotImplementedError
def can_sign(self, tx: Transaction) -> bool:
if self.is_watching_only():
return False
return any(self.is_signature_candidate(x_pubkey) for txin in tx.inputs
for x_pubkey in txin.unused_x_pubkeys())
def requires_input_transactions(self) -> bool:
return False
def sign_transaction(self, tx: Transaction, password: str,
context: TransactionContext) -> None:
raise NotImplementedError
class Software_KeyStore(KeyStore):
def __init__(self, row: Optional[MasterKeyRow]=None) -> None:
KeyStore.__init__(self, row)
def type(self) -> KeystoreType:
return KeystoreType.SOFTWARE
def sign_message(self, derivation_path: DerivationPath, message: bytes, password: str) -> bytes:
privkey, compressed = self.get_private_key(derivation_path, password)
key = PrivateKey(privkey, compressed)
return cast(bytes, key.sign_message(message))
def decrypt_message(self, sequence: DerivationPath, message: bytes, password: str) -> bytes:
privkey, compressed = self.get_private_key(sequence, password)
key = PrivateKey(privkey)
return cast(bytes, key.decrypt_message(message))
def check_password(self, password: Optional[str]) -> None:
raise NotImplementedError
def sign_transaction(self, tx: Transaction, password: str,
context: TransactionContext) -> None:
if self.is_watching_only():
return
# Raise if password is not correct.
self.check_password(password)
# Add private keys
keypairs: Dict[XPublicKey, Tuple[bytes, bool]] = {}
for txin in tx.inputs:
for x_pubkey in txin.unused_x_pubkeys():
if self.is_signature_candidate(x_pubkey):
keypairs[x_pubkey] = self.get_private_key_from_xpubkey(x_pubkey, password)
# Sign
if keypairs:
tx.sign(keypairs)
class Imported_KeyStore(Software_KeyStore):
derivation_type = DerivationType.IMPORTED
# keystore for imported private keys
# private keys are encrypted versions of the WIF encoding
def __init__(self, row: Optional[MasterKeyRow]=None) -> None:
self._public_keys: Dict[int, PublicKey] = {}
self._keypairs: Dict[PublicKey, str] = {}
Software_KeyStore.__init__(self, row)
def type(self) -> KeystoreType:
return KeystoreType.IMPORTED_PRIVATE_KEY
def set_state(self, keyinstance_rows: List[KeyInstanceRow]) -> None:
self._keypairs.clear()
self._public_keys.clear()
for row in keyinstance_rows:
data = json.loads(row.derivation_data)
public_key = PublicKey.from_hex(data['pub'])
self._public_keys[row.keyinstance_id] = public_key
self._keypairs[public_key] = cast(str, data['prv'])
def set_encrypted_prv(self, keyinstance_id: int, encrypted_prv: str) -> None:
"""
Update a re-encrypted private key.
This will occur when the wallet password has been changed
"""
public_key = self._public_keys[keyinstance_id]
self._keypairs[public_key] = encrypted_prv
def can_change_password(self) -> bool:
return True
def get_master_public_key(self) -> Optional[str]:
return None
def to_derivation_data(self) -> MasterKeyDataTypes:
raise IncompatibleWalletError("imported keystores do not map to a masterkey")
def to_masterkey_row(self) -> MasterKeyRow:
raise IncompatibleWalletError("imported keystores do not map to a masterkey")
def can_import(self) -> bool:
return True
def sign_message(self, public_key: PublicKey, message: bytes, password: str) -> bytes:
private_key_bytes, is_compressed = self.get_private_key(public_key, password)
private_key = PrivateKey(private_key_bytes, is_compressed)
return cast(bytes, private_key.sign_message(message))
def decrypt_message(self, public_key: PublicKey, message: bytes, password: str) -> bytes:
private_key_bytes, is_compressed = self.get_private_key(public_key, password)
private_key = PrivateKey(private_key_bytes, is_compressed)
return cast(bytes, private_key.decrypt_message(message))
def remove_key(self, keyinstance_id: int) -> None:
pubkey = self._public_keys.pop(keyinstance_id)
self._keypairs.pop(pubkey)
def check_password(self, password: Optional[str]) -> None:
assert password is not None
pubkey = list(self._keypairs.keys())[0]
self.export_private_key(pubkey, password)
def import_private_key(self, keyinstance_id: int, public_key: PublicKey,
enc_prvkey: str) -> None:
self._public_keys[keyinstance_id] = public_key
self._keypairs[public_key] = enc_prvkey
def export_private_key(self, pubkey: PublicKey, password: str) -> str:
'''Returns a WIF string'''
privkey_text = pw_decode(self._keypairs[pubkey], password)
# this checks the password
if pubkey != _public_key_from_private_key_text(privkey_text):
raise InvalidPassword()
return privkey_text
def can_export(self) -> bool:
return True
def get_private_key(self, public_key: PublicKey, password: str) -> Tuple[bytes, bool]:
'''Returns a (32 byte privkey, is_compressed) pair.'''
private_key_text = self.export_private_key(public_key, password)
private_key = PrivateKey.from_text(private_key_text)
return private_key.to_bytes(), private_key.is_compressed()
def get_private_key_from_xpubkey(self, x_public_key: XPublicKey,
password: str) -> Tuple[bytes, bool]:
public_key = x_public_key.to_public_key()
return self.get_private_key(public_key, password)
def is_signature_candidate(self, x_public_key: XPublicKey) -> bool:
if x_public_key.kind() == XPublicKeyKind.PRIVATE_KEY:
return x_public_key.to_public_key() in self._keypairs
return False
class Deterministic_KeyStore(Software_KeyStore):
seed: Optional[str] = None
passphrase: Optional[str] = None
label: Optional[str] = None
def __init__(self, row: Optional[MasterKeyRow]=None) -> None:
Software_KeyStore.__init__(self, row)
def is_deterministic(self) -> bool:
return True
def has_seed(self) -> bool:
return self.seed is not None
def is_watching_only(self) -> bool:
return not self.has_seed()
def can_change_password(self) -> bool:
return not self.is_watching_only()
def get_seed(self, password: str) -> str:
"""
Get the source private key data for this keystore.
This may be the seed words where applicable, or whatever else the user originally entered.
"""
assert isinstance(self.seed, str)
return pw_decode(self.seed, password)
def get_passphrase(self, password: str) -> str:
if self.passphrase:
return pw_decode(self.passphrase, password)
return ''
class Xpub:
def __init__(self) -> None:
self.xpub: Optional[str] = None
self._child_xpubs: Dict[DerivationPath, str] = {}
def get_master_public_key(self) -> Optional[str]:
return self.xpub
def get_fingerprint(self) -> bytes:
return cast(bytes, bip32_key_from_string(self.xpub).fingerprint())
def derive_pubkey(self, derivation_path: DerivationPath) -> PublicKey:
parent_path = derivation_path[:-1]
xpub = self._child_xpubs.get(parent_path)
if xpub is None:
xpubkey = bip32_key_from_string(self.xpub)
for n in parent_path:
xpubkey = xpubkey.child_safe(n)
xpub = xpubkey.to_extended_key_string()
self._child_xpubs[parent_path] = xpub
return self.get_pubkey_from_xpub(xpub, derivation_path[-1:])
@classmethod
def get_pubkey_from_xpub(cls, xpub: str, sequence: DerivationPath) -> PublicKey:
pubkey = bip32_key_from_string(xpub)
assert isinstance(pubkey, PublicKey)
for n in sequence:
pubkey = pubkey.child_safe(n)
return pubkey
def get_xpubkey(self, data: DatabaseKeyDerivationData) -> XPublicKey:
return XPublicKey(bip32_xpub=self.xpub, derivation_data=data)
def is_signature_candidate(self, x_pubkey: XPublicKey) -> bool:
if x_pubkey.kind() == XPublicKeyKind.BIP32:
return self.xpub == x_pubkey.bip32_extended_key()
return False
class BIP32_KeyStore(Deterministic_KeyStore, Xpub):
derivation_type = DerivationType.BIP32
def __init__(self, data: MasterKeyDataBIP32, row: Optional[MasterKeyRow]=None,
parent_keystore: Optional[KeyStore]=None) -> None:
Xpub.__init__(self)
Deterministic_KeyStore.__init__(self, row)
self._parent_keystore = parent_keystore
self.seed: Optional[str] = data.get('seed')
self.passphrase: Optional[str] = data.get('passphrase')
self.label: Optional[str] = data.get('label')
self.xpub: Optional[str] = data.get('xpub')
self.xprv: Optional[str] = data.get('xprv')
def type(self) -> KeystoreType:
return KeystoreType.BIP32
def set_row(self, row: Optional[MasterKeyRow]=None) -> None:
Deterministic_KeyStore.set_row(self, row)
def get_fingerprint(self) -> bytes:
return Xpub.get_fingerprint(self)
def to_derivation_data(self) -> MasterKeyDataBIP32:
assert self.xpub is not None
return {
"seed": self.seed,
"passphrase": self.passphrase,
"label": self.label,
"xpub": self.xpub,
"xprv": self.xprv,
}
def to_masterkey_row(self) -> MasterKeyRow:
derivation_data = json.dumps(self.to_derivation_data()).encode()
return MasterKeyRow(-1, None, DerivationType.BIP32, derivation_data)
def get_master_public_key(self) -> Optional[str]:
return Xpub.get_master_public_key(self)
def get_master_private_key(self, password: Optional[str]) -> str:
assert self.xprv is not None
return pw_decode(self.xprv, password)
def check_password(self, password: Optional[str]) -> None:
"""
Check if the password is valid for one of the pieces of encrypted data.
It is assumed that all the encrypted data in this keystore is encrypted
with the same password, so checking one piece is sufficient.
"""
assert self.xprv is not None
xprv = pw_decode(self.xprv, password)
try:
assert (bip32_key_from_string(xprv).derivation().chain_code
== bip32_key_from_string(self.xpub).derivation().chain_code)
except (ValueError, AssertionError, Base58Error):
raise InvalidPassword()
def is_watching_only(self) -> bool:
return self.xprv is None
def can_export(self) -> bool:
return True
def get_private_key(self, derivation_path: DerivationPath, password: str) -> Tuple[bytes, bool]:
xprv = self.get_master_private_key(password)
privkey = bip32_key_from_string(xprv)
for n in derivation_path:
privkey = privkey.child_safe(n)
return privkey.to_bytes(), True
def get_private_key_from_xpubkey(self, x_pubkey: XPublicKey,
password: str) -> Tuple[bytes, bool]:
return self.get_private_key(x_pubkey.derivation_path, password)
# If we do not do this it falls through to the the base KeyStore method, not Xpub.
def is_signature_candidate(self, x_pubkey: XPublicKey) -> bool:
return Xpub.is_signature_candidate(self, x_pubkey)
def set_encrypted_seed(self, encrypted_seed: str) -> None:
assert self.seed is not None
self.seed = encrypted_seed
def set_encrypted_passphrase(self, encrypted_passphrase: str) -> None:
assert self.passphrase is not None
self.passphrase = encrypted_passphrase
def set_encrypted_xprv(self, encrypted_xprv: str) -> None:
assert self.xprv is not None
self.xprv = encrypted_xprv
class Old_KeyStore(Deterministic_KeyStore):
derivation_type = DerivationType.ELECTRUM_OLD
def __init__(self, data: MasterKeyDataElectrumOld, row: Optional[MasterKeyRow]=None) -> None:
super().__init__(row)
self.seed = data['seed']
self.mpk = data['mpk']
def type(self) -> KeystoreType:
return KeystoreType.OLD
def _get_hex_seed_bytes(self, password: Optional[str]) -> bytes:
assert self.seed is not None
return pw_decode(self.seed, password).encode('utf8')
@classmethod
def _mpk_from_hex_seed(cls, hex_seed: str) -> str:
secexp = cls.stretch_key(hex_seed.encode())
master_private_key = PrivateKey(int_to_be_bytes(secexp, 32))
return cast(str, master_private_key.public_key.to_hex(compressed=False)[2:])
@classmethod
def _mpk_to_PublicKey(cls, mpk: str) -> PublicKey:
return PublicKey.from_hex('04' + mpk)
@classmethod
def from_mpk(cls, mpk: str) -> 'Old_KeyStore':
return cls({ "mpk": mpk, "seed": None })
def to_derivation_data(self) -> MasterKeyDataElectrumOld:
return {
"seed": self.seed,
"mpk": self.mpk,
}
def to_masterkey_row(self) -> MasterKeyRow:
derivation_lump = json.dumps(self.to_derivation_data()).encode()
return MasterKeyRow(-1, None, DerivationType.ELECTRUM_OLD, derivation_lump)
def get_seed(self, password: Optional[str]) -> str:
"""
Get the old Electrum type mnemonic words for this keystore's master key.
Raises ValueError if the hex seed is not either of 16 or 32 bytes.
"""
s = self._get_hex_seed_bytes(password)
return cast(str, ElectrumMnemonic.hex_seed_to_old(s))
@classmethod
def stretch_key(cls, seed: bytes) -> int:
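# Old Electrum key stretching: 100,000 rounds of SHA-256 over the running
# digest concatenated with the original seed; the final digest is read as
# a big-endian integer and used as the master private exponent.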
x = seed
for i in range(100000):
x = hashlib.sha256(x + seed).digest()
return cast(int, be_bytes_to_int(x))
@classmethod
def get_sequence(cls, mpk: str, derivation_path: DerivationPath) -> int:
old_sequence = derivation_path[1], derivation_path[0]
return cast(int, be_bytes_to_int(sha256d(("%d:%d:"% old_sequence).encode('ascii') +
bytes.fromhex(mpk))))
@classmethod
def get_pubkey_from_mpk(cls, mpk: str, derivation_path: DerivationPath) -> PublicKey:
assert len(derivation_path) == 2
z = cls.get_sequence(mpk, derivation_path)
master_public_key = cls._mpk_to_PublicKey(mpk)
public_key = master_public_key.add(int_to_be_bytes(z, 32))
assert not public_key.is_compressed()
return public_key
def derive_pubkey(self, derivation_path: DerivationPath) -> PublicKey:
assert len(derivation_path) == 2
return self.get_pubkey_from_mpk(self.mpk, derivation_path)
def get_private_key_from_stretched_exponent(self, derivation_path: DerivationPath,
secexp: int) -> bytes:
assert len(derivation_path) == 2
secexp = (secexp + self.get_sequence(self.mpk, derivation_path)) % CURVE_ORDER
return cast(bytes, int_to_be_bytes(secexp, 32))
def can_export(self) -> bool:
return True
def get_private_key(self, derivation_path: DerivationPath, password: str) -> Tuple[bytes, bool]:
seed = self._get_hex_seed_bytes(password)
self.check_seed(seed)
secexp = self.stretch_key(seed)
pk = self.get_private_key_from_stretched_exponent(derivation_path, secexp)
return pk, False
def get_private_key_from_xpubkey(self, x_pubkey: XPublicKey,
password: str) -> Tuple[bytes, bool]:
mpk, path = x_pubkey.old_keystore_mpk_and_path()
assert self.mpk == mpk.hex()
return self.get_private_key(path, password)
def check_seed(self, seed: bytes) -> None:
secexp = self.stretch_key(seed)
master_private_key = PrivateKey(int_to_be_bytes(secexp, 32))
master_public_key = master_private_key.public_key.to_bytes(compressed=False)[1:]
if master_public_key != bytes.fromhex(self.mpk):
logger.error('invalid password (mpk) %s %s', self.mpk, master_public_key.hex())
raise InvalidPassword()
def check_password(self, password: Optional[str]) -> None:
assert password is not None
seed = self._get_hex_seed_bytes(password)
self.check_seed(seed)
def get_fingerprint(self) -> bytes:
return cast(bytes, hash160(bytes.fromhex(self.mpk))[:4])
def get_master_public_key(self) -> Optional[str]:
return self.mpk
def get_xpubkey(self, data: DatabaseKeyDerivationData) -> XPublicKey:
assert data.derivation_path is not None and len(data.derivation_path) == 2
return XPublicKey(old_mpk=bytes.fromhex(self.mpk), derivation_data=data)
def is_signature_candidate(self, x_pubkey: XPublicKey) -> bool:
"""
Check whether this keystore can sign for the given extended public key.
"""
if x_pubkey.kind() == XPublicKeyKind.OLD:
mpk, path = x_pubkey.old_keystore_mpk_and_path()
return self.mpk == mpk.hex()
return False
def set_encrypted_seed(self, encrypted_seed: str) -> None:
assert self.seed is not None
self.seed = encrypted_seed
class Hardware_KeyStore(Xpub, KeyStore):
derivation_type = DerivationType.HARDWARE
# Derived classes must set:
# - device
# - wallet_type
hw_type: str
device: str
plugin: Optional["HW_PluginBase"] = None
handler_qt: Optional["QtHandlerBase"] = None
def __init__(self, data: MasterKeyDataHardware, row: Optional[MasterKeyRow]=None) -> None:
Xpub.__init__(self)
KeyStore.__init__(self, row)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.xpub = data['xpub']
self.derivation = data['derivation']
# TODO(database-migration) Move this into a migration.
# New hardware account bug stored the derivation as a decomposed list not a string.
if isinstance(self.derivation, list):
self.derivation = bip32_build_chain_string(self.derivation)
self.hw_type = data['hw_type']
self.label = data.get('label')
def clean_up(self) -> None:
assert self.xpub is not None
app_state.device_manager.unpair_xpub(self.xpub)
if self.handler_qt is not None:
self.handler_qt.clean_up()
def type(self) -> KeystoreType:
return KeystoreType.HARDWARE
def subtype(self) -> Optional[str]:
return self.hw_type
@property
def plugin_qt(self) -> "QtPluginBase":
assert self.plugin is not None
return cast("QtPluginBase", self.plugin)
def set_row(self, row: Optional[MasterKeyRow]=None) -> None:
KeyStore.set_row(self, row)
def is_deterministic(self) -> bool:
return True
def to_derivation_data(self) -> MasterKeyDataHardware:
assert self.xpub is not None
return {
'hw_type': self.hw_type,
'xpub': self.xpub,
            'derivation': self.derivation,
            'label': self.label,
"cfg": None,
}
def to_masterkey_row(self) -> MasterKeyRow:
derivation_lump = json.dumps(self.to_derivation_data()).encode()
return MasterKeyRow(-1, None, DerivationType.HARDWARE, derivation_lump)
def unpaired(self) -> None:
        '''A device paired with the wallet was disconnected. This can be
called in any thread context.'''
logger.debug("unpaired")
def paired(self) -> None:
'''A device paired with the wallet was (re-)connected. This can be
called in any thread context.'''
logger.debug("paired")
def is_watching_only(self) -> bool:
'''The wallet is not watching-only; the user will be prompted for
pin and passphrase as appropriate when needed.'''
assert not self.has_seed()
return False
def can_change_password(self) -> bool:
return False
def can_export(self) -> bool:
return False
def sign_message(self, derivation_path: DerivationPath, message: bytes, password: str) -> bytes:
raise NotImplementedError
def decrypt_message(self, sequence: DerivationPath, message: bytes, password: str) -> bytes:
raise NotImplementedError
SinglesigKeyStoreTypes = Union[BIP32_KeyStore, Hardware_KeyStore, Old_KeyStore]
class Multisig_KeyStore(KeyStore):
# This isn't used, it's mostly included for consistency. Generally this attribute is used
# only by this class, to classify derivation data of cosigner information.
derivation_type = DerivationType.ELECTRUM_MULTISIG
_cosigner_keystores: List[SinglesigKeyStoreTypes]
def __init__(self, data: MasterKeyDataMultiSignature, row: Optional[MasterKeyRow]=None) -> None:
self.set_row(row)
self.m = data["m"]
self.n = data["n"]
self._cosigner_keystores = []
for derivation_type, derivation_data in data["cosigner-keys"]:
assert derivation_type in (DerivationType.BIP32, DerivationType.HARDWARE,
DerivationType.ELECTRUM_OLD)
keystore = instantiate_keystore(derivation_type, derivation_data)
keystore = cast(SinglesigKeyStoreTypes, keystore)
self.add_cosigner_keystore(keystore)
def type(self) -> KeystoreType:
return KeystoreType.MULTISIG
def is_deterministic(self) -> bool:
return True
def set_row(self, row: Optional[MasterKeyRow]=None) -> None:
self._row = row
def to_derivation_data(self) -> MasterKeyDataMultiSignature:
cosigner_keys = [
(k.derivation_type, k.to_derivation_data())
for k in self._cosigner_keystores
]
return {
'm': self.m,
'n': self.n,
'cosigner-keys': cosigner_keys,
}
def to_masterkey_row(self) -> MasterKeyRow:
derivation_lump = json.dumps(self.to_derivation_data()).encode()
return MasterKeyRow(-1, None, DerivationType.ELECTRUM_MULTISIG, derivation_lump)
def is_watching_only(self) -> bool:
return all(k.is_watching_only() for k in self.get_cosigner_keystores())
def can_change_password(self) -> bool:
return all(k.is_watching_only() for k in self.get_cosigner_keystores())
def check_password(self, password: Optional[str]) -> None:
if self.is_watching_only():
return
for keystore in self.get_cosigner_keystores():
if keystore.can_change_password():
assert not isinstance(keystore, Hardware_KeyStore)
keystore.check_password(password)
def get_cosigner_keystores(self) -> Sequence[SinglesigKeyStoreTypes]:
return self._cosigner_keystores
def add_cosigner_keystore(self, keystore: SinglesigKeyStoreTypes) -> None:
if len(self._cosigner_keystores) == self.n:
raise OverloadedMultisigKeystore()
self._cosigner_keystores.append(keystore)
def bip44_derivation(account_id: int) -> str:
return "m/44'/%d'/%d'" % (Net.BIP44_COIN_TYPE, int(account_id))
def bip44_derivation_cointype(cointype: int, account_id: int) -> str:
return f"m/44'/{cointype:d}'/{account_id:d}'"
def private_key_from_bip32_seed(bip32_seed: bytes, derivation_text: str) -> BIP32PrivateKey:
private_key = BIP32PrivateKey.from_seed(bip32_seed, Net.COIN)
for n in bip32_decompose_chain_string(derivation_text):
private_key = private_key.child_safe(n)
return private_key
def bip32_master_key_data_from_seed(seed_phrase: str, passphrase: str, bip32_seed: bytes,
derivation_text: str, password: Optional[str]) -> MasterKeyDataBIP32:
private_key = private_key_from_bip32_seed(bip32_seed, derivation_text)
optional_encrypted_seed = None
optional_encrypted_passphrase = None
optional_encrypted_xprv = None
# If the key is not watch only, we store it but always encrypted.
if password is not None:
optional_encrypted_seed = pw_encode(seed_phrase, password)
if len(passphrase):
optional_encrypted_passphrase = pw_encode(passphrase, password)
optional_encrypted_xprv = pw_encode(private_key.to_extended_key_string(), password)
return {
"seed": optional_encrypted_seed,
"passphrase": optional_encrypted_passphrase,
"label": None,
"xprv": optional_encrypted_xprv,
"xpub": private_key.public_key.to_extended_key_string(),
}
def _public_key_from_private_key_text(text: str) -> PublicKey:
return PrivateKey.from_text(text).public_key
def instantiate_keystore(derivation_type: DerivationType, data: MasterKeyDataTypes,
parent_keystore: Optional[KeyStore]=None,
row: Optional[MasterKeyRow]=None) -> KeyStore:
keystore: KeyStore
if derivation_type == DerivationType.BIP32:
keystore = BIP32_KeyStore(cast(MasterKeyDataBIP32, data),
row, parent_keystore)
elif derivation_type == DerivationType.HARDWARE:
assert parent_keystore is None
keystore = app_state.device_manager.create_keystore(cast(MasterKeyDataHardware, data), row)
elif derivation_type == DerivationType.ELECTRUM_MULTISIG:
assert parent_keystore is None
keystore = Multisig_KeyStore(cast(MasterKeyDataMultiSignature, data), row)
elif derivation_type == DerivationType.ELECTRUM_OLD:
assert parent_keystore is None
keystore = Old_KeyStore(cast(MasterKeyDataElectrumOld, data), row)
else:
raise Exception(_("unknown masterkey type {}:{}").format(
row.masterkey_id if row is not None else None, derivation_type))
return keystore
KeystoreMatchType = Union[str, Set[str]]
def instantiate_keystore_from_text(text_type: KeystoreTextType, text_match: KeystoreMatchType,
password: Optional[str]=None, derivation_text: Optional[str]=None,
passphrase: str="", watch_only: bool=False) -> KeyStore:
assert isinstance(passphrase, str)
bip32_data: MasterKeyDataBIP32
if text_type == KeystoreTextType.EXTENDED_PUBLIC_KEY:
derivation_type = DerivationType.BIP32
assert isinstance(text_match, str)
assert not derivation_text
assert watch_only
assert not passphrase
# `watch_only` is ignored.
bip32_data = {
"xpub": text_match,
"seed": None,
"passphrase": None,
"label": None,
"xprv": None,
}
return instantiate_keystore(derivation_type, bip32_data)
elif text_type == KeystoreTextType.EXTENDED_PRIVATE_KEY:
derivation_type = DerivationType.BIP32
assert isinstance(text_match, str)
assert not derivation_text
assert not passphrase
private_key = bip32_key_from_string(text_match)
assert isinstance(private_key, PrivateKey)
optional_encrypted_xprv = None
if not watch_only:
assert password is not None
optional_encrypted_xprv = pw_encode(text_match, password)
bip32_data = {
"xpub": private_key.public_key.to_extended_key_string(),
"seed": None,
"passphrase": None,
"label": None,
"xprv": optional_encrypted_xprv,
}
return instantiate_keystore(derivation_type, bip32_data)
elif text_type == KeystoreTextType.PRIVATE_KEYS:
derivation_type = DerivationType.IMPORTED
assert not derivation_text
# watch_only?
elif text_type == KeystoreTextType.ADDRESSES:
derivation_type = DerivationType.IMPORTED
assert not derivation_text
# All address types have to be the same.
pass
elif text_type == KeystoreTextType.BIP39_SEED_WORDS:
derivation_type = DerivationType.BIP32
if derivation_text is None:
derivation_text = bip44_derivation_cointype(0, 0)
assert isinstance(text_match, str)
bip32_seed = BIP39Mnemonic.to_seed(text_match, passphrase)
data = bip32_master_key_data_from_seed(text_match, passphrase, bip32_seed, derivation_text,
password)
return instantiate_keystore(derivation_type, data)
elif text_type == KeystoreTextType.ELECTRUM_SEED_WORDS:
derivation_type = DerivationType.BIP32
assert not derivation_text
assert isinstance(text_match, str)
assert password is not None
derivation_text = "m"
bip32_seed = ElectrumMnemonic.new_to_seed(text_match, passphrase, compatible=True)
data = bip32_master_key_data_from_seed(text_match, passphrase, bip32_seed, derivation_text,
password)
return instantiate_keystore(derivation_type, data)
elif text_type == KeystoreTextType.ELECTRUM_OLD_SEED_WORDS:
derivation_type = DerivationType.ELECTRUM_OLD
assert isinstance(text_match, str)
assert not derivation_text
assert not passphrase
if ElectrumMnemonic.is_valid_old(text_match):
assert password is not None
try:
bytes.fromhex(text_match)
except ValueError:
hex_seed = ElectrumMnemonic.old_to_hex_seed(text_match)
else:
hex_seed = text_match
mpk = Old_KeyStore._mpk_from_hex_seed(hex_seed)
else:
watch_only = True
hex_seed = None
mpk = text_match
optional_encrypted_seed = None
if not watch_only:
assert hex_seed is not None
optional_encrypted_seed = pw_encode(hex_seed, password)
old_data: MasterKeyDataElectrumOld = {
"seed": optional_encrypted_seed,
"mpk": mpk,
}
return instantiate_keystore(derivation_type, old_data)
raise NotImplementedError("Unsupported text match type", text_type)
SignableKeystoreTypes = Union[Software_KeyStore, Hardware_KeyStore]
StandardKeystoreTypes = Union[Old_KeyStore, BIP32_KeyStore]
|
tests/validate/common/test_repository_topics_check.py | Chriserus/integration | 2,039 | 11171940 | import pytest
from custom_components.hacs.validate.repository_topics import Validator
@pytest.mark.asyncio
async def test_repository_no_topics(repository):
repository.data.topics = []
check = Validator(repository)
await check.execute_validation()
assert check.failed
@pytest.mark.asyncio
async def test_repository_hacs_topics(repository):
repository.data.topics = ["test"]
check = Validator(repository)
await check.execute_validation()
assert not check.failed
|
collection/cp/codelibrary-master/python/plot.py | daemonslayer/Notebook | 1,727 | 11171971 | <reponame>daemonslayer/Notebook
from cmath import sin
import matplotlib.pyplot as plt
plt.plot([sin(x) for x in range(100)])
plt.show()
|
tests/test_primitive_data/test_tag.py | amih90/bacpypes | 240 | 11171986 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Primitive Data Tag
-----------------------
"""
import unittest
from bacpypes.errors import InvalidTag
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob, btox
from bacpypes.primitivedata import Tag, ApplicationTag, ContextTag, \
OpeningTag, ClosingTag, TagList, \
Null, Boolean, Unsigned, Integer, Real, Double, OctetString, \
CharacterString, BitString, Enumerated, Date, Time, ObjectIdentifier
from bacpypes.pdu import PDUData
# some debugging
_debug = 0
_log = ModuleLogger(globals())
def tag_tuple(tag):
"""Simple function to decompose a tag for debugging."""
return (tag.tagClass, tag.tagNumber, tag.tagLVT, tag.tagData)
@bacpypes_debugging
def IntegerTag(v):
"""Return an application encoded integer tag with the appropriate value.
"""
obj = Integer(v)
tag = Tag()
obj.encode(tag)
return tag
@bacpypes_debugging
def obj_decode(blob):
"""Build PDU from the string, decode the tag, convert to an object."""
if _debug: obj_decode._debug("obj_decode %r", blob)
data = PDUData(blob)
tag = Tag(data)
obj = tag.app_to_object()
return obj
@bacpypes_debugging
def obj_encode(obj):
"""Encode the object into a tag, encode it in a PDU, return the data."""
if _debug: obj_encode._debug("obj_encode %r", obj)
tag = Tag()
obj.encode(tag)
data = PDUData()
tag.encode(data)
return data.pduData
@bacpypes_debugging
def obj_endec(obj, x):
"""Convert the value (a primitive object) to a hex encoded string,
    convert the hex encoded string to an object, and compare the results to
each other."""
if _debug: obj_endec._debug("obj_endec %r %r", obj, x)
# convert the hex string to a blob
blob = xtob(x)
# decode the blob into an object
obj2 = obj_decode(blob)
if _debug: obj_endec._debug(" - obj: %r, %r", obj, obj.value)
# encode the object into a blob
blob2 = obj_encode(obj)
if _debug: obj_endec._debug(" - blob2: %r", blob2)
# compare the results
assert obj == obj2
assert blob == blob2
@bacpypes_debugging
def context_decode(blob):
"""Build PDU from the string, decode the tag, convert to an object."""
if _debug: context_decode._debug("context_decode %r", blob)
data = PDUData(blob)
tag = ContextTag(data)
return tag
@bacpypes_debugging
def context_encode(tag):
"""Encode the object into a tag, encode it in a PDU, return the data."""
if _debug: context_encode._debug("context_encode %r", tag)
data = PDUData()
tag.encode(data)
return data.pduData
@bacpypes_debugging
def context_endec(tnum, x, y):
"""Convert the value (a primitive object) to a hex encoded string,
    convert the hex encoded string to an object, and compare the results to
each other."""
if _debug: context_endec._debug("context_endec %r %r %r", tnum, x, y)
# convert the hex strings to a blobs
tdata = xtob(x)
blob1 = xtob(y)
# make a context tag
tag1 = ContextTag(tnum, tdata)
# decode the blob into a tag
tag2 = context_decode(blob1)
    if _debug: context_endec._debug(" - tag2: %r", tag2)
# encode the tag into a blob
blob2 = context_encode(tag1)
if _debug: context_endec._debug(" - blob2: %r", blob2)
# compare the results
assert tag1 == tag2
assert blob1 == blob2
@bacpypes_debugging
def opening_decode(blob):
"""Build PDU from the string, decode the tag, convert to an object."""
if _debug: opening_decode._debug("opening_decode %r", blob)
data = PDUData(blob)
tag = OpeningTag(data)
return tag
@bacpypes_debugging
def opening_encode(tag):
"""Encode the object into a tag, encode it in a PDU, return the data."""
if _debug: opening_encode._debug("opening_encode %r", tag)
data = PDUData()
tag.encode(data)
return data.pduData
@bacpypes_debugging
def opening_endec(tnum, x):
"""Convert the value (a primitive object) to a hex encoded string,
    convert the hex encoded string to an object, and compare the results to
each other."""
if _debug: opening_endec._debug("opening_endec %r %r", tnum, x)
# convert the hex string to a blob
blob1 = xtob(x)
# make a context tag
tag1 = OpeningTag(tnum)
if _debug: opening_endec._debug(" - tag1: %r", tag1)
# decode the blob into a tag
tag2 = opening_decode(blob1)
if _debug: opening_endec._debug(" - tag2: %r", tag2)
# encode the tag into a blob
blob2 = opening_encode(tag1)
if _debug: opening_endec._debug(" - blob2: %r", blob2)
# compare the results
assert tag1 == tag2
assert blob1 == blob2
@bacpypes_debugging
def closing_decode(blob):
"""Build PDU from the string, decode the tag, convert to an object."""
if _debug: closing_decode._debug("closing_decode %r", blob)
data = PDUData(blob)
tag = ClosingTag(data)
return tag
@bacpypes_debugging
def closing_encode(tag):
"""Encode the object into a tag, encode it in a PDU, return the data."""
if _debug: closing_encode._debug("closing_encode %r", tag)
data = PDUData()
tag.encode(data)
return data.pduData
@bacpypes_debugging
def closing_endec(tnum, x):
"""Convert the value (a primitive object) to a hex encoded string,
    convert the hex encoded string to an object, and compare the results to
each other."""
if _debug: closing_endec._debug("closing_endec %r %r", tnum, x)
# convert the hex string to a blob
blob1 = xtob(x)
# make a context tag
tag1 = ClosingTag(tnum)
if _debug: closing_endec._debug(" - tag1: %r", tag1)
# decode the blob into a tag
tag2 = closing_decode(blob1)
if _debug: closing_endec._debug(" - tag2: %r", tag2)
# encode the tag into a blob
blob2 = closing_encode(tag1)
if _debug: closing_endec._debug(" - blob2: %r", blob2)
# compare the results
assert tag1 == tag2
assert blob1 == blob2
@bacpypes_debugging
def tag(tag_class, tag_number, x):
"""Create a tag object with the given class, number, and data."""
if _debug: tag._debug("tag %r %r %r", tag_class, tag_number, x)
b = xtob(x)
    result = Tag(tag_class, tag_number, len(b), b)
    if _debug: tag._debug(" - result: %r", result)
    return result
@bacpypes_debugging
class TestTag(unittest.TestCase):
def test_tag(self):
if _debug: TestTag._debug("test_tag")
# test tag construction
tag = Tag()
assert (tag.tagClass, tag.tagNumber) == (None, None)
# must have a valid encoded tag to extract from the data
data = PDUData(xtob(''))
with self.assertRaises(InvalidTag):
tag = Tag(data)
# must have two values, class and number
with self.assertRaises(ValueError):
tag = Tag(0)
tag = Tag(0, 1)
assert (tag.tagClass, tag.tagNumber) == (0, 1)
tag = Tag(0, 1, 2)
assert (tag.tagClass, tag.tagNumber, tag.tagLVT) == (0, 1, 2)
# tag data must be bytes or bytearray
with self.assertRaises(TypeError):
tag = Tag(0, 1, 2, 3)
@bacpypes_debugging
class TestApplicationTag(unittest.TestCase):
def test_application_tag(self):
if _debug: TestApplicationTag._debug("test_application_tag")
# application tag construction, encoding, and decoding
tag = ApplicationTag(0, xtob(''))
if _debug: TestApplicationTag._debug(" - tag: %r", tag_tuple(tag))
with self.assertRaises(ValueError):
tag = ApplicationTag(0)
def test_generic_application_to_context(self):
if _debug: TestApplicationTag._debug("test_generic_application_to_context")
# create an application
tag = ApplicationTag(0, xtob('01'))
if _debug: TestApplicationTag._debug(" - tag: %r", tag_tuple(tag))
# convert it to context tagged, context 0
ctag = tag.app_to_context(0)
if _debug: TestApplicationTag._debug(" - ctag: %r", tag_tuple(ctag))
# create a context tag with the same shape
ttag = ContextTag(0, xtob('01'))
if _debug: TestApplicationTag._debug(" - ttag: %r", tag_tuple(ttag))
# check to see they are the same
assert ctag == ttag
# convert the context tag back to an application tag
dtag = ctag.context_to_app(0)
if _debug: TestApplicationTag._debug(" - dtag: %r", tag_tuple(dtag))
# check to see it round-tripped
assert dtag == tag
def test_boolean_application_to_context(self):
if _debug: TestApplicationTag._debug("test_boolean_application_to_context")
# create an application
tag = Tag(Tag.applicationTagClass, Tag.booleanAppTag, 0)
if _debug: TestApplicationTag._debug(" - tag: %r", tag_tuple(tag))
# convert it to context tagged, context 0
ctag = tag.app_to_context(0)
if _debug: TestApplicationTag._debug(" - ctag: %r", tag_tuple(ctag))
# create a context tag with the same shape
ttag = ContextTag(0, xtob('00'))
if _debug: TestApplicationTag._debug(" - ttag: %r", tag_tuple(ttag))
# check to see they are the same
assert ctag == ttag
# convert the context tag back to an application tag
dtag = ctag.context_to_app(Tag.booleanAppTag)
if _debug: TestApplicationTag._debug(" - dtag: %r", tag_tuple(dtag))
# check to see it round-tripped
assert dtag == tag
def test_boolean_application_to_object(self):
if _debug: TestApplicationTag._debug("test_boolean_application_to_object")
# null
obj_endec(Null(), '00')
# boolean
obj_endec(Boolean(True), '11')
obj_endec(Boolean(False), '10')
# unsigned
obj_endec(Unsigned(0), '2100')
obj_endec(Unsigned(1), '2101')
obj_endec(Unsigned(127), '217F')
obj_endec(Unsigned(128), '2180')
# integer
obj_endec(Integer(0), '3100')
obj_endec(Integer(1), '3101')
obj_endec(Integer(-1), '31FF')
obj_endec(Integer(128), '320080')
obj_endec(Integer(-128), '3180')
# real
obj_endec(Real(0), '4400000000')
obj_endec(Real(1), '443F800000')
obj_endec(Real(-1), '44BF800000')
obj_endec(Real(73.5), '4442930000')
# double
obj_endec(Double(0), '55080000000000000000')
obj_endec(Double(1), '55083FF0000000000000')
obj_endec(Double(-1), '5508BFF0000000000000')
obj_endec(Double(73.5), '55084052600000000000')
# octet string
obj_endec(OctetString(xtob('')), '60')
obj_endec(OctetString(xtob('01')), '6101')
obj_endec(OctetString(xtob('0102')), '620102')
obj_endec(OctetString(xtob('010203040506')), '6506010203040506')
# character string
obj_endec(CharacterString(''), '7100')
obj_endec(CharacterString('a'), '720061')
obj_endec(CharacterString('abcde'), '7506006162636465')
# bit string
obj_endec(BitString([]), '8100')
obj_endec(BitString([0]), '820700')
obj_endec(BitString([1]), '820780')
obj_endec(BitString([1, 1, 1, 1, 1]), '8203F8')
obj_endec(BitString([1] * 10), '8306FFC0')
# enumerated
obj_endec(Enumerated(0), '9100')
obj_endec(Enumerated(1), '9101')
obj_endec(Enumerated(127), '917F')
obj_endec(Enumerated(128), '9180')
# date
obj_endec(Date((1,2,3,4)), 'A401020304')
obj_endec(Date((255,2,3,4)), 'A4FF020304')
obj_endec(Date((1,255,3,4)), 'A401FF0304')
obj_endec(Date((1,2,255,4)), 'A40102FF04')
obj_endec(Date((1,2,3,255)), 'A4010203FF')
# time
obj_endec(Time((1,2,3,4)), 'B401020304')
obj_endec(Time((255,2,3,4)), 'B4FF020304')
obj_endec(Time((1,255,3,4)), 'B401FF0304')
obj_endec(Time((1,2,255,4)), 'B40102FF04')
obj_endec(Time((1,2,3,255)), 'B4010203FF')
# object identifier
obj_endec(ObjectIdentifier(0,0), 'C400000000')
obj_endec(ObjectIdentifier(1,0), 'C400400000')
obj_endec(ObjectIdentifier(0,2), 'C400000002')
obj_endec(ObjectIdentifier(3,4), 'C400C00004')
@bacpypes_debugging
class TestContextTag(unittest.TestCase):
def test_context_tag(self):
if _debug: TestContextTag._debug("test_context_tag")
# test context tag construction
tag = ContextTag(0, xtob(''))
with self.assertRaises(ValueError):
tag = ContextTag()
# test encoding and decoding
context_endec(0, '', '08')
context_endec(1, '01', '1901')
context_endec(2, '0102', '2A0102')
context_endec(3, '010203', '3B010203')
context_endec(14, '010203', 'EB010203')
context_endec(15, '010203', 'FB0F010203')
@bacpypes_debugging
class TestOpeningTag(unittest.TestCase):
def test_opening_tag(self):
if _debug: TestOpeningTag._debug("test_opening_tag")
# test opening tag construction
tag = OpeningTag(0)
with self.assertRaises(TypeError):
tag = OpeningTag()
# test encoding, and decoding
opening_endec(0, '0E')
opening_endec(1, '1E')
opening_endec(2, '2E')
opening_endec(3, '3E')
opening_endec(14, 'EE')
opening_endec(15, 'FE0F')
opening_endec(254, 'FEFE')
@bacpypes_debugging
class TestClosingTag(unittest.TestCase):
def test_closing_tag(self):
if _debug: TestClosingTag._debug("test_closing_tag")
# test closing tag construction
tag = ClosingTag(0)
with self.assertRaises(TypeError):
tag = ClosingTag()
# test encoding, and decoding
closing_endec(0, '0F')
closing_endec(1, '1F')
closing_endec(2, '2F')
closing_endec(3, '3F')
closing_endec(14, 'EF')
closing_endec(15, 'FF0F')
closing_endec(254, 'FFFE')
@bacpypes_debugging
class TestTagList(unittest.TestCase):
def test_tag_list(self):
if _debug: TestTagList._debug("test_tag_list")
# test tag list construction
tag_list = TagList()
tag_list = TagList([])
def test_peek(self):
if _debug: TestTagList._debug("test_peek")
tag0 = IntegerTag(0)
taglist = TagList([tag0])
# peek at the first tag
assert tag0 == taglist.Peek()
# pop of the front
tag1 = taglist.Pop()
assert taglist.tagList == []
# push it back on the front
taglist.push(tag1)
assert taglist.tagList == [tag1]
def test_get_context(self):
"""Test extracting specific context encoded content.
"""
if _debug: TestTagList._debug("test_get_context")
tag_list_data = [
ContextTag(0, xtob('00')),
ContextTag(1, xtob('01')),
OpeningTag(2),
IntegerTag(3),
OpeningTag(0),
IntegerTag(4),
ClosingTag(0),
ClosingTag(2),
]
taglist = TagList(tag_list_data)
# known to be a simple context encoded element
context_0 = taglist.get_context(0)
if _debug: TestTagList._debug(" - context_0: %r", context_0)
assert context_0 == tag_list_data[0]
# known to be a simple context encoded list of element(s)
context_2 = taglist.get_context(2)
if _debug: TestTagList._debug(" - context_2: %r", context_2)
assert context_2.tagList == tag_list_data[3:7]
# known missing context
context_3 = taglist.get_context(3)
if _debug: TestTagList._debug(" - context_3: %r", context_3)
assert taglist.get_context(3) is None
def test_endec_0(self):
"""Test empty tag list encoding and decoding.
"""
if _debug: TestTagList._debug("test_endec_0")
taglist = TagList([])
data = PDUData()
taglist.encode(data)
assert data.pduData == xtob('')
taglist = TagList()
taglist.decode(data)
assert taglist.tagList == []
def test_endec_1(self):
"""Test short tag list encoding and decoding, application tags.
"""
if _debug: TestTagList._debug("test_endec_1")
tag0 = IntegerTag(0x00)
tag1 = IntegerTag(0x01)
taglist = TagList([tag0, tag1])
data = PDUData()
taglist.encode(data)
assert data.pduData == xtob('31003101')
taglist = TagList()
taglist.decode(data)
assert taglist.tagList == [tag0, tag1]
def test_endec_2(self):
"""Test short tag list encoding and decoding, context tags.
"""
if _debug: TestTagList._debug("test_endec_2")
tag0 = ContextTag(0, xtob('00'))
tag1 = ContextTag(1, xtob('01'))
taglist = TagList([tag0, tag1])
data = PDUData()
taglist.encode(data)
assert data.pduData == xtob('09001901')
taglist = TagList()
taglist.decode(data)
assert taglist.tagList == [tag0, tag1]
def test_endec_3(self):
"""Test bracketed application tagged integer encoding and decoding."""
if _debug: TestTagList._debug("test_endec_2")
tag0 = OpeningTag(0)
tag1 = IntegerTag(0x0102)
tag2 = ClosingTag(0)
taglist = TagList([tag0, tag1, tag2])
data = PDUData()
taglist.encode(data)
assert data.pduData == xtob('0E3201020F')
taglist = TagList()
taglist.decode(data)
assert taglist.tagList == [tag0, tag1, tag2] |
tests/unit/profiles/test_plone.py | jdufresne/isort | 3,043 | 11171987 | from functools import partial
from ..utils import isort_test
plone_isort_test = partial(isort_test, profile="plone")
def test_plone_code_snippet_one():
plone_isort_test(
"""# -*- coding: utf-8 -*-
from plone.app.multilingual.testing import PLONE_APP_MULTILINGUAL_PRESET_FIXTURE # noqa
from plone.app.robotframework.testing import REMOTE_LIBRARY_BUNDLE_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PloneWithPackageLayer
from plone.testing import z2
import plone.app.multilingualindexes
PAMI_FIXTURE = PloneWithPackageLayer(
bases=(PLONE_APP_MULTILINGUAL_PRESET_FIXTURE,),
name="PAMILayer:Fixture",
gs_profile_id="plone.app.multilingualindexes:default",
zcml_package=plone.app.multilingualindexes,
zcml_filename="configure.zcml",
additional_z2_products=["plone.app.multilingualindexes"],
)
"""
)
def test_plone_code_snippet_two():
plone_isort_test(
"""# -*- coding: utf-8 -*-
from Acquisition import aq_base
from App.class_init import InitializeClass
from App.special_dtml import DTMLFile
from BTrees.OOBTree import OOTreeSet
from logging import getLogger
from plone import api
from plone.app.multilingual.events import ITranslationRegisteredEvent
from plone.app.multilingual.interfaces import ITG
from plone.app.multilingual.interfaces import ITranslatable
from plone.app.multilingual.interfaces import ITranslationManager
from plone.app.multilingualindexes.utils import get_configuration
from plone.indexer.interfaces import IIndexableObject
from Products.CMFPlone.utils import safe_hasattr
from Products.DateRecurringIndex.index import DateRecurringIndex
from Products.PluginIndexes.common.UnIndex import UnIndex
from Products.ZCatalog.Catalog import Catalog
from ZODB.POSException import ConflictError
from zope.component import getMultiAdapter
from zope.component import queryAdapter
from zope.globalrequest import getRequest
logger = getLogger(__name__)
"""
)
def test_plone_code_snippet_three():
plone_isort_test(
"""# -*- coding: utf-8 -*-
from plone.app.querystring.interfaces import IQueryModifier
from zope.interface import provider
import logging
logger = logging.getLogger(__name__)
"""
)
|
Python/Tests/TestData/Grammar/Literals.py | techkey/PTVS | 695 | 11172022 | <gh_stars>100-1000
"abc"
r"raw string"
R"raw string"
"""abc"""
r"""raw string"""
R"""raw string"""
'abc'
r'raw string'
R'raw string'
'''abc'''
r'''raw string'''
R'''raw string'''
1000
2147483647
3.14
10.
.001
1e100
3.14e-10
0e0
3.14j
10.j
10j
.001j
1e100j
3.14e-10j
-2147483648
-100 |
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripSystemRenderer.py | htlcnn/ironpython-stubs | 182 | 11172027 | <reponame>htlcnn/ironpython-stubs<gh_stars>100-1000
class ToolStripSystemRenderer(ToolStripRenderer):
"""
Handles the painting functionality for System.Windows.Forms.ToolStrip objects,using system colors and a flat visual style.
ToolStripSystemRenderer()
"""
|
cliport/tasks/__init__.py | wx-b/cliport | 110 | 11172050 | """Ravens tasks."""
from cliport.tasks.align_box_corner import AlignBoxCorner
from cliport.tasks.assembling_kits import AssemblingKits
from cliport.tasks.assembling_kits import AssemblingKitsEasy
from cliport.tasks.assembling_kits_seq import AssemblingKitsSeqSeenColors
from cliport.tasks.assembling_kits_seq import AssemblingKitsSeqUnseenColors
from cliport.tasks.assembling_kits_seq import AssemblingKitsSeqFull
from cliport.tasks.block_insertion import BlockInsertion
from cliport.tasks.block_insertion import BlockInsertionEasy
from cliport.tasks.block_insertion import BlockInsertionNoFixture
from cliport.tasks.block_insertion import BlockInsertionSixDof
from cliport.tasks.block_insertion import BlockInsertionTranslation
from cliport.tasks.manipulating_rope import ManipulatingRope
from cliport.tasks.align_rope import AlignRope
from cliport.tasks.packing_boxes import PackingBoxes
from cliport.tasks.packing_shapes import PackingShapes
from cliport.tasks.packing_boxes_pairs import PackingBoxesPairsSeenColors
from cliport.tasks.packing_boxes_pairs import PackingBoxesPairsUnseenColors
from cliport.tasks.packing_boxes_pairs import PackingBoxesPairsFull
from cliport.tasks.packing_google_objects import PackingSeenGoogleObjectsSeq
from cliport.tasks.packing_google_objects import PackingUnseenGoogleObjectsSeq
from cliport.tasks.packing_google_objects import PackingSeenGoogleObjectsGroup
from cliport.tasks.packing_google_objects import PackingUnseenGoogleObjectsGroup
from cliport.tasks.palletizing_boxes import PalletizingBoxes
from cliport.tasks.place_red_in_green import PlaceRedInGreen
from cliport.tasks.put_block_in_bowl import PutBlockInBowlSeenColors
from cliport.tasks.put_block_in_bowl import PutBlockInBowlUnseenColors
from cliport.tasks.put_block_in_bowl import PutBlockInBowlFull
from cliport.tasks.stack_block_pyramid import StackBlockPyramid
from cliport.tasks.stack_block_pyramid_seq import StackBlockPyramidSeqSeenColors
from cliport.tasks.stack_block_pyramid_seq import StackBlockPyramidSeqUnseenColors
from cliport.tasks.stack_block_pyramid_seq import StackBlockPyramidSeqFull
from cliport.tasks.sweeping_piles import SweepingPiles
from cliport.tasks.separating_piles import SeparatingPilesSeenColors
from cliport.tasks.separating_piles import SeparatingPilesUnseenColors
from cliport.tasks.separating_piles import SeparatingPilesFull
from cliport.tasks.task import Task
from cliport.tasks.towers_of_hanoi import TowersOfHanoi
from cliport.tasks.towers_of_hanoi_seq import TowersOfHanoiSeqSeenColors
from cliport.tasks.towers_of_hanoi_seq import TowersOfHanoiSeqUnseenColors
from cliport.tasks.towers_of_hanoi_seq import TowersOfHanoiSeqFull
names = {
# demo conditioned
'align-box-corner': AlignBoxCorner,
'assembling-kits': AssemblingKits,
'assembling-kits-easy': AssemblingKitsEasy,
'block-insertion': BlockInsertion,
'block-insertion-easy': BlockInsertionEasy,
'block-insertion-nofixture': BlockInsertionNoFixture,
'block-insertion-sixdof': BlockInsertionSixDof,
'block-insertion-translation': BlockInsertionTranslation,
'manipulating-rope': ManipulatingRope,
'packing-boxes': PackingBoxes,
'palletizing-boxes': PalletizingBoxes,
'place-red-in-green': PlaceRedInGreen,
'stack-block-pyramid': StackBlockPyramid,
'sweeping-piles': SweepingPiles,
'towers-of-hanoi': TowersOfHanoi,
# goal conditioned
'align-rope': AlignRope,
'assembling-kits-seq-seen-colors': AssemblingKitsSeqSeenColors,
'assembling-kits-seq-unseen-colors': AssemblingKitsSeqUnseenColors,
'assembling-kits-seq-full': AssemblingKitsSeqFull,
'packing-shapes': PackingShapes,
'packing-boxes-pairs-seen-colors': PackingBoxesPairsSeenColors,
'packing-boxes-pairs-unseen-colors': PackingBoxesPairsUnseenColors,
'packing-boxes-pairs-full': PackingBoxesPairsFull,
'packing-seen-google-objects-seq': PackingSeenGoogleObjectsSeq,
'packing-unseen-google-objects-seq': PackingUnseenGoogleObjectsSeq,
'packing-seen-google-objects-group': PackingSeenGoogleObjectsGroup,
'packing-unseen-google-objects-group': PackingUnseenGoogleObjectsGroup,
'put-block-in-bowl-seen-colors': PutBlockInBowlSeenColors,
'put-block-in-bowl-unseen-colors': PutBlockInBowlUnseenColors,
'put-block-in-bowl-full': PutBlockInBowlFull,
'stack-block-pyramid-seq-seen-colors': StackBlockPyramidSeqSeenColors,
'stack-block-pyramid-seq-unseen-colors': StackBlockPyramidSeqUnseenColors,
'stack-block-pyramid-seq-full': StackBlockPyramidSeqFull,
'separating-piles-seen-colors': SeparatingPilesSeenColors,
'separating-piles-unseen-colors': SeparatingPilesUnseenColors,
'separating-piles-full': SeparatingPilesFull,
'towers-of-hanoi-seq-seen-colors': TowersOfHanoiSeqSeenColors,
'towers-of-hanoi-seq-unseen-colors': TowersOfHanoiSeqUnseenColors,
'towers-of-hanoi-seq-full': TowersOfHanoiSeqFull,
}
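# Illustrative lookup sketch (not part of the original module).  It assumes
# each registered Task subclass can be constructed without arguments, which is
# the usual Ravens/CLIPort pattern but is not guaranteed by this file alone.
def make_task(task_name: str) -> Task:
    """Instantiate a registered task by its string name."""
    if task_name not in names:
        raise KeyError(f"Unknown task: {task_name!r}")
    return names[task_name]()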
|
challenge_9/python/system123/challenge_9.py | YearOfProgramming/2017Challenges | 271 | 11172053 | <filename>challenge_9/python/system123/challenge_9.py<gh_stars>100-1000
from collections import deque
def squared_sort(input_list):
negs = deque([])
pos = deque([])
sorted = []
for x in input_list: #O(n)
if x < 0:
negs.append(x**2)
else:
pos.append(x**2)
n = None
p = None
for i in range(0, len(input_list)): #O(n)
if pos and p == None:
p = pos.popleft()
if negs and n == None:
n = negs.pop()
        if (p is None) or ((n is not None) and (n <= p)):
sorted.append(n)
n = None
else:
sorted.append(p)
p = None
return(sorted)
if __name__ == "__main__":
    # Input format is an assumption here: a single line of whitespace-separated
    # integers (the original passed the raw input string straight to the sort).
    print(squared_sort([int(x) for x in input().split()]))
|
tests/test_remote.py | irina694/earth-science-notebook | 1,076 | 11172080 | <reponame>irina694/earth-science-notebook
import pytest
from geonotebook.kernel import Remote
@pytest.fixture
def remote(mocker):
protocols = [{'procedure': 'no_args',
'required': [],
'optional': []},
{'procedure': 'required_only',
'required': [{"key": "a"}, {"key": "b"}],
'optional': []},
{'procedure': 'optional_only',
'required': [],
'optional': [{"key": "x"}, {"key": "y"}, {"key": "z"}]},
{'procedure': 'required_and_optional',
'required': [{"key": "a"}, {"key": "b"}],
'optional': [{"key": "x"}, {"key": "y"}, {"key": "z"}]}]
r = Remote(None, protocols)
# Mock out the UUID.uuid4 function to return a consistent ID for testing
mocker.patch('geonotebook.jsonrpc.uuid.uuid4', return_value='TEST-ID')
mocker.patch.object(r, '_send_msg')
return r
def test_remote_bad_protocol():
with pytest.raises(AssertionError):
Remote(None, ['foo', 'bar'])
def test_remote_bad_protocol_missing_procedure():
with pytest.raises(AssertionError):
Remote(None, [{'required': [],
'optional': []}])
def test_remote_bad_protocol_missing_required():
with pytest.raises(AssertionError):
Remote(None, [{'procedure': 'no_args',
'optional': []}])
def test_remote_bad_protocol_missing_optional():
with pytest.raises(AssertionError):
Remote(None, [{'procedure': 'no_args',
'required': []}])
def test_remote_init(remote):
assert hasattr(remote.no_args, '__call__')
assert hasattr(remote.required_only, '__call__')
assert hasattr(remote.required_and_optional, '__call__')
def test_remote_call_no_args(remote):
remote.no_args()
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({'jsonrpc': '2.0', 'params': [],
'method': 'no_args', 'id': 'TEST-ID'})
def test_remote_call_no_args_with_args(remote):
with pytest.raises(AssertionError):
remote.no_args('foo', 'bar')
def test_remote_call_required_only(remote):
remote.required_only('foo', 'bar')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0', 'params': [{'key': 'a',
'value': 'foo',
'required': True},
{'key': 'b',
'value': 'bar',
'required': True}],
'method': 'required_only', 'id': 'TEST-ID'})
def test_remote_call_required_only_with_too_few_args(remote):
with pytest.raises(AssertionError):
remote.required_only('foo')
def test_remote_call_required_only_with_too_many_args(remote):
with pytest.raises(AssertionError):
remote.no_args('foo', 'bar', 'baz')
def test_remote_call_optional_only(remote):
remote.optional_only(x='foo', y='bar', z='baz')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0',
'params': [{'key': 'x', 'value': 'foo', 'required': False},
{'key': 'y', 'value': 'bar', 'required': False},
{'key': 'z', 'value': 'baz', 'required': False}],
'method': 'optional_only', 'id': 'TEST-ID'})
remote.optional_only()
assert remote._send_msg.call_count == 2
remote._send_msg.assert_called_with({
'jsonrpc': '2.0', 'params': [],
'method': 'optional_only', 'id': 'TEST-ID'})
def test_remote_call_optional_only_missing_arguments(remote):
remote.optional_only(x='foo', z='bar')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0',
'params': [{'key': 'x', 'value': 'foo', 'required': False},
{'key': 'z', 'value': 'bar', 'required': False}],
'method': 'optional_only', 'id': 'TEST-ID'})
def test_remote_promise_resolve_success(remote):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
remote.resolve({'id': 'TEST-ID', 'result': 'SUCCESS', 'error': None})
assert Nonlocal.result == 'SUCCESS'
def test_remote_promise_resolve_error(remote):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
remote.resolve({'id': 'TEST-ID', 'result': None, 'error': 'ERROR'})
assert isinstance(Nonlocal.result, Exception)
assert str(Nonlocal.result) == "ERROR"
@pytest.mark.skip(reason="See: geonotebook/issues/46")
def test_remote_promise_resolve_with_bad_message(remote, mocker):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
with pytest.raises(Exception):
remote.resolve('bad message')
remote.no_args().then(success, error)
with pytest.raises(Exception):
remote.resolve({'id': 'TEST-ID', 'bad': 'message'})
remote.no_args().then(success, error)
    # warn = mocker.patch.object(remote.log, 'warn')
remote.resolve({'id': 'BAD-ID'})
# assert warn.called_once == 1
|
tests/utils/test_pydantic.py | 0scarB/piccolo | 750 | 11172091 | <filename>tests/utils/test_pydantic.py<gh_stars>100-1000
import decimal
from unittest import TestCase
import pydantic
from pydantic import ValidationError
from piccolo.columns import JSON, JSONB, Array, Numeric, Secret, Text, Varchar
from piccolo.columns.column_types import ForeignKey
from piccolo.table import Table
from piccolo.utils.pydantic import create_pydantic_model
class TestVarcharColumn(TestCase):
def test_varchar_length(self):
class Director(Table):
name = Varchar(length=10)
pydantic_model = create_pydantic_model(table=Director)
with self.assertRaises(ValidationError):
pydantic_model(name="This is a really long name")
pydantic_model(name="short name")
class TestNumericColumn(TestCase):
"""
    Numeric and Decimal are the same - so we'll just test Numeric.
"""
def test_numeric_digits(self):
class Movie(Table):
box_office = Numeric(digits=(5, 1))
pydantic_model = create_pydantic_model(table=Movie)
with self.assertRaises(ValidationError):
            # This should fail as there are too many digits after the decimal
            # point
pydantic_model(box_office=decimal.Decimal("1.11"))
with self.assertRaises(ValidationError):
            # This should fail as there are too many digits in total
pydantic_model(box_office=decimal.Decimal("11111.1"))
pydantic_model(box_office=decimal.Decimal("1.0"))
def test_numeric_without_digits(self):
class Movie(Table):
box_office = Numeric()
try:
create_pydantic_model(table=Movie)
except TypeError:
self.fail(
"Creating numeric field without"
" digits failed in pydantic model."
)
else:
self.assertTrue(True)
class TestSecretColumn(TestCase):
def test_secret_param(self):
class TopSecret(Table):
confidential = Secret()
pydantic_model = create_pydantic_model(table=TopSecret)
self.assertEqual(
pydantic_model.schema()["properties"]["confidential"]["extra"][
"secret"
],
True,
)
class TestArrayColumn(TestCase):
def test_array_param(self):
class Band(Table):
members = Array(base_column=Varchar(length=16))
pydantic_model = create_pydantic_model(table=Band)
self.assertEqual(
pydantic_model.schema()["properties"]["members"]["items"]["type"],
"string",
)
class TestTextColumn(TestCase):
def test_text_format(self):
class Band(Table):
bio = Text()
pydantic_model = create_pydantic_model(table=Band)
self.assertEqual(
pydantic_model.schema()["properties"]["bio"]["format"],
"text-area",
)
class TestColumnHelpText(TestCase):
"""
Make sure that columns with `help_text` attribute defined have the
relevant text appear in the schema.
"""
def test_help_text_present(self):
help_text = "In millions of US dollars."
class Movie(Table):
box_office = Numeric(digits=(5, 1), help_text=help_text)
pydantic_model = create_pydantic_model(table=Movie)
self.assertEqual(
pydantic_model.schema()["properties"]["box_office"]["extra"][
"help_text"
],
help_text,
)
class TestTableHelpText(TestCase):
"""
Make sure that tables with `help_text` attribute defined have the
relevant text appear in the schema.
"""
def test_help_text_present(self):
help_text = "Movies which were released in cinemas."
class Movie(Table, help_text=help_text):
name = Varchar()
pydantic_model = create_pydantic_model(table=Movie)
self.assertEqual(
pydantic_model.schema()["help_text"],
help_text,
)
class TestJSONColumn(TestCase):
def test_default(self):
class Movie(Table):
meta = JSON()
meta_b = JSONB()
pydantic_model = create_pydantic_model(table=Movie)
json_string = '{"code": 12345}'
model_instance = pydantic_model(meta=json_string, meta_b=json_string)
self.assertEqual(model_instance.meta, json_string)
self.assertEqual(model_instance.meta_b, json_string)
def test_deserialize_json(self):
class Movie(Table):
meta = JSON()
meta_b = JSONB()
pydantic_model = create_pydantic_model(
table=Movie, deserialize_json=True
)
json_string = '{"code": 12345}'
output = {"code": 12345}
model_instance = pydantic_model(meta=json_string, meta_b=json_string)
self.assertEqual(model_instance.meta, output)
self.assertEqual(model_instance.meta_b, output)
def test_validation(self):
class Movie(Table):
meta = JSON()
meta_b = JSONB()
for deserialize_json in (True, False):
pydantic_model = create_pydantic_model(
table=Movie, deserialize_json=deserialize_json
)
json_string = "error"
with self.assertRaises(pydantic.ValidationError):
pydantic_model(meta=json_string, meta_b=json_string)
def test_json_format(self):
class Movie(Table):
features = JSON()
pydantic_model = create_pydantic_model(table=Movie)
self.assertEqual(
pydantic_model.schema()["properties"]["features"]["format"],
"json",
)
class TestExcludeColumn(TestCase):
def test_all(self):
class Computer(Table):
CPU = Varchar()
GPU = Varchar()
pydantic_model = create_pydantic_model(Computer, exclude_columns=())
properties = pydantic_model.schema()["properties"]
self.assertIsInstance(properties["GPU"], dict)
self.assertIsInstance(properties["CPU"], dict)
def test_exclude(self):
class Computer(Table):
CPU = Varchar()
GPU = Varchar()
pydantic_model = create_pydantic_model(
Computer,
exclude_columns=(Computer.CPU,),
)
properties = pydantic_model.schema()["properties"]
self.assertIsInstance(properties.get("GPU"), dict)
self.assertIsNone(properties.get("CPU"))
def test_exclude_all_manually(self):
class Computer(Table):
GPU = Varchar()
CPU = Varchar()
pydantic_model = create_pydantic_model(
Computer,
exclude_columns=(Computer.GPU, Computer.CPU),
)
self.assertEqual(pydantic_model.schema()["properties"], {})
def test_exclude_all_meta(self):
class Computer(Table):
GPU = Varchar()
CPU = Varchar()
pydantic_model = create_pydantic_model(
Computer,
exclude_columns=tuple(Computer._meta.columns),
)
self.assertEqual(pydantic_model.schema()["properties"], {})
def test_invalid_column_str(self):
class Computer(Table):
CPU = Varchar()
GPU = Varchar()
with self.assertRaises(ValueError):
create_pydantic_model(
Computer,
exclude_columns=("CPU",),
)
def test_invalid_column_different_table(self):
class Computer(Table):
CPU = Varchar()
GPU = Varchar()
class Computer2(Table):
SSD = Varchar()
with self.assertRaises(ValueError):
create_pydantic_model(Computer, exclude_columns=(Computer2.SSD,))
def test_invalid_column_different_table_same_type(self):
class Computer(Table):
CPU = Varchar()
GPU = Varchar()
class Computer2(Table):
CPU = Varchar()
with self.assertRaises(ValueError):
create_pydantic_model(Computer, exclude_columns=(Computer2.CPU,))
class TestNestedModel(TestCase):
def test_nested_models(self):
class Country(Table):
name = Varchar(length=10)
class Director(Table):
name = Varchar(length=10)
country = ForeignKey(Country)
class Movie(Table):
name = Varchar(length=10)
director = ForeignKey(Director)
MovieModel = create_pydantic_model(table=Movie, nested=True)
#######################################################################
DirectorModel = MovieModel.__fields__["director"].type_
self.assertTrue(issubclass(DirectorModel, pydantic.BaseModel))
director_model_keys = [i for i in DirectorModel.__fields__.keys()]
self.assertEqual(director_model_keys, ["name", "country"])
#######################################################################
CountryModel = DirectorModel.__fields__["country"].type_
self.assertTrue(issubclass(CountryModel, pydantic.BaseModel))
country_model_keys = [i for i in CountryModel.__fields__.keys()]
self.assertEqual(country_model_keys, ["name"])
def test_cascaded_args(self):
"""
Make sure that arguments passed to ``create_pydantic_model`` are
cascaded to nested models.
"""
class Country(Table):
name = Varchar(length=10)
class Director(Table):
name = Varchar(length=10)
country = ForeignKey(Country)
class Movie(Table):
name = Varchar(length=10)
director = ForeignKey(Director)
MovieModel = create_pydantic_model(
table=Movie, nested=True, include_default_columns=True
)
#######################################################################
DirectorModel = MovieModel.__fields__["director"].type_
self.assertTrue(issubclass(DirectorModel, pydantic.BaseModel))
director_model_keys = [i for i in DirectorModel.__fields__.keys()]
self.assertEqual(director_model_keys, ["id", "name", "country"])
#######################################################################
CountryModel = DirectorModel.__fields__["country"].type_
self.assertTrue(issubclass(CountryModel, pydantic.BaseModel))
country_model_keys = [i for i in CountryModel.__fields__.keys()]
self.assertEqual(country_model_keys, ["id", "name"])
class TestDBColumnName(TestCase):
def test_db_column_name(self):
"""
Make sure that the Pydantic model has an alias if ``db_column_name``
is specified for a column.
"""
class Band(Table):
name = Varchar(db_column_name="regrettable_column_name")
BandModel = create_pydantic_model(table=Band)
model = BandModel(regrettable_column_name="test")
self.assertTrue(model.name == "test")
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmicsInCollisions_Output_cff.py | ckamtsikis/cmssw | 852 | 11172098 | # Author : <NAME>
# Date : July 1st, 2010
# last update: $Date: 2010/07/06 11:48:22 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using Cosmic muon events
OutALCARECOTkAlCosmicsInCollisions_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlCosmicsInCollisions')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOTkAlCosmicsInCollisions_*_*',
'keep siStripDigis_DetIdCollection_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
#'keep Si*Cluster*_si*Clusters_*_*', # for cosmics keep original clusters
'keep SiPixelCluster*_siPixelClusters_*_*', # for cosmics keep original clusters
'keep SiStripCluster*_siStripClusters_*_*', # for cosmics keep original clusters
'keep recoMuons_muons1Leg_*_*') # save muons as timing info is needed for BP corrections in deconvolution
)
import copy
OutALCARECOTkAlCosmicsInCollisions = copy.deepcopy(OutALCARECOTkAlCosmicsInCollisions_noDrop)
OutALCARECOTkAlCosmicsInCollisions.outputCommands.insert(0, "drop *")
|
pottery/executor.py | brainix/pottery | 625 | 11172126 | <filename>pottery/executor.py
# --------------------------------------------------------------------------- #
# executor.py #
# #
# Copyright © 2015-2021, <NAME>, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
import concurrent.futures
from types import TracebackType
from typing import Optional
from typing import Type
from typing import overload
from typing_extensions import Literal
class BailOutExecutor(concurrent.futures.ThreadPoolExecutor):
'''ThreadPoolExecutor subclass that doesn't wait for futures on .__exit__().
The beating heart of all consensus based distributed algorithms is to
scatter a computation across multiple nodes, then to gather their results,
then to evaluate whether quorum is achieved.
In some cases, quorum requires gathering all of the nodes' results (e.g.,
interrogating all nodes for a maximum value for a variable).
But in other cases, quorum requires gathering only n // 2 + 1 nodes'
results (e.g., figuring out if > 50% of nodes believe that I'm the owner of
a lock).
In the latter case, the desired behavior is for the executor to bail out
early returning control to the main thread as soon as quorum is achieved,
while still allowing pending in-flight futures to complete in backgound
threads. Python's ThreadPoolExecutor's .__exit__() method waits for
pending futures to complete before returning control to the main thread,
preventing bail out:
https://github.com/python/cpython/blob/212337369a64aa96d8b370f39b70113078ad0020/Lib/concurrent/futures/_base.py
https://docs.python.org/3.9/library/concurrent.futures.html#concurrent.futures.Executor.shutdown
This subclass overrides .__exit__() to not wait for pending futures to
complete before returning control to the main thread, allowing bail out.
'''
@overload
def __exit__(self,
exc_type: None,
exc_value: None,
exc_traceback: None,
) -> Literal[False]:
raise NotImplementedError
@overload
def __exit__(self,
exc_type: Type[BaseException],
exc_value: BaseException,
exc_traceback: TracebackType,
) -> Literal[False]:
raise NotImplementedError
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[TracebackType],
) -> Literal[False]:
self.shutdown(wait=False)
return False
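# Illustrative usage sketch (not part of the original module): scatter a
# per-node check across several nodes and bail out as soon as a simple
# majority agrees.  The `masters` list and `check_node()` helper below are
# assumptions invented for this example.
if __name__ == '__main__':  # pragma: no cover
    def check_node(master: str) -> bool:
        # Hypothetical per-node predicate; a real caller would query the node
        # here (e.g. ask a Redis master who currently owns a lock).
        return master.endswith(('1', '2'))
    masters = ['node1', 'node2', 'node3']
    quorum = len(masters) // 2 + 1
    votes = 0
    with BailOutExecutor() as executor:
        futures = [executor.submit(check_node, master) for master in masters]
        for future in concurrent.futures.as_completed(futures):
            votes += bool(future.result())
            if votes >= quorum:
                # Quorum reached: leave the with-block immediately.  Any
                # still-pending futures keep running in background threads.
                break
    print('quorum achieved:', votes >= quorum)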
|
lib/python/treadmill/appenv/__init__.py | vrautela/treadmill | 133 | 11172163 | <filename>lib/python/treadmill/appenv/__init__.py<gh_stars>100-1000
"""Treadmill application environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
if os.name == 'nt':
from ._windows import WindowsAppEnvironment as AppEnvironment
else:
from ._linux import LinuxAppEnvironment as AppEnvironment
__all__ = ['AppEnvironment']
|
zcls/model/layers/dbb_util.py | ZJCV/PyCls | 110 | 11172168 | <filename>zcls/model/layers/dbb_util.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
@date: 2021/7/28 下午5:50
@file: dbb_util.py
@author: zj
@description:
"""
from .dbb_transforms import transI_fusebn, transII_addbranch, transIII_1x1_kxk, \
    transV_avg, transVI_multiscale
from .diverse_branch_block import DiverseBranchBlock
def get_equivalent_kernel_bias(module):
assert isinstance(module, DiverseBranchBlock)
k_origin, b_origin = transI_fusebn(module.dbb_origin.conv.weight, module.dbb_origin.bn)
if hasattr(module, 'dbb_1x1'):
k_1x1, b_1x1 = transI_fusebn(module.dbb_1x1.conv.weight, module.dbb_1x1.bn)
k_1x1 = transVI_multiscale(k_1x1, module.kernel_size)
else:
k_1x1, b_1x1 = 0, 0
if hasattr(module.dbb_1x1_kxk, 'idconv1'):
k_1x1_kxk_first = module.dbb_1x1_kxk.idconv1.get_actual_kernel()
else:
k_1x1_kxk_first = module.dbb_1x1_kxk.conv1.weight
k_1x1_kxk_first, b_1x1_kxk_first = transI_fusebn(k_1x1_kxk_first, module.dbb_1x1_kxk.bn1)
k_1x1_kxk_second, b_1x1_kxk_second = transI_fusebn(module.dbb_1x1_kxk.conv2.weight, module.dbb_1x1_kxk.bn2)
k_1x1_kxk_merged, b_1x1_kxk_merged = transIII_1x1_kxk(k_1x1_kxk_first, b_1x1_kxk_first, k_1x1_kxk_second,
b_1x1_kxk_second, groups=module.groups)
k_avg = transV_avg(module.out_channels, module.kernel_size, module.groups)
k_1x1_avg_second, b_1x1_avg_second = transI_fusebn(k_avg.to(module.dbb_avg.avgbn.weight.device),
module.dbb_avg.avgbn)
if hasattr(module.dbb_avg, 'conv'):
k_1x1_avg_first, b_1x1_avg_first = transI_fusebn(module.dbb_avg.conv.weight, module.dbb_avg.bn)
k_1x1_avg_merged, b_1x1_avg_merged = transIII_1x1_kxk(k_1x1_avg_first, b_1x1_avg_first, k_1x1_avg_second,
b_1x1_avg_second, groups=module.groups)
else:
k_1x1_avg_merged, b_1x1_avg_merged = k_1x1_avg_second, b_1x1_avg_second
return transII_addbranch((k_origin, k_1x1, k_1x1_kxk_merged, k_1x1_avg_merged),
(b_origin, b_1x1, b_1x1_kxk_merged, b_1x1_avg_merged))
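# Illustrative deployment sketch (not part of the original module): fuse a
# trained DiverseBranchBlock into a single Conv2d for inference.  The
# `in_channels`, `stride` and `padding` attributes assumed on the block below
# are not guaranteed by this file and are assumptions made for this example.
def fuse_to_plain_conv(module):
    import torch.nn as nn
    kernel, bias = get_equivalent_kernel_bias(module)
    conv = nn.Conv2d(module.in_channels,
                     module.out_channels,
                     module.kernel_size,
                     stride=module.stride,
                     padding=module.padding,
                     groups=module.groups,
                     bias=True)
    # Copy the fused parameters into the plain convolution.
    conv.weight.data = kernel
    conv.bias.data = bias
    return conv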
|
Medium/Alphabets in order with numbers/alphabets.py | anishsingh42/CodeChef | 127 | 11172183 | def alpha(a):
c = 0
for i in range(len(a)):
if i==ord(a[i])-97 or i==ord(a[i])-65:
c+=1
return c
a = input()
print(alpha(a))
|
client/pac_websrv.py | HackademicsForum/pacdoor | 162 | 11172190 | <reponame>HackademicsForum/pacdoor<filename>client/pac_websrv.py
#!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import SimpleHTTPServer
import SocketServer
import sys
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, SafeBreach"
##########
# Consts #
##########
DEFAULT_PORT = 8080
#############
# Functions #
#############
def main():
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
Handler.extensions_map['.pac'] = 'application/x-ns-proxy-autoconfig'
try:
port = int(sys.argv[1])
except:
port = DEFAULT_PORT
print "Serving at port %s/tcp ..." % port
httpd = SocketServer.TCPServer(("", port), Handler)
httpd.serve_forever()
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main())
|
nuplan/planning/training/modeling/objectives/test/test_agents_imitation_objective.py | motional/nuplan-devkit | 128 | 11172205 | import unittest
from typing import List
import numpy as np
import numpy.typing as npt
import torch
from nuplan.planning.training.modeling.objectives.agents_imitation_objective import AgentsImitationObjective
from nuplan.planning.training.preprocessing.features.agents_trajectories import AgentsTrajectories
class TestAgentImitationObjective(unittest.TestCase):
"""Test agent imitation objective."""
def setUp(self) -> None:
"""Set up test case."""
self.target_data: List[npt.NDArray[np.float32]] = [
np.array(
[
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
]
)
]
self.prediction_data: List[npt.NDArray[np.float32]] = [
np.array(
[
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]],
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]],
]
)
]
self.objective = AgentsImitationObjective()
def test_compute_loss(self) -> None:
"""
Test loss computation
"""
prediction = AgentsTrajectories(data=self.prediction_data)
target = AgentsTrajectories(data=self.target_data)
loss = self.objective.compute(
{"agents_trajectory": prediction.to_feature_tensor()}, {"agents_trajectory": target.to_feature_tensor()}
)
self.assertEqual(loss, torch.tensor(0.5))
def test_zero_loss(self) -> None:
"""
Test perfect prediction. The loss should be zero
"""
target = AgentsTrajectories(data=self.target_data)
loss = self.objective.compute(
{"agents_trajectory": target.to_feature_tensor()}, {"agents_trajectory": target.to_feature_tensor()}
)
self.assertEqual(loss, torch.tensor(0.0))
if __name__ == '__main__':
unittest.main()
|
warehouse/tests/unit/test_on_order_events.py | Al-bambino/aws-serverless-ecommerce-platform | 758 | 11172211 | import copy
import datetime
import random
import uuid
from botocore import stub
import pytest
from fixtures import context, lambda_module, get_order, get_product # pylint: disable=import-error
from helpers import mock_table # pylint: disable=import-error,no-name-in-module
METADATA_KEY = "__metadata"
lambda_module = pytest.fixture(scope="module", params=[{
"function_dir": "on_order_events",
"module_name": "main",
"environ": {
"ENVIRONMENT": "test",
"METADATA_KEY": METADATA_KEY,
"TABLE_NAME": "TABLE_NAME",
"ORDERS_LIMIT": "20",
"POWERTOOLS_TRACE_DISABLED": "true"
}
}])(lambda_module)
context = pytest.fixture(context)
@pytest.fixture(scope="module")
def order(get_order):
return get_order()
@pytest.fixture()
def order_metadata(order):
return {
"orderId": order["orderId"],
"productId": METADATA_KEY,
"modifiedDate": order["modifiedDate"],
"status": "NEW"
}
@pytest.fixture
def order_products(order):
return [
{
"orderId": order["orderId"],
"productId": product["productId"],
"quantity": product["quantity"]
}
for product in order["products"]
]
def test_get_diff(lambda_module, get_product):
"""
Test get_diff()
"""
def _get_product():
product = get_product()
product["quantity"] = random.randint(1, 10)
return product
old_products = [_get_product() for _ in range(5)]
new_products = copy.deepcopy(old_products[1:]) + [_get_product()]
new_products[0]["quantity"] += 10
response = lambda_module.get_diff(old_products, new_products)
assert "created" in response
assert len(response["created"]) == 1
assert response["created"][0] == new_products[-1]
assert "deleted" in response
assert len(response["deleted"]) == 1
assert response["deleted"][0] == old_products[0]
assert "modified" in response
assert len(response["modified"]) == 1
assert response["modified"][0] == new_products[0]
def test_get_metadata(lambda_module, order_metadata):
"""
Test get_metadata()
"""
table = mock_table(
lambda_module.table, "get_item",
["orderId", "productId"],
items=order_metadata
)
response = lambda_module.get_metadata(order_metadata["orderId"])
table.assert_no_pending_responses()
table.deactivate()
assert response == order_metadata
def test_get_products(lambda_module, order, order_products):
"""
Test get_products()
"""
table = mock_table(
lambda_module.table, "query",
["orderId", "productId"],
items=order_products
)
response = lambda_module.get_products(order["orderId"])
table.assert_no_pending_responses()
table.deactivate()
assert response == order_products
def test_get_products_next(lambda_module, order, order_products):
"""
Test get_products() with a LastEvaluatedKey value
"""
table = mock_table(
lambda_module.table, "query",
["orderId", "productId"],
response={
"Items": order_products,
"LastEvaluatedKey": {
"orderId": {"S": order_products[-1]["orderId"]},
"productId": {"S": order_products[-1]["productId"]}
}
},
items=order_products
)
mock_table(
table, "query",
["orderId", "productId"],
expected_params={
"TableName": lambda_module.table.name,
"KeyConditionExpression": stub.ANY,
"Limit": 100,
"ExclusiveStartKey": {
"orderId": order_products[-1]["orderId"],
"productId": order_products[-1]["productId"]
}
},
items=order_products
)
response = lambda_module.get_products(order["orderId"])
table.assert_no_pending_responses()
table.deactivate()
assert response == order_products + order_products
def test_delete_metadata(lambda_module, order_metadata):
"""
Test delete_metadata()
"""
table = mock_table(
lambda_module.table, "delete_item",
["orderId", "productId"],
items=order_metadata
)
lambda_module.delete_metadata(order_metadata["orderId"])
table.assert_no_pending_responses()
table.deactivate()
def test_delete_products(lambda_module, order, order_products):
"""
Test delete_products()
"""
table = mock_table(
lambda_module.table, "batch_write_item",
["orderId", "productId"],
items=[
{"DeleteRequest": {"Key": {
"orderId": product["orderId"],
"productId": product["productId"]
}}}
for product in order_products
]
)
products = order["products"] + [{"orderId": order["orderId"], "productId": METADATA_KEY}]
lambda_module.delete_products(order["orderId"], products)
table.assert_no_pending_responses()
table.deactivate()
def test_save_metadata(lambda_module, order_metadata):
"""
Test save_metadata()
"""
item = copy.deepcopy(order_metadata)
item["newDate"] = order_metadata["modifiedDate"]
table = mock_table(
lambda_module.table, "put_item",
["orderId", "productId"],
items=item
)
lambda_module.save_metadata(
order_metadata["orderId"],
order_metadata["modifiedDate"],
order_metadata["status"]
)
table.assert_no_pending_responses()
table.deactivate()
def test_save_products(lambda_module, order, order_products):
"""
Test save_products()
"""
table = mock_table(
lambda_module.table, "batch_write_item",
["orderId", "productId"],
items=[
{"PutRequest": {"Item": product}}
for product in order_products
]
)
lambda_module.save_products(order["orderId"], order["products"])
table.assert_no_pending_responses()
table.deactivate()
def test_update_products_new(lambda_module, order, order_products):
"""
Test update_products() with new products only
"""
table = mock_table(
lambda_module.table, "batch_write_item",
["orderId", "productId"],
items=[
{"PutRequest": {"Item": product}}
for product in order_products
]
)
lambda_module.update_products(order["orderId"], [], order["products"])
table.assert_no_pending_responses()
table.deactivate()
def test_update_products_old(lambda_module, order, order_products):
"""
Test update_products() with old products only
"""
table = mock_table(
lambda_module.table, "batch_write_item",
["orderId", "productId"],
items=[
{"DeleteRequest": {"Key": {
"orderId": product["orderId"],
"productId": product["productId"]
}}}
for product in order_products
]
)
lambda_module.update_products(order["orderId"], order["products"], [])
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_created(lambda_module, order, order_products, order_metadata):
"""
Test on_order_created()
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"]
)
mock_table(
table, "batch_write_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=[
{"PutRequest": {"Item": product}}
for product in order_products
]
)
order_metadata = copy.deepcopy(order_metadata)
order_metadata["newDate"] = order_metadata["modifiedDate"]
mock_table(
table, "put_item", ["orderId", "productId"],
table_name=lambda_module.table.name,
items=order_metadata
)
lambda_module.on_order_created(order)
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_created_idempotent(lambda_module, order, order_metadata):
"""
Test on_order_created() with an existing item
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"],
items=order_metadata
)
lambda_module.on_order_created(order)
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_modified_new(lambda_module, order, order_products, order_metadata):
"""
Test on_order_modified() with a new event
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"]
)
mock_table(
table, "batch_write_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=[
{"PutRequest": {"Item": product}}
for product in order_products
]
)
order_metadata = copy.deepcopy(order_metadata)
order_metadata["newDate"] = order_metadata["modifiedDate"]
mock_table(
table, "put_item", ["orderId", "productId"],
table_name=lambda_module.table.name,
items=order_metadata
)
lambda_module.on_order_modified(order, order)
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_modified_idempotent(lambda_module, order, order_metadata):
"""
Test on_order_modified() with an already processed event
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"],
items=order_metadata
)
lambda_module.on_order_modified(order, order)
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_deleted(lambda_module, order, order_products, order_metadata):
"""
Test on_order_deleted()
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"],
items=order_metadata
)
mock_table(
table, "batch_write_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=[
{"DeleteRequest": {"Key": {
"orderId": product["orderId"],
"productId": product["productId"]
}}}
for product in order_products
]
)
mock_table(
table, "delete_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=order_metadata
)
lambda_module.on_order_deleted(order)
table.assert_no_pending_responses()
table.deactivate()
def test_on_order_deleted_idempotent(lambda_module, order):
"""
Test on_order_deleted() with an already deleted item
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"]
)
lambda_module.on_order_deleted(order)
table.assert_no_pending_responses()
table.deactivate()
def test_handler_created(lambda_module, context, order, order_products, order_metadata):
"""
Test handler() with OrderCreated
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"]
)
mock_table(
table, "batch_write_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=[
{"PutRequest": {"Item": product}}
for product in order_products
]
)
order_metadata = copy.deepcopy(order_metadata)
order_metadata["newDate"] = order_metadata["modifiedDate"]
mock_table(
table, "put_item", ["orderId", "productId"],
table_name=lambda_module.table.name,
items=order_metadata
)
lambda_module.handler({
"source": "ecommerce.orders",
"resources": [order["orderId"]],
"detail-type": "OrderCreated",
"detail": order
}, context)
table.assert_no_pending_responses()
table.deactivate()
def test_handler_deleted(lambda_module, context, order, order_products, order_metadata):
"""
Test handler() with OrderDeleted
"""
table = mock_table(
lambda_module.table, "get_item", ["orderId", "productId"],
items=order_metadata
)
mock_table(
table, "batch_write_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=[
{"DeleteRequest": {"Key": {
"orderId": product["orderId"],
"productId": product["productId"]
}}}
for product in order_products
]
)
mock_table(
table, "delete_item",
["orderId", "productId"],
table_name=lambda_module.table.name,
items=order_metadata
)
lambda_module.handler({
"source": "ecommerce.orders",
"resources": [order["orderId"]],
"detail-type": "OrderDeleted",
"detail": order
}, context)
table.assert_no_pending_responses()
table.deactivate() |
genomepy/plugins/star.py | tilschaef/genomepy | 146 | 11172244 | <reponame>tilschaef/genomepy<filename>genomepy/plugins/star.py<gh_stars>100-1000
import os
from loguru import logger
from genomepy.files import extracted_file
from genomepy.plugins import Plugin
from genomepy.utils import cmd_ok, mkdir_p, rm_rf, run_index_cmd
class StarPlugin(Plugin):
def after_genome_download(self, genome, threads=1, force=False):
index_name = genome.plugin["star"]["index_name"]
if not cmd_ok("STAR") or (os.path.exists(index_name) and not force):
return
index_dir = genome.plugin["star"]["index_dir"]
rm_rf(index_dir)
mkdir_p(index_dir)
# gunzip genome if bgzipped and return up-to-date genome name
with extracted_file(genome.filename) as fname:
# index command
cmd = (
f"STAR --runMode genomeGenerate --runThreadN {threads} "
+ f"--genomeFastaFiles {fname} --genomeDir {index_dir} "
+ f"--outFileNamePrefix {index_dir}"
)
# if an annotation is present, generate a splice-aware index
gtf_file = genome.annotation_gtf_file
if gtf_file:
with extracted_file(gtf_file) as _gtf_file:
# update index command with annotation
cmd += f" --sjdbGTFfile {_gtf_file}"
# Create index
run_index_cmd("star", cmd)
else:
logger.info("Creating STAR index without annotation file.")
# Create index
run_index_cmd("star", cmd)
def get_properties(self, genome):
props = {
"index_dir": os.path.join(
os.path.dirname(genome.filename), "index", "star"
),
"index_name": os.path.join(
os.path.dirname(genome.filename), "index", "star", "SA"
),
}
return props
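# Example (sketch) of the index command this plugin builds, assuming threads=4,
# no annotation file, and a genome at /data/hg38/hg38.fa (so index_dir is
# /data/hg38/index/star):
#   STAR --runMode genomeGenerate --runThreadN 4 --genomeFastaFiles /data/hg38/hg38.fa \
#        --genomeDir /data/hg38/index/star --outFileNamePrefix /data/hg38/index/star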
|
scale/node/test/test_views.py | kaydoh/scale | 121 | 11172265 | <filename>scale/node/test/test_views.py<gh_stars>100-1000
from __future__ import unicode_literals
import json
import django
from rest_framework import status
import node.test.utils as node_test_utils
from rest_framework.test import APITransactionTestCase
from scheduler.models import Scheduler
from util import rest
class TestNodesViewV6(APITransactionTestCase):
def setUp(self):
django.setup()
rest.login_client(self.client)
self.node1 = node_test_utils.create_node()
self.node2 = node_test_utils.create_node()
def test_nodes_view(self):
"""Test the REST call to retrieve a list of nodes"""
url = '/v6/nodes/'
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
results = json.loads(response.content)
self.assertEqual(len(results['results']), 2)
for entry in results['results']:
if entry['id'] == self.node1.id:
self.assertEqual(entry['hostname'], self.node1.hostname)
elif entry['id'] == self.node2.id:
self.assertEqual(entry['hostname'], self.node2.hostname)
else:
self.fail('Unexpected node in results: %i' % entry['id'])
class TestNodesViewEmptyV6(APITransactionTestCase):
def setUp(self):
django.setup()
rest.login_client(self.client)
def test_nodes_view(self):
""" test the REST call to retrieve an empty list of nodes"""
url = '/v6/nodes/'
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
results = json.loads(response.content)
self.assertEqual(len(results['results']), 0)
class TestNodeDetailsViewV6(APITransactionTestCase):
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
self.node1 = node_test_utils.create_node()
self.node2 = node_test_utils.create_node()
self.node3 = node_test_utils.create_node()
Scheduler.objects.create(id=1)
def test_get_node_success(self):
"""Test successfully calling the Get Node method."""
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
result = json.loads(response.content)
self.assertIn('hostname', result)
self.assertEqual(result['hostname'], self.node2.hostname)
def test_get_node_not_found(self):
"""Test calling the Get Node method with a bad node id."""
url = '/v6/nodes/9999/'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
def test_update_node_success(self):
"""Test successfully calling the Update Node method."""
json_data = {
'is_paused': True,
'pause_reason': 'Test reason',
}
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
def test_update_node_unpause(self):
"""Tests unpausing the node and specifying a reason."""
json_data = {'is_paused': False, 'pause_reason': 'Test reason'}
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
def test_update_node_not_found(self):
"""Test calling the Update Node method with a bad node id."""
json_data = {
'is_paused': False,
}
url = '/v6/nodes/9999/'
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
def test_update_node_no_fields(self):
"""Test calling the Update Node method with no fields."""
json_data = {}
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
def test_update_node_extra_fields(self):
"""Test calling the Update Node method with extra fields."""
json_data = {
'foo': 'bar',
}
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
def test_update_active(self):
"""Test successfully deactivating a node."""
json_data = {
'is_active': False,
}
url = '/v6/nodes/%d/' % self.node2.id
response = self.client.patch(url, json_data, 'json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
|
ctpn_crnn_ocr/demo.py | shijieS/Scene-Text-Understanding | 380 | 11172272 | from ctpnport import *
from crnnport import *
#ctpn
text_detector = ctpnSource()
#crnn
model,converter = crnnSource()
timer=Timer()
print "\ninput exit break\n"
while 1 :
im_name = raw_input("\nplease input file name:")
if im_name == "exit":
break
im_path = "./img/" + im_name
im = cv2.imread(im_path)
if im is None:
continue
timer.tic()
img,text_recs = getCharBlock(text_detector,im)
crnnRec(model,converter,img,text_recs)
print "Time: %f"%timer.toc()
cv2.waitKey(0)
|
pytorch_wrapper/modules/transformer_encoder_block.py | skatsaounis/pytorch-wrapper | 111 | 11172280 | import torch.nn as nn
from . import LayerNorm, MultiHeadAttention
from .. import functional as pwF
class TransformerEncoderBlock(nn.Module):
"""
Transformer Encoder Block (https://arxiv.org/pdf/1706.03762.pdf).
"""
def __init__(self, time_step_size, heads, out_mlp, dp=0, is_end_padded=True):
"""
:param time_step_size: Time step size.
:param heads: Number of attention heads.
:param out_mlp: MLP that will be performed after the attended sequence is generated.
:param dp: Dropout probability.
:param is_end_padded: Whether to mask at the end.
"""
super(TransformerEncoderBlock, self).__init__()
self._norm_1 = LayerNorm(time_step_size)
self._norm_2 = LayerNorm(time_step_size)
self._q_linear = nn.Linear(time_step_size, time_step_size)
self._v_linear = nn.Linear(time_step_size, time_step_size)
self._k_linear = nn.Linear(time_step_size, time_step_size)
self._attn = MultiHeadAttention(time_step_size, time_step_size, time_step_size, heads, 'dot', dp, is_end_padded)
self._att_out_linear = nn.Linear(time_step_size, time_step_size)
self._out_mlp = out_mlp
self._dropout_1 = nn.Dropout(dp)
self._dropout_2 = nn.Dropout(dp)
self._is_end_padded = is_end_padded
def forward(self, batch_sequences, batch_sequence_lengths):
"""
:param batch_sequences: batch_sequences: 3D Tensor (batch_size, sequence_length, time_step_size).
:param batch_sequence_lengths: 1D Tensor (batch_size) containing the lengths of the sequences.
:return: 3D Tensor (batch_size, sequence_length, time_step_size).
"""
q = self._q_linear(batch_sequences)
k = self._k_linear(batch_sequences)
v = self._v_linear(batch_sequences)
batch_sequences = batch_sequences + self._att_out_linear(
self._attn(
q,
k,
v,
batch_sequence_lengths,
batch_sequence_lengths
)['output']
)
batch_sequences = self._norm_1(self._dropout_1(batch_sequences))
batch_sequences = batch_sequences + self._out_mlp(batch_sequences)
batch_sequences = self._norm_2(self._dropout_2(batch_sequences))
mask = pwF.create_mask_from_length(
batch_sequence_lengths,
batch_sequences.shape[1],
self._is_end_padded
).unsqueeze(-1)
batch_sequences = batch_sequences.masked_fill(mask == 0, 0)
return batch_sequences
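if __name__ == '__main__':
    # Minimal usage sketch (hyper-parameters and shapes below are illustrative
    # assumptions, not values prescribed by pytorch-wrapper): out_mlp can be any
    # module that maps time_step_size back to time_step_size.
    import torch
    block = TransformerEncoderBlock(
        time_step_size=16,
        heads=4,
        out_mlp=nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16)),
        dp=0.1
    )
    x = torch.randn(2, 10, 16)        # (batch_size, sequence_length, time_step_size)
    lengths = torch.tensor([10, 7])   # valid time steps per sequence
    out = block(x, lengths)
    print(out.shape)                  # expected: torch.Size([2, 10, 16])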
|
tests/test_evpn_tunnel_p2mp.py | gfreewind/sonic-swss | 132 | 11172296 | from evpn_tunnel import VxlanTunnel
DVS_ENV = ["HWSKU=Mellanox-SN2700"]
class TestVxlanOrchP2MP(object):
def get_vxlan_obj(self):
return VxlanTunnel()
# Test 1 - Create and Delete SIP Tunnel and Map entries
def test_p2mp_tunnel(self, dvs, testlog):
vxlan_obj = self.get_vxlan_obj()
tunnel_name = 'tunnel_1'
map_name = 'map_1000_100'
map_name_1 = 'map_1001_101'
map_name_2 = 'map_1002_102'
vxlan_obj.fetch_exist_entries(dvs)
vxlan_obj.create_vlan1(dvs,"Vlan100")
vxlan_obj.create_vlan1(dvs,"Vlan101")
vxlan_obj.create_vlan1(dvs,"Vlan102")
vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102')
vlanlist = ['100', '101', '102']
vnilist = ['1000', '1001', '1002']
print("Testing SIP Tunnel Creation")
vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist)
print("Testing Tunnel Map Entry")
vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist)
print("Testing Tunnel Map entry removal")
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100')
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101')
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102')
vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist)
print("Testing SIP Tunnel Deletion")
vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name)
vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6')
# Test 2 - Vlan extension Tests
def test_vlan_extension(self, dvs, testlog):
vxlan_obj = self.get_vxlan_obj()
tunnel_name = 'tunnel_2'
map_name = 'map_1000_100'
map_name_1 = 'map_1001_101'
map_name_2 = 'map_1002_102'
vlanlist = ['100', '101', '102']
vnilist = ['1000', '1001', '1002']
vxlan_obj.fetch_exist_entries(dvs)
vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101')
vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102')
vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist)
vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist)
vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name)
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000')
print("Testing VLAN 100 extension")
vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7')
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001')
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan102', '7.7.7.7', '1002')
print("Testing VLAN 101 extension")
vxlan_obj.check_vlan_extension_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7')
print("Testing VLAN 102 extension")
vxlan_obj.check_vlan_extension_p2mp(dvs, '102', '6.6.6.6', '7.7.7.7')
print("Testing another remote endpoint to 8.8.8.8")
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000')
print("Testing remote endpoint creation to 8.8.8.8")
print("Testing VLAN 100 extension to 8.8.8.8 and 7.7.7.7")
vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8')
vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7')
print("Testing Vlan Extension removal")
vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7')
vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7')
vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7')
vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7')
print("Testing Last Vlan removal and remote endpoint delete")
vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan102', '7.7.7.7')
vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '102', '6.6.6.6', '7.7.7.7')
print("Testing Last Vlan removal and remote endpoint delete for 8.8.8.8")
vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8')
vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8')
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100')
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101')
vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102')
vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist)
print("Testing SIP Tunnel Deletion")
vxlan_obj.remove_evpn_nvo(dvs, 'nvo1')
vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name)
vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6')
|
figures/modeling/arrows.py | patricknaughton01/RoboticSystemsBook | 116 | 11172328 | <gh_stars>100-1000
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from klampt.math import so3,se3,vectorops
class Arrow3D(FancyArrowPatch):
def __init__(self, start, end, shrinkA=0.0, shrinkB=0.0, mutation_scale=20, arrowstyle="-|>", color='k', lw=1, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, shrinkA=shrinkA, shrinkB=shrinkB, mutation_scale=mutation_scale, arrowstyle=arrowstyle, color=color, lw=lw, **kwargs)
        self._verts3d = list(zip(start, end))  # materialise so repeated draw() calls keep working
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
def add_line(ax,a,b,linestyle=None, color='k',lw=1,*args,**kwargs):
"""Draws a default line between a and b on the plot ax"
ax.plot([a[0],b[0]], [a[1],b[1]], [a[2],b[2]], linestyle, *args, color=color, lw=lw, **kwargs)
def add_arrow(ax,a,b,linestyle=None,*args,**kwargs):
"""Draws a default arrow from a to b on the plot ax"
a = Arrow3D(a,b, linestyle=linestyle, *args, **kwargs)
ax.add_artist(a)
def add_coordinate_transform(ax,R,t,size=1.0,*args,**kwargs):
"""Draws a coordinate transform on the plot ax"""
axes = so3.matrix(so3.transpose(R))
colors = ['r','g','b']
for (v,c) in zip(axes,colors):
a = Arrow3D(t, vectorops.madd(t,v,size), lw=1, color=c, *args, **kwargs)
ax.add_artist(a)
if __name__ == '__main__':
####################################################
# This part is just for reference if
# you are interested where the data is
# coming from
# The plot is at the bottom
#####################################################
import numpy as np
from numpy import *
# Generate some example data
mu_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20)
mu_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20)
# concatenate data for PCA
samples = np.concatenate((class1_sample, class2_sample), axis=0)
# mean values
mean_x = mean(samples[:,0])
mean_y = mean(samples[:,1])
mean_z = mean(samples[:,2])
#eigenvectors and eigenvalues
eig_val, eig_vec = np.linalg.eig(cov_mat1)
################################
#plotting eigenvectors
################################
fig = plt.figure(figsize=(4,4),dpi=150)
ax = fig.add_subplot(111, projection='3d')
ax.plot(samples[:,0], samples[:,1], samples[:,2], 'o', markersize=10, color='g', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
colors = ['r','g','b']
for c,v in zip(colors,eig_vec):
a = Arrow3D([mean_x, mean_y, mean_z], v, color=c)
ax.add_artist(a)
add_line(ax,[0,0,0],[1,1,1],'--',lw=1)
add_arrow(ax,[1,0,0],[2,1,1],'dashed',lw=1)
ax.set_xlabel(r'$\alpha x_values$')
ax.set_ylabel('y_values')
ax.set_zlabel('z_values')
plt.title('Eigenvectors')
plt.draw()
plt.show()
|
eth/vm/forks/berlin/__init__.py | dbfreem/py-evm | 1,641 | 11172345 | from typing import (
Type,
)
from eth.rlp.blocks import BaseBlock
from eth.vm.forks import (
MuirGlacierVM,
)
from eth.vm.state import BaseState
from .blocks import BerlinBlock
from .headers import (
compute_berlin_difficulty,
configure_berlin_header,
create_berlin_header_from_parent,
)
from .state import BerlinState
class BerlinVM(MuirGlacierVM):
# fork name
fork = 'berlin'
# classes
block_class: Type[BaseBlock] = BerlinBlock
_state_class: Type[BaseState] = BerlinState
# Methods
create_header_from_parent = staticmethod(create_berlin_header_from_parent) # type: ignore
compute_difficulty = staticmethod(compute_berlin_difficulty) # type: ignore
configure_header = configure_berlin_header
|
f5/bigip/tm/sys/test/unit/test_failover.py | nghia-tran/f5-common-python | 272 | 11172350 | <reponame>nghia-tran/f5-common-python<filename>f5/bigip/tm/sys/test/unit/test_failover.py<gh_stars>100-1000
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.tm.sys import Failover
from f5.sdk_exception import ExclusiveAttributesPresent
import mock
import pytest
@pytest.fixture
def FakeFailover():
fake_sys = mock.MagicMock()
fake_fail = Failover(fake_sys)
fake_fail._meta_data['bigip'].tmos_version = '11.6.0'
return fake_fail
class TestFailover(object):
def test_exclusive_attr(self):
fl = FakeFailover()
with pytest.raises(ExclusiveAttributesPresent) as err:
fl.exec_cmd('run', online=True, standby=True)
assert 'Mutually exclusive arguments submitted' \
in err.value.message
|
docs/manual/gears/examples/reduce_xor.py | bogdanvuk/pygears | 120 | 11172353 | from pygears.lib import reduce, drv, check
from pygears.typing import Queue, Uint
drv(t=Queue[Uint[8]], seq=[[0xff, 0xff, 0xff, 0xff]]) \
| reduce(init=Uint[8](0), f=lambda x, y: x ^ y) \
| check(ref=[0])
|
arm/machine.py | kevinyuan/pydgin | 159 | 11172364 | <reponame>kevinyuan/pydgin
#=======================================================================
# machine.py
#=======================================================================
from pydgin.machine import Machine
from pydgin.storage import RegisterFile
from pydgin.debug import pad, pad_hex
from pydgin.utils import r_uint, specialize
#-----------------------------------------------------------------------
# State
#-----------------------------------------------------------------------
class State( Machine ):
_virtualizable_ = ['pc', 'num_insts', 'N', 'Z', 'C', 'V']
def __init__( self, memory, debug, reset_addr=0x400 ):
Machine.__init__(self,
memory,
ArmRegisterFile( self, num_regs=16 ),
debug,
reset_addr=reset_addr )
self.rf[ 15 ] = self.pc
# current program status register (CPSR)
self.N = r_uint( 0b0 ) # Negative condition
self.Z = r_uint( 0b0 ) # Zero condition
self.C = r_uint( 0b0 ) # Carry condition
self.V = r_uint( 0b0 ) # Overflow condition
#self.J = 0b0 # Jazelle state flag
#self.I = 0b0 # IRQ Interrupt Mask
#self.F = 0b0 # FIQ Interrupt Mask
#self.T = 0b0 # Thumb state flag
#self.M = 0b00000 # Processor Mode
# processor modes:
# 0b10000 usr
# 0b10001 fiq
# 0b10010 irq
# 0b10011 svc (supervisor)
# 0b10111 abt (abort)
# 0b11011 und (undefined)
# 0b11111 sys
self.mode = r_uint( 0b10000 )
# syscall stuff... TODO: should this be here?
self.breakpoint = 0
def fetch_pc( self ):
return self.pc
def cpsr( self ):
return ( r_uint( self.N ) << 31 ) | \
( r_uint( self.Z ) << 30 ) | \
( r_uint( self.C ) << 29 ) | \
( r_uint( self.V ) << 28 ) | \
( r_uint( self.mode ) )
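  # Example (sketch): with N=1, C=1, Z=V=0 in user mode (0b10000), cpsr() packs
  # to 0xA0000010 (bit 31 = N, bit 30 = Z, bit 29 = C, bit 28 = V, low bits = mode).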
#-----------------------------------------------------------------------
# ArmRegisterFile
#-----------------------------------------------------------------------
class ArmRegisterFile( RegisterFile ):
def __init__( self, state, num_regs=16 ):
RegisterFile.__init__( self, constant_zero=False, num_regs=num_regs )
self.state = state
def __getitem__( self, idx ):
# special-case for idx = 15 which is the pc
if self.debug.enabled( "rf" ):
if idx == 15:
rd_str = pad_hex( self.state.pc ) + "+ 8"
else:
rd_str = pad_hex( self.regs[idx] )
print ':: RD.RF[%s] = %s' % ( pad( "%d" % idx, 2 ), rd_str ),
if idx == 15:
return self.state.pc + 8
else:
return self.regs[idx]
@specialize.argtype(2)
def __setitem__( self, idx, value ):
value = r_uint( value )
if idx == 15:
self.state.pc = value
if self.debug.enabled( "rf" ):
print ':: WR.RF[15] = %s' % ( pad_hex( value ) ),
else:
self.regs[idx] = value
if self.debug.enabled( "rf" ):
print ':: WR.RF[%s] = %s' % (
pad( "%d" % idx, 2 ),
pad_hex( value ) ),
# we also print the status flags on print_regs
def print_regs( self, per_row=6 ):
RegisterFile.print_regs( self, per_row )
print '%s%s%s%s' % (
'N' if self.state.N else '-',
'Z' if self.state.Z else '-',
'C' if self.state.C else '-',
'V' if self.state.V else '-'
)
|
wandb/trigger.py | borisgrafx/client | 3,968 | 11172395 | """Module to facilitate adding hooks to wandb actions
Usage:
import trigger
trigger.register('on_something', func)
trigger.call('on_something', *args, **kwargs)
trigger.unregister('on_something', func)
"""
_triggers = {}
def reset():
_triggers.clear()
def register(event, func):
_triggers.setdefault(event, []).append(func)
def call(event_str, *args, **kwargs):
for func in _triggers.get(event_str, []):
func(*args, **kwargs)
def unregister(event, func):
_triggers[event].remove(func)
|
app/ch16_mongodb/final/pypi_org/services/user_service.py | tbensonwest/data-driven-web-apps-with-flask | 496 | 11172400 | <filename>app/ch16_mongodb/final/pypi_org/services/user_service.py
from typing import Optional
from passlib.handlers.sha2_crypt import sha512_crypt as crypto
from pypi_org.nosql.users import User
def get_user_count() -> int:
return User.objects().count()
def find_user_by_email(email: str) -> Optional[User]:
return User.objects().filter(email=email).first()
def create_user(name: str, email: str, password: str) -> Optional[User]:
if find_user_by_email(email):
return None
user = User()
user.email = email
user.name = name
    user.hashed_password = hash_text(password)
user.save()
return user
def hash_text(text: str) -> str:
hashed_text = crypto.encrypt(text, rounds=171204)
return hashed_text
def verify_hash(hashed_text: str, plain_text: str) -> bool:
return crypto.verify(plain_text, hashed_text)
def login_user(email: str, password: str) -> Optional[User]:
user = find_user_by_email(email)
if not user:
return None
if not verify_hash(user.hashed_password, password):
return None
return user
def find_user_by_id(user_id: int) -> Optional[User]:
user = User.objects().filter(id=user_id).first()
return user
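# Usage sketch (names and values below are illustrative assumptions, not part of
# this module): a registration view would call create_user() and a sign-in view
# login_user(), e.g.
#   user = create_user("Ada", "ada@example.com", "s3cret")
#   assert login_user("ada@example.com", "s3cret") is not None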
|
all_data_augmentation/clipping_augmenter.py | LuChungYing/yt8mtest | 196 | 11172421 |
import tensorflow as tf
from tensorflow import flags
FLAGS = flags.FLAGS
class ClippingAugmenter:
"""This only works with frame data"""
def augment(self, model_input_raw, num_frames, labels_batch, **unused_params):
    assert FLAGS.frame_feature, \
        "AugmentationTransformer only works with frame feature"
feature_dim = len(model_input_raw.get_shape()) - 1
frame_dim = len(model_input_raw.get_shape()) - 2
max_frame = model_input_raw.get_shape().as_list()[frame_dim]
limit = tf.cast(tf.reduce_min(num_frames) / 4.0, tf.int32)
    offset = tf.random_uniform(shape=[], maxval=limit, dtype=tf.int32) % limit  # maxval added; random_uniform requires it for integer dtypes
    input_trans1 = tf.pad(model_input_raw[:,offset:,:], paddings=[[0,0],[0,offset],[0,0]])  # pad the frame axis back to its original length
num_frames_trans1 = num_frames - offset
num_frames_trans1 = tf.cast(
tf.random_uniform(shape=num_frames.shape, minval=0.75, maxval=1.0,
dtype=tf.float32)
* num_frames_trans1, tf.int32)
model_input = tf.concat([model_input_raw, input_trans1], axis=0)
labels_batch = tf.concat([labels_batch, labels_batch], axis=0)
num_frames = tf.concat([num_frames, num_frames_trans1], axis=0)
    return model_input, labels_batch, num_frames
|
tests/utils.py | odidev/dash-renderer | 109 | 11172493 | import time
TIMEOUT = 5 # Seconds
def invincible(func):
def wrap():
try:
return func()
except:
pass
return wrap
class WaitForTimeout(Exception):
"""This should only be raised inside the `wait_for` function."""
pass
def wait_for(condition_function, get_message=None, expected_value=None,
timeout=TIMEOUT, *args, **kwargs):
"""
Waits for condition_function to return truthy or raises WaitForTimeout.
:param (function) condition_function: Should return truthy or
expected_value on success.
:param (function) get_message: Optional failure message function
:param expected_value: Optional return value to wait for. If omitted,
success is any truthy value.
:param (float) timeout: max seconds to wait. Defaults to 5
:param args: Optional args to pass to condition_function.
:param kwargs: Optional kwargs to pass to condition_function.
if `timeout` is in kwargs, it will be used to override TIMEOUT
:raises: WaitForTimeout If condition_function doesn't return True in time.
Usage:
def get_element(selector):
# some code to get some element or return a `False`-y value.
selector = '.js-plotly-plot'
try:
wait_for(get_element, selector)
except WaitForTimeout:
self.fail('element never appeared...')
plot = get_element(selector) # we know it exists.
"""
def wrapped_condition_function():
"""We wrap this to alter the call base on the closure."""
if args and kwargs:
return condition_function(*args, **kwargs)
if args:
return condition_function(*args)
if kwargs:
return condition_function(**kwargs)
return condition_function()
start_time = time.time()
while time.time() < start_time + timeout:
condition_val = wrapped_condition_function()
if expected_value is None:
if condition_val:
return True
elif condition_val == expected_value:
return True
time.sleep(0.5)
if get_message:
message = get_message()
elif expected_value:
message = 'Final value: {}'.format(condition_val)
else:
message = ''
raise WaitForTimeout(message)
|
pycaret/tests/test_probability_threshold.py | hanaseleb/pycaret | 5,541 | 11172544 | <gh_stars>1000+
import os, sys
sys.path.insert(0, os.path.abspath(".."))
import pandas as pd
import pytest
import pycaret.classification
import pycaret.datasets
from pycaret.internal.meta_estimators import CustomProbabilityThresholdClassifier
def test():
# loading dataset
data = pycaret.datasets.get_data("juice")
assert isinstance(data, pd.core.frame.DataFrame)
# init setup
clf1 = pycaret.classification.setup(
data,
target="Purchase",
silent=True,
log_experiment=True,
html=False,
session_id=123,
n_jobs=1,
)
probability_threshold = 0.75
# compare models
top3 = pycaret.classification.compare_models(
n_select=100, exclude=["catboost"], probability_threshold=probability_threshold
)[:3]
assert isinstance(top3, list)
assert isinstance(top3[0], CustomProbabilityThresholdClassifier)
assert top3[0].probability_threshold == probability_threshold
# tune model
tuned_top3 = [pycaret.classification.tune_model(i, n_iter=3) for i in top3]
assert isinstance(tuned_top3, list)
assert isinstance(tuned_top3[0], CustomProbabilityThresholdClassifier)
assert tuned_top3[0].probability_threshold == probability_threshold
# ensemble model
bagged_top3 = [
pycaret.classification.ensemble_model(
i, probability_threshold=probability_threshold
)
for i in tuned_top3
]
assert isinstance(bagged_top3, list)
assert isinstance(bagged_top3[0], CustomProbabilityThresholdClassifier)
assert bagged_top3[0].probability_threshold == probability_threshold
# blend models
blender = pycaret.classification.blend_models(
top3, probability_threshold=probability_threshold
)
assert isinstance(blender, CustomProbabilityThresholdClassifier)
assert blender.probability_threshold == probability_threshold
# stack models
stacker = pycaret.classification.stack_models(
estimator_list=top3[1:],
meta_model=top3[0],
probability_threshold=probability_threshold,
)
assert isinstance(stacker, CustomProbabilityThresholdClassifier)
assert stacker.probability_threshold == probability_threshold
# calibrate model
calibrated = pycaret.classification.calibrate_model(estimator=top3[0])
assert isinstance(calibrated, CustomProbabilityThresholdClassifier)
assert calibrated.probability_threshold == probability_threshold
# plot model
lr = pycaret.classification.create_model(
"lr", probability_threshold=probability_threshold
)
pycaret.classification.plot_model(
lr, save=True
) # scale removed because build failed due to large image size
# select best model
best = pycaret.classification.automl()
assert isinstance(calibrated, CustomProbabilityThresholdClassifier)
assert calibrated.probability_threshold == probability_threshold
# hold out predictions
predict_holdout = pycaret.classification.predict_model(lr)
predict_holdout_0_5 = pycaret.classification.predict_model(
lr, probability_threshold=0.5
)
predict_holdout_0_75 = pycaret.classification.predict_model(
lr, probability_threshold=probability_threshold
)
assert isinstance(predict_holdout, pd.core.frame.DataFrame)
assert predict_holdout.equals(predict_holdout_0_75)
assert not predict_holdout.equals(predict_holdout_0_5)
# predictions on new dataset
predict_holdout = pycaret.classification.predict_model(lr, data=data)
predict_holdout_0_5 = pycaret.classification.predict_model(
lr, data=data, probability_threshold=0.5
)
predict_holdout_0_75 = pycaret.classification.predict_model(
lr, data=data, probability_threshold=probability_threshold
)
assert isinstance(predict_holdout, pd.core.frame.DataFrame)
assert predict_holdout.equals(predict_holdout_0_75)
assert not predict_holdout.equals(predict_holdout_0_5)
# finalize model
final_best = pycaret.classification.finalize_model(best)
assert isinstance(final_best, CustomProbabilityThresholdClassifier)
assert final_best.probability_threshold == probability_threshold
# save model
pycaret.classification.save_model(best, "best_model_23122019")
# load model
saved_best = pycaret.classification.load_model("best_model_23122019")
assert isinstance(saved_best._final_estimator, CustomProbabilityThresholdClassifier)
assert saved_best._final_estimator.probability_threshold == probability_threshold
assert 1 == 1
if __name__ == "__main__":
test()
|
timing/util/cell_timings.py | Keno/prjtrellis | 256 | 11172581 | #!/usr/bin/env python3
import parse_sdf
from os import path
import json
import sys
def include_cell(name, type):
return type.isupper() and "_" not in type
def rewrite_celltype(name, type):
return type
def rewrite_pin(name, type, pin):
return pin
def tupleise(x):
if type(x) is list:
return tuple(tupleise(_) for _ in x)
elif type(x) is tuple:
return tuple(tupleise(_) for _ in x)
elif type(x) is dict:
return "dict", tuple([(k, tupleise(v)) for k, v in sorted(x.items())])
else:
return x
def load_database(dbfile):
if not path.exists(dbfile):
database = {}
else:
database = {}
with open(dbfile, 'r') as dbf:
jsondb = json.load(dbf)
for cell, cdata in jsondb.items():
database[cell] = set()
for item in cdata:
database[cell].add(tupleise(item))
return database
def make_key(x):
if type(x) is tuple:
return ",".join(make_key(_) for _ in x)
else:
return str(x)
def save_database(dbfile, database):
jdb = {}
for cell, cdata in database.items():
jcdata = []
for dtype, dat in sorted(cdata, key=lambda x: make_key(x)):
assert dtype == "dict"
jcdata.append({k: v for k, v in dat})
jdb[cell] = jcdata
with open(dbfile, 'w') as dbf:
json.dump(jdb, dbf, indent=4, sort_keys=True)
def delay_tuple(delay):
return delay.minv, delay.typv, delay.maxv
def add_sdf_to_database(dbfile, sdffile, include_cell_predicate=include_cell, rewrite_cell_func=rewrite_celltype,
rewrite_pin_func=rewrite_pin):
db = load_database(dbfile)
sdf = parse_sdf.parse_sdf_file(sdffile)
for instname, cell in sdf.cells.items():
if not include_cell_predicate(cell.inst, cell.type):
continue
celltype = rewrite_cell_func(cell.inst, cell.type)
if celltype not in db:
db[celltype] = set()
for entry in cell.entries:
if type(entry) is parse_sdf.IOPath:
db[celltype].add(tupleise(
dict(type="IOPath", from_pin=rewrite_pin_func(cell.inst, cell.type, entry.from_pin),
to_pin=rewrite_pin_func(cell.inst, cell.type, entry.to_pin), rising=delay_tuple(entry.rising),
falling=delay_tuple(entry.falling))))
elif type(entry) is parse_sdf.SetupHoldCheck:
db[celltype].add(
tupleise(dict(type="SetupHold", pin=rewrite_pin_func(cell.inst, cell.type, entry.pin),
clock=rewrite_pin_func(cell.inst, cell.type, entry.clock),
setup=delay_tuple(entry.setup),
hold=delay_tuple(entry.hold))))
elif type(entry) is parse_sdf.WidthCheck:
db[celltype].add(tupleise(dict(type="Width", clock=rewrite_pin_func(cell.inst, cell.type, entry.clock),
width=delay_tuple(entry.width))))
else:
assert False
save_database(dbfile, db)
def main(argv):
if len(argv) < 3:
print("Usage: cell_timings.py database.json design.sdf")
return 2
add_sdf_to_database(argv[1], argv[2])
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
aiida/storage/psql_dos/migrations/versions/django_0018_django_1_11.py | mkrack/aiida-core | 153 | 11172590 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Change UUID type and add uniqueness constraints.
Revision ID: django_0018
Revises: django_0017
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from aiida.storage.psql_dos.migrations.utils import ReflectMigrations
from aiida.storage.psql_dos.migrations.utils.duplicate_uuids import verify_uuid_uniqueness
revision = 'django_0018'
down_revision = 'django_0017'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
reflect = ReflectMigrations(op)
reflect.drop_indexes('db_dbnode', 'uuid') # db_dbnode_uuid_62e0bf98_like
for table, unique in (
('db_dbcomment', 'db_dbcomment_uuid_49bac08c_uniq'),
('db_dbcomputer', 'db_dbcomputer_uuid_f35defa6_uniq'),
('db_dbgroup', 'db_dbgroup_uuid_af896177_uniq'),
('db_dbnode', None),
('db_dbworkflow', 'db_dbworkflow_uuid_08947ee2_uniq'),
):
op.alter_column(
table,
'uuid',
existing_type=sa.VARCHAR(length=36),
type_=postgresql.UUID(as_uuid=True),
nullable=False,
postgresql_using='uuid::uuid'
)
if unique:
verify_uuid_uniqueness(table, op.get_bind())
op.create_unique_constraint(unique, table, ['uuid'])
op.create_unique_constraint('db_dbuser_email_30150b7e_uniq', 'db_dbuser', ['email'])
op.create_index(
'db_dbuser_email_30150b7e_like',
'db_dbuser',
['email'],
postgresql_using='btree',
postgresql_ops={'email': 'varchar_pattern_ops'},
)
def downgrade():
"""Migrations for the downgrade."""
raise NotImplementedError('Downgrade of django_0018.')
|
dbaas/logical/management/commands/remove_quarantineDB.py | didindinn/database-as-a-service | 303 | 11172599 | from django.core.management.base import BaseCommand
import logging
from logical.models import Database
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
remove database automatically when quarantine date expired
'''
def handle(self, *args, **options):
Database.purge_quarantine()
|
vendor-local/lib/python/south/tests/non_managed/models.py | glogiotatidis/affiliates | 285 | 11172617 | # -*- coding: UTF-8 -*-
"""
An app with a model that is not managed for testing that South does
not try to manage it in any way
"""
from django.db import models
class Legacy(models.Model):
name = models.CharField(max_length=10)
size = models.IntegerField()
class Meta:
db_table = "legacy_table"
managed = False
|
brawlstats/core.py | SharpBit/brawlstars | 101 | 11172703 | <filename>brawlstats/core.py
import asyncio
import json
import logging
import sys
import time
from typing import Union
import aiohttp
import requests
from cachetools import TTLCache
from .errors import Forbidden, NotFoundError, RateLimitError, ServerError, UnexpectedError
from .models import BattleLog, Brawlers, Club, Constants, Members, Player, Ranking
from .utils import API, bstag, typecasted
log = logging.getLogger(__name__)
class Client:
"""A sync/async client class that lets you access the Brawl Stars API
Parameters
------------
token: str
The API Key that you can get from https://developer.brawlstars.com
session: Union[requests.Session, aiohttp.ClientSession], optional
Use a current session or a make new one, by default None
timeout: int, optional
How long to wait in seconds before shutting down requests, by default 30
is_async: bool, optional
Setting this to ``True`` makes the client async, by default False
loop: asyncio.window_events._WindowsSelectorEventLoop, optional
The event loop to use for asynchronous operations, by default None
connector: aiohttp.TCPConnector, optional
Pass a TCPConnector into the client (aiohttp), by default None
debug: bool, optional
Whether or not to log info for debugging, by default False
prevent_ratelimit: bool, optional
Whether or not to wait between requests to prevent being ratelimited, by default False
base_url: str, optional
Sets a different base URL to make request to, by default None
"""
    REQUEST_LOG = '{method} {url} received {text} has returned {status}'
def __init__(self, token, session=None, timeout=30, is_async=False, **options):
# Async options
self.is_async = is_async
self.loop = options.get('loop', asyncio.get_event_loop()) if self.is_async else None
self.connector = options.get('connector')
self.debug = options.get('debug', False)
self.cache = TTLCache(3200 * 3, 60 * 3) # 3200 requests per minute
# Session and request options
self.session = options.get('session') or (
aiohttp.ClientSession(loop=self.loop, connector=self.connector) if self.is_async else requests.Session()
)
self.timeout = timeout
self.prevent_ratelimit = options.get('prevent_ratelimit', False)
if self.is_async and self.prevent_ratelimit:
self.lock = asyncio.Lock(loop=self.loop)
self.api = API(base_url=options.get('base_url'), version=1)
# Request/response headers
self.headers = {
'Authorization': 'Bearer {}'.format(token),
'User-Agent': 'brawlstats/{0} (Python {1[0]}.{1[1]})'.format(self.api.VERSION, sys.version_info),
'Accept-Encoding': 'gzip'
}
# Load brawlers for get_rankings
if self.is_async:
self.loop.create_task(self.__ainit__())
else:
brawlers_info = self.get_brawlers()
self.api.set_brawlers(brawlers_info)
async def __ainit__(self):
"""Task created to run `get_brawlers` asynchronously"""
self.api.set_brawlers(await self.get_brawlers())
def __repr__(self):
return '<Client async={} timeout={} debug={}>'.format(self.is_async, self.timeout, self.debug)
def close(self):
return self.session.close()
def _raise_for_status(self, resp, text):
"""
Checks for invalid error codes returned by the API.
"""
try:
data = json.loads(text)
except json.JSONDecodeError:
data = text
code = getattr(resp, 'status', None) or getattr(resp, 'status_code')
url = resp.url
if self.debug:
log.debug(self.REQUEST_LOG.format(method='GET', url=url, text=text, status=code))
if 300 > code >= 200:
return data
if code == 403:
raise Forbidden(code, url, data['message'])
if code == 404:
raise NotFoundError(code, reason='Resource not found.')
if code == 429:
raise RateLimitError(code, url)
if code == 500:
raise UnexpectedError(code, url, data)
if code == 503:
raise ServerError(code, url)
def _resolve_cache(self, url):
"""Find any cached response for the same requested url."""
data = self.cache.get(url)
if not data:
return None
if self.debug:
log.debug('GET {} got result from cache.'.format(url))
return data
async def _arequest(self, url, use_cache=True):
"""Async method to request a url."""
# Try and retrieve from cache
if use_cache:
cache = self._resolve_cache(url)
else:
cache = None
if cache is not None:
return cache
try:
async with self.session.get(url, timeout=self.timeout, headers=self.headers) as resp:
data = self._raise_for_status(resp, await resp.text())
except asyncio.TimeoutError:
raise ServerError(503, url)
else:
# Cache the data if successful
self.cache[url] = data
return data
def _request(self, url, use_cache=True):
"""Sync method to request a url."""
if self.is_async:
return self._arequest(url, use_cache)
# Try and retrieve from cache
if use_cache:
cache = self._resolve_cache(url)
else:
cache = None
if cache is not None:
return cache
try:
with self.session.get(url, timeout=self.timeout, headers=self.headers) as resp:
data = self._raise_for_status(resp, resp.text)
except requests.Timeout:
raise ServerError(503, url)
else:
# Cache the data if successful
self.cache[url] = data
return data
async def _aget_model(self, url, model, use_cache=True, key=None):
"""Method to turn the response data into a Model class for the async client."""
if self.prevent_ratelimit:
# Use self.lock if prevent_ratelimit=True
async with self.lock:
data = await self._arequest(url, use_cache)
await asyncio.sleep(0.1)
else:
data = await self._arequest(url)
if model == Constants:
if key:
if data.get(key):
return model(self, data.get(key))
else:
raise KeyError('No such Constants key "{}"'.format(key))
return model(self, data)
def _get_model(self, url, model, use_cache=True, key=None):
"""Method to turn the response data into a Model class for the sync client."""
if self.is_async:
# Calls the async function
return self._aget_model(url, model=model, use_cache=use_cache, key=key)
data = self._request(url, use_cache)
if self.prevent_ratelimit:
time.sleep(0.1)
if model == Constants:
if key:
if data.get(key):
return model(self, data.get(key))
else:
raise KeyError('No such Constants key "{}"'.format(key))
return model(self, data)
@typecasted
def get_player(self, tag: bstag, use_cache=True) -> Player:
"""Gets a player's stats.
Parameters
----------
tag : str
A valid player tag.
Valid characters: 0289PYLQGRJCUV
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Player
A player object with all of its attributes.
"""
url = '{}/{}'.format(self.api.PROFILE, tag)
return self._get_model(url, model=Player, use_cache=use_cache)
get_profile = get_player
@typecasted
def get_battle_logs(self, tag: bstag, use_cache=True) -> BattleLog:
"""Gets a player's battle logs.
Parameters
----------
tag : str
A valid player tag.
Valid characters: 0289PYLQGRJCUV
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
BattleLog
A player battle object with all of its attributes.
"""
url = '{}/{}/battlelog'.format(self.api.PROFILE, tag)
return self._get_model(url, model=BattleLog, use_cache=use_cache)
@typecasted
def get_club(self, tag: bstag, use_cache=True) -> Club:
"""Gets a club's stats.
Parameters
----------
tag : str
A valid club tag.
Valid characters: 0289PYLQGRJCUV
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Club
A club object with all of its attributes.
"""
url = '{}/{}'.format(self.api.CLUB, tag)
return self._get_model(url, model=Club, use_cache=use_cache)
@typecasted
def get_club_members(self, tag: bstag, use_cache=True) -> Members:
"""Gets the members of a club.
Parameters
----------
tag : str
A valid club tag.
Valid characters: 0289PYLQGRJCUV
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Members
A list of the members in a club.
"""
url = '{}/{}/members'.format(self.api.CLUB, tag)
return self._get_model(url, model=Members, use_cache=use_cache)
def get_rankings(
self, *, ranking: str, region: str=None, limit: int=200,
brawler: Union[str, int]=None, use_cache=True
) -> Ranking:
"""Gets the top count players/clubs/brawlers.
Parameters
----------
ranking : str
The type of ranking. Must be "players", "clubs", "brawlers".
region : str, optional
The region to retrieve from. Must be a 2 letter country code, by default None
limit : int, optional
The number of top players or clubs to fetch, by default 200
brawler : Union[str, int], optional
The brawler name or ID, by default None
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Ranking
A player or club ranking that contains a list of players or clubs.
Raises
------
ValueError
The brawler name or ID is invalid.
ValueError
            `ranking` is not "players", "clubs", or "brawlers"
ValueError
`limit` is not between 1 and 200, inclusive.
"""
if brawler is not None:
if isinstance(brawler, str):
brawler = brawler.lower()
# Replace brawler name with ID
if brawler in self.api.CURRENT_BRAWLERS.keys():
brawler = self.api.CURRENT_BRAWLERS[brawler]
if brawler not in self.api.CURRENT_BRAWLERS.values():
raise ValueError('Invalid brawler.')
if region is None:
region = 'global'
# Check for invalid parameters
if ranking not in ('players', 'clubs', 'brawlers'):
raise ValueError("'ranking' must be 'players', 'clubs' or 'brawlers'.")
if not 0 < limit <= 200:
raise ValueError('Make sure limit is between 1 and 200.')
# Construct URL
url = '{}/{}/{}?limit={}'.format(self.api.RANKINGS, region, ranking, limit)
if ranking == 'brawlers':
url = '{}/{}/{}/{}?limit={}'.format(self.api.RANKINGS, region, ranking, brawler, limit)
return self._get_model(url, model=Ranking, use_cache=use_cache)
def get_constants(self, key: str=None, use_cache=True) -> Constants:
"""Gets Brawl Stars constants extracted from the app.
Parameters
----------
key : str, optional
Any key to get specific data, by default None
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Constants
Data containing some Brawl Stars constants.
"""
        return self._get_model(self.api.CONSTANTS, model=Constants, use_cache=use_cache, key=key)
def get_brawlers(self, use_cache=True) -> Brawlers:
"""Gets available brawlers and information about them.
Parameters
----------
use_cache : bool, optional
Whether to use the internal 3 minutes cache, by default True
Returns
-------
Brawlers
A list of available brawlers and information about them.
"""
        return self._get_model(self.api.BRAWLERS, model=Brawlers, use_cache=use_cache)
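# Illustrative usage sketch for the methods above; the Client construction is an
# assumption (the client class and its token argument are defined earlier in
# this module) and the tag and region values are made-up placeholders.
#
#     client = Client('<api token>')
#     player = client.get_player('2PP00')
#     top_players = client.get_rankings(ranking='players', region='us', limit=50)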
|
pylsy/pylsy.py | Leviathan1995/Pylsy | 531 | 11172715 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Pylsy Authors
# For a full list of authors, see the AUTHORS file at
# https://github.com/Leviathan1995/Pylsy/blob/master/AUTHORS.
# @license MIT
from __future__ import print_function
from wcwidth import wcwidth
class pylsytable(object):
def __init__(self, attributes):
"""Creates a new PylsyTable object with the given attrs (cols)."""
self.StrTable = ""
self.Table = []
self.AttributesLength = []
self.Lines_num = 0
if type(attributes) != list:
attributes = [attributes]
self.Attributes = [u"{0}".format(attr) for attr in attributes]
self.Cols_num = len(self.Attributes)
for attribute in self.Attributes:
col = dict()
col[attribute] = []
self.Table.append(col)
def _print_divide(self):
"""Prints all those table line dividers."""
for space in self.AttributesLength:
self.StrTable += "+ " + "- " * space
self.StrTable += "+"+"\n"
def append_data(self, attribute, values):
"""Appends the given value(s) to the attribute (column)."""
found = False
if type(values) != list:
values = [values]
for col in self.Table:
if attribute in col:
dict_values = [u"{0}".format(value) for value in values]
col[attribute] += dict_values
found = True
if not found:
raise KeyError(attribute)
def add_data(self, attribute, values):
"""Sets the given values for the attribute (column)."""
found = False
if type(values) != list:
values = [values]
for col in self.Table:
if attribute in col:
dict_values = [u"{0}".format(value) for value in values]
col[attribute] = dict_values
found = True
if not found:
raise KeyError(attribute)
def _create_table(self):
"""
Creates a pretty-printed string representation of the table as
``self.StrTable``.
"""
self.StrTable = ""
self.AttributesLength = []
self.Lines_num = 0
# Prepare some values..
for col in self.Table:
# Updates the table line count if necessary
values = list(col.values())[0]
self.Lines_num = max(self.Lines_num, len(values))
# find the length of longest value in current column
key_length = max([self._disp_width(v) for v in values] or [0])
# and also the table header
key_length = max(key_length, self._disp_width(list(col.keys())[0]))
self.AttributesLength.append(key_length)
# Do the real thing.
self._print_head()
self._print_value()
def _print_head(self):
"""Generates the table header."""
self._print_divide()
self.StrTable += "| "
for colwidth, attr in zip(self.AttributesLength, self.Attributes):
self.StrTable += self._pad_string(attr, colwidth * 2)
self.StrTable += "| "
self.StrTable += '\n'
self._print_divide()
def _print_value(self):
"""Generates the table values."""
for line in range(self.Lines_num):
for col, length in zip(self.Table, self.AttributesLength):
vals = list(col.values())[0]
val = vals[line] if len(vals) != 0 and line < len(vals) else ''
self.StrTable += "| "
self.StrTable += self._pad_string(val, length * 2)
self.StrTable += "|"+'\n'
self._print_divide()
def _disp_width(self, pwcs, n=None):
"""
A wcswidth that never gives -1. Copying existing code is evil, but..
github.com/jquast/wcwidth/blob/07cea7f/wcwidth/wcwidth.py#L182-L204
"""
# pylint: disable=C0103
# Invalid argument name "n"
# TODO: Shall we consider things like ANSI escape seqs here?
# We can implement some ignore-me segment like those wrapped by
# \1 and \2 in readline too.
end = len(pwcs) if n is None else n
idx = slice(0, end)
width = 0
for char in pwcs[idx]:
width += max(0, wcwidth(char))
return width
def _pad_string(self, str, colwidth):
"""Center-pads a string to the given column width using spaces."""
width = self._disp_width(str)
prefix = (colwidth - 1 - width) // 2
suffix = colwidth - prefix - width
return ' ' * prefix + str + ' ' * suffix
def __str__(self):
"""Returns a pretty-printed string representation of the table."""
self._create_table()
return self.StrTable
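# Minimal usage sketch of pylsytable; the column names and values below are
# illustrative placeholders only.
if __name__ == "__main__":
    table = pylsytable(["Name", "Stars"])
    table.add_data("Name", ["pylsy", "wcwidth"])
    table.add_data("Stars", [531, 100])
    print(table)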
|
etl/parsers/etw/Microsoft_Windows_BranchCacheMonitoring.py | IMULMUL/etl-parser | 104 | 11172734 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-BranchCacheMonitoring
GUID : a2f55524-8ebc-45fd-88e4-a1b39f169e08
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=1, version=0)
class Microsoft_Windows_BranchCacheMonitoring_1_0(Etw):
pattern = Struct(
"Registrar" / WString,
"Mode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=2, version=0)
class Microsoft_Windows_BranchCacheMonitoring_2_0(Etw):
pattern = Struct(
"Registrar" / WString,
"Mode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=101, version=0)
class Microsoft_Windows_BranchCacheMonitoring_101_0(Etw):
pattern = Struct(
"CompletedDataDownloads" / Int64ul,
"SuccessfulDataDownloads" / Int64ul,
"MaxObservedSimultaneousDownloads" / Int64ul,
"AverageDownloadByteRate" / Int64ul,
"CompletedDataUploads" / Int64ul,
"SuccessfulDataUploads" / Int64ul,
"MaxObservedSimultaneousUploads" / Int64ul,
"AverageServingLatency" / Int32ul,
"MaxObservedServingLatency" / Int32ul,
"CurrentAverageInboundRequestFrequency" / Double,
"MaxObservedAverageInboundRequestFrequency" / Double,
"AverageDiscoveryTime" / Int64ul,
"AttemptedNetworkDiscoveries" / Int64ul,
"AttemptedV1NetworkDiscoveries" / Int64ul,
"AttemptedV2NetworkDiscoveries" / Int64ul,
"SuccessfulNetworkDiscoveries" / Int64ul,
"SuccessfulV1NetworkDiscoveries" / Int64ul,
"SuccessfulV2NetworkDiscoveries" / Int64ul,
"SuppressedDiscoveries" / Int64ul,
"PreDiscoveries" / Int64ul,
"CurrentAverageInboundDiscoveryFrequency" / Double,
"MaxObservedAverageInboundDiscoveryFrequency" / Double,
"TotalBytesServed" / Int64ul,
"TotalBytesRetrieved" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=102, version=0)
class Microsoft_Windows_BranchCacheMonitoring_102_0(Etw):
pattern = Struct(
"ContentIdSize" / Int32ul,
"ContentId" / Bytes(lambda this: this.ContentIdSize),
"StringContentId" / WString,
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"SegmentOffsetInContent" / Int64ul,
"SegmentSize" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=103, version=0)
class Microsoft_Windows_BranchCacheMonitoring_103_0(Etw):
pattern = Struct(
"ContentIdSize" / Int32ul,
"ContentId" / Bytes(lambda this: this.ContentIdSize),
"StringContentId" / WString,
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"SegmentOffsetInContent" / Int64ul,
"SegmentSize" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=104, version=0)
class Microsoft_Windows_BranchCacheMonitoring_104_0(Etw):
pattern = Struct(
"ContentIdSize" / Int32ul,
"ContentId" / Bytes(lambda this: this.ContentIdSize),
"StringContentId" / WString,
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"DataOffsetInSegment" / Int64ul,
"DataSize" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=105, version=0)
class Microsoft_Windows_BranchCacheMonitoring_105_0(Etw):
pattern = Struct(
"ContentIdSize" / Int32ul,
"ContentId" / Bytes(lambda this: this.ContentIdSize),
"StringContentId" / WString,
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"ContentOffset" / Int64ul,
"SegmentOffset" / Int64ul,
"Bytes" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=106, version=0)
class Microsoft_Windows_BranchCacheMonitoring_106_0(Etw):
pattern = Struct(
"ContentIdSize" / Int32ul,
"ContentId" / Bytes(lambda this: this.ContentIdSize),
"StringContentId" / WString,
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"ContentOffset" / Int64ul,
"SegmentOffset" / Int64ul,
"Bytes" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=107, version=0)
class Microsoft_Windows_BranchCacheMonitoring_107_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=108, version=0)
class Microsoft_Windows_BranchCacheMonitoring_108_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=109, version=0)
class Microsoft_Windows_BranchCacheMonitoring_109_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=110, version=0)
class Microsoft_Windows_BranchCacheMonitoring_110_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=111, version=0)
class Microsoft_Windows_BranchCacheMonitoring_111_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=112, version=0)
class Microsoft_Windows_BranchCacheMonitoring_112_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=113, version=0)
class Microsoft_Windows_BranchCacheMonitoring_113_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"BlockId" / Int32ul,
"BlockSize" / Int32ul,
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=114, version=0)
class Microsoft_Windows_BranchCacheMonitoring_114_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"BlockId" / Int32ul,
"BlockSize" / Int32ul,
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=115, version=0)
class Microsoft_Windows_BranchCacheMonitoring_115_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"BlockId" / Int32ul,
"BlockSize" / Int32ul,
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=116, version=0)
class Microsoft_Windows_BranchCacheMonitoring_116_0(Etw):
pattern = Struct(
"SegmentIdSize" / Int32ul,
"SegmentId" / Bytes(lambda this: this.SegmentIdSize),
"BlockId" / Int32ul,
"BlockSize" / Int32ul,
"HostName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=201, version=0)
class Microsoft_Windows_BranchCacheMonitoring_201_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=202, version=0)
class Microsoft_Windows_BranchCacheMonitoring_202_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"PeerDistMinContentInformationVersion" / Int64ul,
"PeerDistMaxContentInformationVersion" / Int64ul,
"PCCRTPProtocolVersion" / Int64ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=203, version=0)
class Microsoft_Windows_BranchCacheMonitoring_203_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=204, version=0)
class Microsoft_Windows_BranchCacheMonitoring_204_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"PeerDistMinContentInformationVersion" / Int64ul,
"PeerDistMaxContentInformationVersion" / Int64ul,
"PCCRTPProtocolVersion" / Int64ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=205, version=0)
class Microsoft_Windows_BranchCacheMonitoring_205_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul,
"HTTPProtocolMajorVersion" / Int16ul,
"HTTPProtocolMinorVersion" / Int16ul,
"HTTPStatusCode" / Int16ul,
"OriginalContentLength" / Int64ul,
"EncodedContentLength" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=206, version=0)
class Microsoft_Windows_BranchCacheMonitoring_206_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"PeerDistMinContentInformationVersion" / Int64ul,
"PeerDistMaxContentInformationVersion" / Int64ul,
"PCCRTPProtocolVersion" / Int64ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul,
"HTTPProtocolMajorVersion" / Int16ul,
"HTTPProtocolMinorVersion" / Int16ul,
"HTTPStatusCode" / Int16ul,
"OriginalContentLength" / Int64ul,
"EncodedContentLength" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=207, version=0)
class Microsoft_Windows_BranchCacheMonitoring_207_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"PeerDistMinContentInformationVersion" / Int64ul,
"PeerDistMaxContentInformationVersion" / Int64ul,
"PCCRTPProtocolVersion" / Int64ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul,
"HTTPProtocolMajorVersion" / Int16ul,
"HTTPProtocolMinorVersion" / Int16ul,
"HTTPStatusCode" / Int16ul,
"OriginalContentLength" / Int64ul,
"EncodedContentLength" / Int64ul
)
@declare(guid=guid("a2f55524-8ebc-45fd-88e4-a1b39f169e08"), event_id=208, version=0)
class Microsoft_Windows_BranchCacheMonitoring_208_0(Etw):
pattern = Struct(
"RequestId" / Int64ul,
"URL" / WString,
"ClientIPv4Address" / Int32ul,
"RangeRequest" / Int8ul,
"RangeCount" / Int32ul,
"FirstRangeOffset" / Int64ul,
"FirstRangeLenght" / Int64ul,
"HTTPProtocolMajorVersion" / Int16ul,
"HTTPProtocolMinorVersion" / Int16ul,
"HTTPStatusCode" / Int16ul,
"OriginalContentLength" / Int64ul,
"EncodedContentLength" / Int64ul
)
|
alipay/aop/api/response/AlipayCommerceIotSdarttoolPrintSendResponse.py | antopen/alipay-sdk-python-all | 213 | 11172736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceIotSdarttoolPrintSendResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceIotSdarttoolPrintSendResponse, self).__init__()
self._print_no = None
@property
def print_no(self):
return self._print_no
@print_no.setter
def print_no(self, value):
self._print_no = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceIotSdarttoolPrintSendResponse, self).parse_response_content(response_content)
if 'print_no' in response:
self.print_no = response['print_no']
|
.github/actions/pr-to-update-go/pr_to_update_go/__main__.py | qtweng/trafficcontrol | 598 | 11172739 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from argparse import ArgumentParser, Namespace
from github.MainClass import Github
from pr_to_update_go.go_pr_maker import GoPRMaker
from pr_to_update_go.constants import ENV_GITHUB_TOKEN
def main() -> None:
parser = ArgumentParser()
parser.add_argument('--update-version-only', type=bool, default=False, help='Exit after updating the GO_VERSION file')
args: Namespace = parser.parse_args()
try:
github_token: str = os.environ[ENV_GITHUB_TOKEN]
except KeyError:
print(f'Environment variable {ENV_GITHUB_TOKEN} must be defined.')
sys.exit(1)
gh = Github(login_or_token=github_token)
GoPRMaker(gh).run(args.update_version_only)
main()
|
app/models/orm/migrations/env.py | leosussan/fastapi-gino-arq-postgres | 289 | 11172766 | <filename>app/models/orm/migrations/env.py
# isort:skip_file
import sys
sys.path.extend(["./"])
from logging.config import fileConfig
from sqlalchemy import engine_from_config, pool
from alembic import context
from app.settings.globals import ALEMBIC_CONFIG
######################## --- MODELS FOR MIGRATIONS --- ########################
from app.application import db
from app.models.orm.user import User
# To include a model in migrations, add a line here.
# from app.models.orm.person import Person
###############################################################################
config = context.config
fileConfig(config.config_file_name)
target_metadata = db
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=ALEMBIC_CONFIG.url.__to_string__(hide_password=False),
target_metadata=target_metadata,
literal_binds=True,
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
{
"sqlalchemy.url": ALEMBIC_CONFIG.url.__to_string__(
hide_password=False
)
},
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
tests/emukit/core/optimization/test_multi_source_acquisition_optimizer.py | ndalchau/emukit | 152 | 11172769 | <gh_stars>100-1000
import numpy as np
from numpy.testing import assert_array_equal
from emukit.core import ContinuousParameter
from emukit.core import InformationSourceParameter
from emukit.core import ParameterSpace
from emukit.core.optimization import GradientAcquisitionOptimizer
from emukit.core.optimization import MultiSourceAcquisitionOptimizer
def test_multi_source_acquisition_optimizer(simple_square_acquisition):
space = ParameterSpace([ContinuousParameter('x', 0, 1),
InformationSourceParameter(2)])
single_optimizer = GradientAcquisitionOptimizer(space)
optimizer = MultiSourceAcquisitionOptimizer(single_optimizer, space)
opt_x, opt_val = optimizer.optimize(simple_square_acquisition)
assert_array_equal(opt_x, np.array([[0., 1.]]))
assert_array_equal(opt_val, np.array([[2.]]))
|
securitybot/tasker/sql_tasker.py | gurpradeep/securitybot | 1,053 | 11172792 | '''
A tasker on top of a SQL database.
'''
from securitybot.tasker.tasker import Task, Tasker, STATUS_LEVELS
from securitybot.sql import SQLEngine
from typing import List
# Note: this order is provided to match the SQLTask constructor
GET_ALERTS = '''
SELECT HEX(alerts.hash),
title,
ldap,
reason,
description,
url,
performed,
comment,
authenticated,
status
FROM alerts
JOIN user_responses ON alerts.hash = user_responses.hash
JOIN alert_status ON alerts.hash = alert_status.hash
WHERE status = %s
'''
class SQLTasker(Tasker):
def _get_tasks(self, level):
# type: (int) -> List[Task]
'''
Gets all tasks of a certain level.
Args:
level (int): One of STATUS_LEVELS
Returns:
List of SQLTasks.
'''
alerts = SQLEngine.execute(GET_ALERTS, (level,))
return [SQLTask(*alert) for alert in alerts]
def get_new_tasks(self):
# type: () -> List[Task]
return self._get_tasks(STATUS_LEVELS.OPEN)
def get_active_tasks(self):
# type: () -> List[Task]
return self._get_tasks(STATUS_LEVELS.INPROGRESS)
def get_pending_tasks(self):
# type: () -> List[Task]
return self._get_tasks(STATUS_LEVELS.VERIFICATION)
SET_STATUS = '''
UPDATE alert_status
SET status=%s
WHERE hash=UNHEX(%s)
'''
SET_RESPONSE = '''
UPDATE user_responses
SET comment=%s,
performed=%s,
authenticated=%s
WHERE hash=UNHEX(%s)
'''
class SQLTask(Task):
def __init__(self, hsh, title, username, reason, description, url,
performed, comment, authenticated, status):
# type: (str, str, str, str, str, str, bool, str, bool, int) -> None
'''
Args:
hsh (str): SHA256 primary key hash.
'''
super(SQLTask, self).__init__(title, username, reason, description, url,
performed, comment, authenticated, status)
self.hash = hsh
def _set_status(self, status):
# type: (int) -> None
'''
Sets the status of a task in the DB.
Args:
status (int): The new status to use.
'''
SQLEngine.execute(SET_STATUS, (status, self.hash))
def _set_response(self):
# type: () -> None
'''
Updates the user response for this task.
'''
SQLEngine.execute(SET_RESPONSE, (self.comment,
self.performed,
self.authenticated,
self.hash))
def set_open(self):
self._set_status(STATUS_LEVELS.OPEN)
def set_in_progress(self):
self._set_status(STATUS_LEVELS.INPROGRESS)
def set_verifying(self):
self._set_status(STATUS_LEVELS.VERIFICATION)
self._set_response()
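# Illustrative consumption flow (a sketch only; it assumes the base Tasker
# constructor takes no arguments and that SQLEngine has already been
# initialised elsewhere with a live database connection):
#
#     tasker = SQLTasker()
#     for task in tasker.get_new_tasks():
#         task.set_in_progress()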
|
pfrl/nn/recurrent.py | g-votte/pfrl | 824 | 11172809 | class Recurrent(object):
"""Recurrent module interface.
This class defines the interface of a recurrent module PFRL support.
The interface is similar to that of `torch.nn.LSTM` except that sequential
data are expected to be packed in `torch.nn.utils.rnn.PackedSequence`.
To implement a model with recurrent layers, you can either use
default container classes such as
`pfrl.nn.RecurrentSequential` and
`pfrl.nn.RecurrentBranched` or write your module
extending this class and `torch.nn.Module`.
"""
def forward(self, packed_input, recurrent_state):
"""Multi-step batch forward computation.
Args:
packed_input (object): Input sequences. Tensors must be packed in
`torch.nn.utils.rnn.PackedSequence`.
recurrent_state (object or None): Batched recurrent state.
If set to None, it is initialized.
Returns:
object: Output sequences. Tensors will be packed in
`torch.nn.utils.rnn.PackedSequence`.
object or None: New batched recurrent state.
"""
raise NotImplementedError
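# A minimal sketch of a custom recurrent module that follows the interface
# documented above. It simply wraps torch.nn.LSTM, which already consumes
# torch.nn.utils.rnn.PackedSequence input and returns
# (packed_output, (h_n, c_n)); the layer sizes are arbitrary values chosen
# only for illustration.
import torch.nn as nn
class _ExampleRecurrentLSTM(Recurrent, nn.Module):
    def __init__(self, input_size=8, hidden_size=16):
        super().__init__()
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size)
    def forward(self, packed_input, recurrent_state):
        # nn.LSTM initialises its state to zeros when recurrent_state is None,
        # which matches the contract documented in Recurrent.forward.
        return self.lstm(packed_input, recurrent_state)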
|
cacreader/swig-4.0.2/Examples/test-suite/python/python_append_runme.py | kyletanyag/LL-Smartcard | 1,031 | 11172814 | <filename>cacreader/swig-4.0.2/Examples/test-suite/python/python_append_runme.py
from python_append import *
import os.path
# test not relevant for -builtin
if is_python_builtin():
exit(0)
t = Test()
t.funk()
t.static_func()
if grabpath() != os.path.dirname(mypath):
raise RuntimeError("grabpath failed")
if grabstaticpath() != os.path.basename(mypath):
raise RuntimeError("grabstaticpath failed")
clearstaticpath()
if grabstaticpath() != None:
raise RuntimeError("Resetting staticfuncpath failed")
Test.static_func()
if grabstaticpath() != os.path.basename(mypath):
raise RuntimeError("grabstaticpath failed")
# slots test
fs = ForSlots()
if fs.ValidVariable != 99:
raise RuntimeError("ValidVariable failed")
fs.ValidVariable = 11
if fs.ValidVariable != 11:
raise RuntimeError("ValidVariable failed")
try:
fs.Invalid = 22
raise RuntimeError("It should not be possible to set a random variable name")
except AttributeError:
pass
|
recipes/Python/577546_linecountpy/recipe-577546.py | tdiprima/code | 2,023 | 11172825 | <reponame>tdiprima/code
#!/usr/bin/python
# -*- mode: python; coding: utf-8 -*-
#
# Copyright 2011 (C) by <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
from optparse import OptionParser
from fnmatch import fnmatch
usage = '''
linecount.py [Options] Targets
Targets must be list of valid files or directories
Ex for a C++ project :
linecount.py --exts="h cc" --excludes="*build*" myprojectroot'''
sh_exts = 'sh py pl rb'.split()
c_exts = 'h hpp c cc cpp cxx java cs'.split()
m_exts = 'm'.split()
# the few following lines will try to fetch terminal width for better output
# 1st try for POSIX systems (Linux, MacOSX)
# 2nd try for MS systems
# if failure, defaults to 80
termwidth = 80
try:
import fcntl, termios, struct
cr = struct.unpack('hh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ, '1234'))
(h, termwidth) = cr
except:
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-11)
csb = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csb)
if res:
import struct
(bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx,
maxy) = struct.unpack("hhhhHhhhhhh", csb.raw)
termwidth = right - left + 1
except:
pass
# some output formatting utility
defaultFmtTag = '\033[0m'
boldFmtTag = '\033[1;1m'
redFmtTag = '\033[91m'
greenFmtTag = '\033[92m'
def boldFmt(str, closeTag=defaultFmtTag):
return boldFmtTag + str + closeTag
def greenFmt(str, closeTag=defaultFmtTag):
return greenFmtTag + str + closeTag
def redFmt(str, closeTag=defaultFmtTag):
return redFmtTag + str + closeTag
def errorMsg(mes):
sys.stderr.write(boldFmt(redFmt('Error: ' + mes)) + '\n')
def getExt(file):
(root, ext) = os.path.splitext(file)
if len(ext) > 0:
return ext[1:]
return ''
def fileMatches(file, options):
if options.excludes:
for exc in options.excludes:
if fnmatch(file, exc):
return False
if options.syntax:
return True
ext = getExt(file)
if options.exts:
if ext in options.exts:
if ext in sh_exts or ext in c_exts or ext in m_exts:
return True
else:
return True
return False
def doCFile(file):
count = 0
f = open(file, 'r')
incomment = False
for line in f:
line = line.strip()
if incomment:
end = line.find('*/')
if end < 0:
continue
else:
incomment = False
line = line[end+2:]
if len(line) == 0:
continue
if line.startswith('//'):
continue
ind = line.find('/*')
if ind >= 0:
incomment = True
            ind2 = line[ind+2:].find('*/')
            if ind2 >= 0:
                incomment = False
            # count the line when code appears before the '/*' or after the '*/'
            if ind > 0 or (ind2 >= 0 and ind + 2 + ind2 + 2 < len(line)):
                count += 1
continue
count += 1
return count
def doRegularFile(file, cmtStr):
count = 0
f = open(file, 'r')
for line in f:
line = line.strip()
if len(line) > 0 and not line.startswith(cmtStr):
count += 1
return count
def doShFile(file):
return doRegularFile(file, '#')
def doMFile(file):
return doRegularFile(file, '%')
formatstr = '{0:.<' + str(termwidth-11) + '}' + boldFmt(greenFmt('{1:>5d} lines'))
filecount = 0
def doFile(file, options):
global formatstr
global filecount
if options.syntax:
if options.syntax == 'S':
count = doShFile(file)
elif options.syntax == 'C':
count = doCFile(file)
elif options.syntax == 'M':
count = doMFile(file)
else:
ext = getExt(file)
count = 0
if ext in sh_exts:
count = doShFile(file)
elif ext in c_exts:
count = doCFile(file)
elif ext in m_exts:
count = doMFile(file)
print formatstr.format(file, count)
filecount += 1
return count
def doDir(dir, options):
files = sorted(os.listdir(dir))
count = 0
for file in files:
fname = os.path.join(dir, file)
if os.path.islink(fname):
continue
if os.path.isdir(fname) and options.recurs:
count += doDir(fname, options)
elif fileMatches(fname, options):
count += doFile(fname, options)
return count
if __name__ == '__main__':
parser = OptionParser(usage)
parser.add_option('-e', '--exts', dest='exts', action='store',
help='list of extensions of files to be parsed (mandatory if a dir '
+ ' is in targets')
parser.add_option('-x', '--excludes', dest='excludes', action='store',
help='Blob syntax list of files to be excluded from count '
'(only useful when parsing dirs)')
parser.add_option('-s', '--syntax', dest='syntax', action='store',
help='Force parsing mode to the given syntax ' +
'(S: Shell-style, C: C-style, M: Matlab-style). If not specified, '
'syntax is based on file extension')
parser.add_option('-r', '--non-recursive', dest='recurs',
action='store_false', default=True,
help='Do not enter subdirectories recursively')
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
errorMsg('you must specify a destination')
sys.exit(1)
args = sorted(args)
for dest in args:
if os.path.exists(dest) and os.path.isdir(dest):
if not options.exts:
parser.print_help()
errorMsg('option ' + greenFmt('-e', redFmtTag) + ' or ' +
greenFmt('--exts', redFmtTag) + ' is needed')
sys.exit(1)
break
if options.exts:
options.exts = options.exts.split()
if options.excludes:
options.excludes = options.excludes.split()
if options.syntax:
if not options.syntax in 'S C M'.split():
parser.print_help()
errorMsg('accepted values for ' + greenFmt('--syntax', redFmtTag) + ' are:\n' +
' S for shell-style\n' +
' C for C-style\n' +
' M for matlab-style')
sys.exit(1)
count = 0
err = 0
printresume = len(args) > 1
for dest in args:
if os.path.exists(dest):
if os.path.isdir(dest):
c = doDir(dest, options)
if len(args)>1:
print repr(c) + ' lines of code in ' + dest
count += c
printresume = True
elif os.path.isfile(dest):
if fileMatches(dest, options):
count += doFile(dest, options)
else:
errorMsg('file ' + dest + ' doesn\'t match your options')
err += 1
else:
errorMsg('target ' + dest + ' is not valid')
err += 1
if err == 0 or count > 0:
if printresume:
resume = 'total count : ' + repr(count) + ' line'
if count > 1:
resume += 's'
resume += ' of code in ' + repr(filecount)+ ' file'
if filecount > 1:
resume += 's'
print boldFmt(greenFmt(resume))
else:
parser.print_help()
errorMsg('Aborting because of errors')
sys.exit(1)
sys.exit(0)
|
packs/cloudflare/tests/test_action_get_ips.py | userlocalhost2000/st2contrib | 164 | 11172828 | import yaml
import requests_mock
from mock import patch
from st2tests.base import BaseActionTestCase
from get_ips import GetIPsAction
__all__ = [
'GetIPsActionTestCase'
]
MOCK_CONFIG_BLANK = yaml.safe_load(open(
'packs/cloudflare/tests/fixture/blank.yaml').read())
MOCK_CONFIG_FULL = yaml.safe_load(open(
'packs/cloudflare/tests/fixture/full.yaml').read())
MOCK_DATA_INVALID_JSON = "{'dd': doo}"
MOCK_DATA_SUCCESS = open(
'packs/cloudflare/tests/fixture/success.json').read()
MOCK_DATA_FAIL = open(
'packs/cloudflare/tests/fixture/fail.json').read()
class GetIPsActionTestCase(BaseActionTestCase):
action_cls = GetIPsAction
def test_run_no_config(self):
self.assertRaises(ValueError, self.action_cls, MOCK_CONFIG_BLANK)
def test_run_is_instance(self):
action = self.get_action_instance(MOCK_CONFIG_FULL)
self.assertIsInstance(action, self.action_cls)
self.assertEqual(action.api_key, "API-Key")
self.assertEqual(action.API_HOST, "https://api.cloudflare.com")
@patch('get_ips.GetIPsAction.API_HOST', "mock://api.cloudflare.com")
def test_run_status_404(self):
action = self.get_action_instance(MOCK_CONFIG_FULL)
adapter = requests_mock.Adapter()
action.session.mount('mock', adapter)
adapter.register_uri('GET',
"mock://api.cloudflare.com/client/v4/ips",
status_code=404)
self.assertRaises(ValueError,
action.run)
@patch('get_ips.GetIPsAction.API_HOST', "mock://api.cloudflare.com")
def test_run_invalid_json(self):
action = self.get_action_instance(MOCK_CONFIG_FULL)
adapter = requests_mock.Adapter()
action.session.mount('mock', adapter)
adapter.register_uri('GET',
"mock://api.cloudflare.com/client/v4/ips",
text=MOCK_DATA_INVALID_JSON)
self.assertRaises(ValueError,
action.run)
@patch('get_ips.GetIPsAction.API_HOST', "mock://api.cloudflare.com")
def test_run_success_true(self):
expected = {'ipv4_cidrs': [u'192.168.3.11/21'],
'ipv6_cidrs': [u'2400:cb00::/32'],
'messages': []}
action = self.get_action_instance(MOCK_CONFIG_FULL)
adapter = requests_mock.Adapter()
action.session.mount('mock', adapter)
adapter.register_uri('GET',
"mock://api.cloudflare.com/client/v4/ips",
text=MOCK_DATA_SUCCESS)
result = action.run()
self.assertEqual(result, expected)
@patch('get_ips.GetIPsAction.API_HOST', "mock://api.cloudflare.com")
def test_run_success_false(self):
action = self.get_action_instance(MOCK_CONFIG_FULL)
adapter = requests_mock.Adapter()
action.session.mount('mock', adapter)
adapter.register_uri('GET',
"mock://api.cloudflare.com/client/v4/ips",
text=MOCK_DATA_FAIL)
self.assertRaises(Exception,
action.run)
|
dumpall.py | nian-hua/dumpall | 625 | 11172872 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
python3 dumpall.py <url>
"""
import os, sys
_srcdir = "%s/" % os.path.dirname(os.path.realpath(__file__))
_filepath = os.path.dirname(sys.argv[0])
sys.path.insert(1, os.path.join(_filepath, _srcdir))
if sys.version_info[0] == 3:
import dumpall
if __name__ == "__main__":
dumpall.main()
else: # Python 2
print("Python3 Only.")
|
galileo/framework/tf/python/callbacks/metrics_time.py | YaoPu2021/galileo | 115 | 11172901 | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from timeit import default_timer
from collections import defaultdict
from galileo.platform.utils import get_time_str
from galileo.platform.export import export
import tensorflow as tf
from tensorflow.python.eager import context
@export('galileo.tf')
class MetricsTimeCallback(tf.keras.callbacks.Callback):
r'''
trainning time and metrics
'''
def __init__(self, summary_dir=None, skip_first=True):
super().__init__()
with context.eager_mode():
self.summary_writer = tf.summary.create_file_writer(
summary_dir) if summary_dir else None
self.skip_first = skip_first
self.global_step = 0
def append_metrics(self, logs):
if logs:
for k, v in logs.items():
if k not in ['batch', 'size']:
self.metrics[k].append(v)
def on_train_begin(self, logs=None):
self.train_begin_time = default_timer()
self.epoch_times = []
self.batch_times = []
self.metrics = defaultdict(list)
self.global_step = 0
def on_epoch_begin(self, epoch, logs=None):
self.epoch_begin_time = default_timer()
def on_batch_begin(self, batch, logs=None):
self.batch_begin_time = default_timer()
def on_batch_end(self, batch, logs=None):
self.global_step += 1
self.batch_times.append(default_timer() - self.batch_begin_time)
self.append_metrics(logs)
if self.summary_writer:
with context.eager_mode():
with self.summary_writer.as_default():
tf.summary.scalar('batch_time',
self.batch_times[-1],
step=self.global_step)
self.summary_writer.flush()
def on_epoch_end(self, epoch, logs=None):
self.epoch_times.append(default_timer() - self.epoch_begin_time)
self.append_metrics(logs)
if self.summary_writer:
with context.eager_mode():
with self.summary_writer.as_default():
tf.summary.scalar('epoch_time',
self.epoch_times[-1],
step=epoch)
self.summary_writer.flush()
def on_train_end(self, logs=None):
train_time = default_timer() - self.train_begin_time
out = 'Summary:'
if self.epoch_times:
out += f'\n\tTotal epochs: {len(self.epoch_times)}'
epoch_times = self.epoch_times[1:] if self.skip_first and \
len(self.epoch_times) > 1 else self.epoch_times
epoch_time = get_time_str(np.mean(epoch_times))
out += f'\n\tMean per epoch time: {epoch_time}'
if self.batch_times:
out += f'\n\tTotal steps: {len(self.batch_times)}'
batch_times = self.batch_times[1:] if self.skip_first and \
len(self.batch_times) > 1 else self.batch_times
batch_time = get_time_str(np.mean(batch_times))
out += f'\n\tMean per step time: {batch_time}'
if self.metrics:
for k, v in self.metrics.items():
ts = np.array(v)
a, b, c = ts.min(), ts.mean(), ts.max()
out += f'\n\tmin/mean/max {k}: {a:.4f}/{b:.4f}/{c:.4f}'
out += f'\nTrain elapse {get_time_str(train_time)}'
print(out, flush=True)
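# Illustrative usage sketch; the Keras model and dataset below are assumptions
# made only for demonstration, not part of this module:
#
#     callback = MetricsTimeCallback(summary_dir='./summaries', skip_first=True)
#     model.fit(dataset, epochs=10, callbacks=[callback])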
|
backend/src/baserow/contrib/database/ws/pages.py | cjh0613/baserow | 839 | 11172913 | from baserow.ws.registries import PageType
from baserow.core.exceptions import UserNotInGroup
from baserow.contrib.database.table.handler import TableHandler
from baserow.contrib.database.table.exceptions import TableDoesNotExist
class TablePageType(PageType):
type = "table"
parameters = ["table_id"]
def can_add(self, user, web_socket_id, table_id, **kwargs):
"""
        The user should only have access to this page if the table exists and the
        user has access to it.
"""
if not table_id:
return False
try:
handler = TableHandler()
table = handler.get_table(table_id)
table.database.group.has_user(user, raise_error=True)
except (UserNotInGroup, TableDoesNotExist):
return False
return True
def get_group_name(self, table_id, **kwargs):
return f"table-{table_id}"
|
sdk/python/pulumi_azure/monitoring/scheduled_query_rules_alert.py | henriktao/pulumi-azure | 109 | 11172918 | <filename>sdk/python/pulumi_azure/monitoring/scheduled_query_rules_alert.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScheduledQueryRulesAlertArgs', 'ScheduledQueryRulesAlert']
@pulumi.input_type
class ScheduledQueryRulesAlertArgs:
def __init__(__self__, *,
action: pulumi.Input['ScheduledQueryRulesAlertActionArgs'],
data_source_id: pulumi.Input[str],
frequency: pulumi.Input[int],
query: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
time_window: pulumi.Input[int],
trigger: pulumi.Input['ScheduledQueryRulesAlertTriggerArgs'],
authorized_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auto_mitigation_enabled: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
query_type: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ScheduledQueryRulesAlert resource.
:param pulumi.Input['ScheduledQueryRulesAlertActionArgs'] action: An `action` block as defined below.
:param pulumi.Input[str] data_source_id: The resource URI over which log search query is to be run.
:param pulumi.Input[int] frequency: Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
:param pulumi.Input[str] query: Log search query.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the scheduled query rule instance.
:param pulumi.Input[int] time_window: Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
:param pulumi.Input['ScheduledQueryRulesAlertTriggerArgs'] trigger: The condition that results in the alert rule being run.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_resource_ids: List of Resource IDs referred into query.
:param pulumi.Input[bool] auto_mitigation_enabled: Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
:param pulumi.Input[str] description: The description of the scheduled query rule.
:param pulumi.Input[bool] enabled: Whether this scheduled query rule is enabled. Default is `true`.
:param pulumi.Input[str] name: The name of the scheduled query rule. Changing this forces a new resource to be created.
:param pulumi.Input[int] severity: Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
:param pulumi.Input[int] throttling: Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "data_source_id", data_source_id)
pulumi.set(__self__, "frequency", frequency)
pulumi.set(__self__, "query", query)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "time_window", time_window)
pulumi.set(__self__, "trigger", trigger)
if authorized_resource_ids is not None:
pulumi.set(__self__, "authorized_resource_ids", authorized_resource_ids)
if auto_mitigation_enabled is not None:
pulumi.set(__self__, "auto_mitigation_enabled", auto_mitigation_enabled)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if query_type is not None:
pulumi.set(__self__, "query_type", query_type)
if severity is not None:
pulumi.set(__self__, "severity", severity)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling is not None:
pulumi.set(__self__, "throttling", throttling)
@property
@pulumi.getter
def action(self) -> pulumi.Input['ScheduledQueryRulesAlertActionArgs']:
"""
An `action` block as defined below.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['ScheduledQueryRulesAlertActionArgs']):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="dataSourceId")
def data_source_id(self) -> pulumi.Input[str]:
"""
The resource URI over which log search query is to be run.
"""
return pulumi.get(self, "data_source_id")
@data_source_id.setter
def data_source_id(self, value: pulumi.Input[str]):
pulumi.set(self, "data_source_id", value)
@property
@pulumi.getter
def frequency(self) -> pulumi.Input[int]:
"""
Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
"""
return pulumi.get(self, "frequency")
@frequency.setter
def frequency(self, value: pulumi.Input[int]):
pulumi.set(self, "frequency", value)
@property
@pulumi.getter
def query(self) -> pulumi.Input[str]:
"""
Log search query.
"""
return pulumi.get(self, "query")
@query.setter
def query(self, value: pulumi.Input[str]):
pulumi.set(self, "query", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the scheduled query rule instance.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="timeWindow")
def time_window(self) -> pulumi.Input[int]:
"""
Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
"""
return pulumi.get(self, "time_window")
@time_window.setter
def time_window(self, value: pulumi.Input[int]):
pulumi.set(self, "time_window", value)
@property
@pulumi.getter
def trigger(self) -> pulumi.Input['ScheduledQueryRulesAlertTriggerArgs']:
"""
The condition that results in the alert rule being run.
"""
return pulumi.get(self, "trigger")
@trigger.setter
def trigger(self, value: pulumi.Input['ScheduledQueryRulesAlertTriggerArgs']):
pulumi.set(self, "trigger", value)
@property
@pulumi.getter(name="authorizedResourceIds")
def authorized_resource_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Resource IDs referred into query.
"""
return pulumi.get(self, "authorized_resource_ids")
@authorized_resource_ids.setter
def authorized_resource_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "authorized_resource_ids", value)
@property
@pulumi.getter(name="autoMitigationEnabled")
def auto_mitigation_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
"""
return pulumi.get(self, "auto_mitigation_enabled")
@auto_mitigation_enabled.setter
def auto_mitigation_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_mitigation_enabled", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the scheduled query rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this scheduled query rule is enabled. Default is `true`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the scheduled query rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="queryType")
def query_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "query_type")
@query_type.setter
def query_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query_type", value)
@property
@pulumi.getter
def severity(self) -> Optional[pulumi.Input[int]]:
"""
Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
"""
return pulumi.get(self, "severity")
@severity.setter
def severity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "severity", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def throttling(self) -> Optional[pulumi.Input[int]]:
"""
Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
"""
return pulumi.get(self, "throttling")
@throttling.setter
def throttling(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "throttling", value)
@pulumi.input_type
class _ScheduledQueryRulesAlertState:
def __init__(__self__, *,
action: Optional[pulumi.Input['ScheduledQueryRulesAlertActionArgs']] = None,
authorized_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auto_mitigation_enabled: Optional[pulumi.Input[bool]] = None,
data_source_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[str]] = None,
query_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[int]] = None,
time_window: Optional[pulumi.Input[int]] = None,
trigger: Optional[pulumi.Input['ScheduledQueryRulesAlertTriggerArgs']] = None):
"""
Input properties used for looking up and filtering ScheduledQueryRulesAlert resources.
:param pulumi.Input['ScheduledQueryRulesAlertActionArgs'] action: An `action` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_resource_ids: List of Resource IDs referred into query.
:param pulumi.Input[bool] auto_mitigation_enabled: Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
:param pulumi.Input[str] data_source_id: The resource URI over which log search query is to be run.
:param pulumi.Input[str] description: The description of the scheduled query rule.
:param pulumi.Input[bool] enabled: Whether this scheduled query rule is enabled. Default is `true`.
:param pulumi.Input[int] frequency: Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
:param pulumi.Input[str] name: The name of the scheduled query rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] query: Log search query.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the scheduled query rule instance.
:param pulumi.Input[int] severity: Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
:param pulumi.Input[int] throttling: Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
:param pulumi.Input[int] time_window: Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
:param pulumi.Input['ScheduledQueryRulesAlertTriggerArgs'] trigger: The condition that results in the alert rule being run.
"""
if action is not None:
pulumi.set(__self__, "action", action)
if authorized_resource_ids is not None:
pulumi.set(__self__, "authorized_resource_ids", authorized_resource_ids)
if auto_mitigation_enabled is not None:
pulumi.set(__self__, "auto_mitigation_enabled", auto_mitigation_enabled)
if data_source_id is not None:
pulumi.set(__self__, "data_source_id", data_source_id)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if frequency is not None:
pulumi.set(__self__, "frequency", frequency)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if query is not None:
pulumi.set(__self__, "query", query)
if query_type is not None:
pulumi.set(__self__, "query_type", query_type)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if severity is not None:
pulumi.set(__self__, "severity", severity)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if throttling is not None:
pulumi.set(__self__, "throttling", throttling)
if time_window is not None:
pulumi.set(__self__, "time_window", time_window)
if trigger is not None:
pulumi.set(__self__, "trigger", trigger)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input['ScheduledQueryRulesAlertActionArgs']]:
"""
An `action` block as defined below.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input['ScheduledQueryRulesAlertActionArgs']]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="authorizedResourceIds")
def authorized_resource_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Resource IDs referred into query.
"""
return pulumi.get(self, "authorized_resource_ids")
@authorized_resource_ids.setter
def authorized_resource_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "authorized_resource_ids", value)
@property
@pulumi.getter(name="autoMitigationEnabled")
def auto_mitigation_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
"""
return pulumi.get(self, "auto_mitigation_enabled")
@auto_mitigation_enabled.setter
def auto_mitigation_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_mitigation_enabled", value)
@property
@pulumi.getter(name="dataSourceId")
def data_source_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource URI over which log search query is to be run.
"""
return pulumi.get(self, "data_source_id")
@data_source_id.setter
def data_source_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_source_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the scheduled query rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this scheduled query rule is enabled. Default is `true`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def frequency(self) -> Optional[pulumi.Input[int]]:
"""
Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
"""
return pulumi.get(self, "frequency")
@frequency.setter
def frequency(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "frequency", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the scheduled query rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def query(self) -> Optional[pulumi.Input[str]]:
"""
Log search query.
"""
return pulumi.get(self, "query")
@query.setter
def query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query", value)
@property
@pulumi.getter(name="queryType")
def query_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "query_type")
@query_type.setter
def query_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the scheduled query rule instance.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def severity(self) -> Optional[pulumi.Input[int]]:
"""
Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
"""
return pulumi.get(self, "severity")
@severity.setter
def severity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "severity", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def throttling(self) -> Optional[pulumi.Input[int]]:
"""
Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
"""
return pulumi.get(self, "throttling")
@throttling.setter
def throttling(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "throttling", value)
@property
@pulumi.getter(name="timeWindow")
def time_window(self) -> Optional[pulumi.Input[int]]:
"""
Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
"""
return pulumi.get(self, "time_window")
@time_window.setter
def time_window(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "time_window", value)
@property
@pulumi.getter
def trigger(self) -> Optional[pulumi.Input['ScheduledQueryRulesAlertTriggerArgs']]:
"""
The condition that results in the alert rule being run.
"""
return pulumi.get(self, "trigger")
@trigger.setter
def trigger(self, value: Optional[pulumi.Input['ScheduledQueryRulesAlertTriggerArgs']]):
pulumi.set(self, "trigger", value)
class ScheduledQueryRulesAlert(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertActionArgs']]] = None,
authorized_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auto_mitigation_enabled: Optional[pulumi.Input[bool]] = None,
data_source_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[str]] = None,
query_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[int]] = None,
time_window: Optional[pulumi.Input[int]] = None,
trigger: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertTriggerArgs']]] = None,
__props__=None):
"""
Manages an AlertingAction Scheduled Query Rules resource within Azure Monitor.
## Import
Scheduled Query Rule Alerts can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:monitoring/scheduledQueryRulesAlert:ScheduledQueryRulesAlert example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Insights/scheduledqueryrules/myrulename
```
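        ## Example Usage
        A minimal illustrative sketch (the resource names, the log query and the nested
        argument names/values are assumptions for illustration, not provider defaults):
        ```python
        import pulumi_azure as azure
        example = azure.monitoring.ScheduledQueryRulesAlert("example",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            data_source_id=example_workspace.id,
            action=azure.monitoring.ScheduledQueryRulesAlertActionArgs(
                action_groups=[example_action_group.id],
            ),
            frequency=5,
            time_window=30,
            query="requests | where success == 'False' | summarize count() by bin(timestamp, 5m)",
            severity=1,
            trigger=azure.monitoring.ScheduledQueryRulesAlertTriggerArgs(
                operator="GreaterThan",
                threshold=3,
            ),
        )
        ```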
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertActionArgs']] action: An `action` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_resource_ids: List of Resource IDs referred into query.
:param pulumi.Input[bool] auto_mitigation_enabled: Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
:param pulumi.Input[str] data_source_id: The resource URI over which log search query is to be run.
:param pulumi.Input[str] description: The description of the scheduled query rule.
:param pulumi.Input[bool] enabled: Whether this scheduled query rule is enabled. Default is `true`.
:param pulumi.Input[int] frequency: Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
:param pulumi.Input[str] name: The name of the scheduled query rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] query: Log search query.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the scheduled query rule instance.
:param pulumi.Input[int] severity: Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
:param pulumi.Input[int] throttling: Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
:param pulumi.Input[int] time_window: Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
:param pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertTriggerArgs']] trigger: The condition that results in the alert rule being run.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ScheduledQueryRulesAlertArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an AlertingAction Scheduled Query Rules resource within Azure Monitor.
## Import
Scheduled Query Rule Alerts can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:monitoring/scheduledQueryRulesAlert:ScheduledQueryRulesAlert example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Insights/scheduledqueryrules/myrulename
```
:param str resource_name: The name of the resource.
:param ScheduledQueryRulesAlertArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScheduledQueryRulesAlertArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertActionArgs']]] = None,
authorized_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auto_mitigation_enabled: Optional[pulumi.Input[bool]] = None,
data_source_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[str]] = None,
query_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[int]] = None,
time_window: Optional[pulumi.Input[int]] = None,
trigger: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertTriggerArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScheduledQueryRulesAlertArgs.__new__(ScheduledQueryRulesAlertArgs)
if action is None and not opts.urn:
raise TypeError("Missing required property 'action'")
__props__.__dict__["action"] = action
__props__.__dict__["authorized_resource_ids"] = authorized_resource_ids
__props__.__dict__["auto_mitigation_enabled"] = auto_mitigation_enabled
if data_source_id is None and not opts.urn:
raise TypeError("Missing required property 'data_source_id'")
__props__.__dict__["data_source_id"] = data_source_id
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
if frequency is None and not opts.urn:
raise TypeError("Missing required property 'frequency'")
__props__.__dict__["frequency"] = frequency
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if query is None and not opts.urn:
raise TypeError("Missing required property 'query'")
__props__.__dict__["query"] = query
__props__.__dict__["query_type"] = query_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["severity"] = severity
__props__.__dict__["tags"] = tags
__props__.__dict__["throttling"] = throttling
if time_window is None and not opts.urn:
raise TypeError("Missing required property 'time_window'")
__props__.__dict__["time_window"] = time_window
if trigger is None and not opts.urn:
raise TypeError("Missing required property 'trigger'")
__props__.__dict__["trigger"] = trigger
super(ScheduledQueryRulesAlert, __self__).__init__(
'azure:monitoring/scheduledQueryRulesAlert:ScheduledQueryRulesAlert',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertActionArgs']]] = None,
authorized_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
auto_mitigation_enabled: Optional[pulumi.Input[bool]] = None,
data_source_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[str]] = None,
query_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[int]] = None,
time_window: Optional[pulumi.Input[int]] = None,
trigger: Optional[pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertTriggerArgs']]] = None) -> 'ScheduledQueryRulesAlert':
"""
Get an existing ScheduledQueryRulesAlert resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertActionArgs']] action: An `action` block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_resource_ids: List of Resource IDs referred into query.
:param pulumi.Input[bool] auto_mitigation_enabled: Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
:param pulumi.Input[str] data_source_id: The resource URI over which log search query is to be run.
:param pulumi.Input[str] description: The description of the scheduled query rule.
:param pulumi.Input[bool] enabled: Whether this scheduled query rule is enabled. Default is `true`.
:param pulumi.Input[int] frequency: Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
:param pulumi.Input[str] name: The name of the scheduled query rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] query: Log search query.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the scheduled query rule instance.
:param pulumi.Input[int] severity: Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
:param pulumi.Input[int] throttling: Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
:param pulumi.Input[int] time_window: Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
:param pulumi.Input[pulumi.InputType['ScheduledQueryRulesAlertTriggerArgs']] trigger: The condition that results in the alert rule being run.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScheduledQueryRulesAlertState.__new__(_ScheduledQueryRulesAlertState)
__props__.__dict__["action"] = action
__props__.__dict__["authorized_resource_ids"] = authorized_resource_ids
__props__.__dict__["auto_mitigation_enabled"] = auto_mitigation_enabled
__props__.__dict__["data_source_id"] = data_source_id
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
__props__.__dict__["frequency"] = frequency
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["query"] = query
__props__.__dict__["query_type"] = query_type
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["severity"] = severity
__props__.__dict__["tags"] = tags
__props__.__dict__["throttling"] = throttling
__props__.__dict__["time_window"] = time_window
__props__.__dict__["trigger"] = trigger
return ScheduledQueryRulesAlert(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output['outputs.ScheduledQueryRulesAlertAction']:
"""
An `action` block as defined below.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter(name="authorizedResourceIds")
def authorized_resource_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of Resource IDs referred into query.
"""
return pulumi.get(self, "authorized_resource_ids")
@property
@pulumi.getter(name="autoMitigationEnabled")
def auto_mitigation_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should the alerts in this Metric Alert be auto resolved? Defaults to `false`.
> **NOTE** `auto_mitigation_enabled` and `throttling` are mutually exclusive and cannot both be set.
"""
return pulumi.get(self, "auto_mitigation_enabled")
@property
@pulumi.getter(name="dataSourceId")
def data_source_id(self) -> pulumi.Output[str]:
"""
The resource URI over which log search query is to be run.
"""
return pulumi.get(self, "data_source_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the scheduled query rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether this scheduled query rule is enabled. Default is `true`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def frequency(self) -> pulumi.Output[int]:
"""
Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive).
"""
return pulumi.get(self, "frequency")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the scheduled query rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def query(self) -> pulumi.Output[str]:
"""
Log search query.
"""
return pulumi.get(self, "query")
@property
@pulumi.getter(name="queryType")
def query_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "query_type")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the scheduled query rule instance.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def severity(self) -> pulumi.Output[Optional[int]]:
"""
Severity of the alert. Possible values include: 0, 1, 2, 3, or 4.
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def throttling(self) -> pulumi.Output[Optional[int]]:
"""
Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive).
"""
return pulumi.get(self, "throttling")
@property
@pulumi.getter(name="timeWindow")
def time_window(self) -> pulumi.Output[int]:
"""
Time window for which data needs to be fetched for query (must be greater than or equal to `frequency`). Values must be between 5 and 2880 (inclusive).
"""
return pulumi.get(self, "time_window")
@property
@pulumi.getter
def trigger(self) -> pulumi.Output['outputs.ScheduledQueryRulesAlertTrigger']:
"""
The condition that results in the alert rule being run.
"""
return pulumi.get(self, "trigger")
|
src/openglgenerator.py | sioliv/University | 210 | 11172954 | """Refactored version of the opengl generator using ctypeslib
"""
try:
from ctypeslib.codegen import codegenerator
from ctypeslib import xml2py
except ImportError, err:
try:
from ctypes_codegen import codegenerator, xml2py
except ImportError, err:
from ctypes.wrap import codegenerator, xml2py
try:
from cStringIO import StringIO
except ImportError, err:
from StringIO import StringIO
import sys, logging
log = logging.getLogger( 'openglgenerator' )
import ctypes
from OpenGL.platform import GL, GLU, GLUT, GLE
from OpenGL import constant
def indent( code, indentation='\t' ):
"""Indent given code by given indentation"""
lines = code.splitlines()
return "\n".join( [ '%s%s'%(indentation,line) for line in lines] )
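# For example: indent("a\nb") returns "\ta\n\tb".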
class OpenGLGenerator( codegenerator.Generator ):
"""Subclass of code generator providing PyOpenGL integration"""
_super = codegenerator.Generator
MODULE_HEADER = """from ctypes import *
from OpenGL import platform, arrays
from OpenGL.constant import Constant
from OpenGL import constants as GLconstants
GLvoid = GLconstants.GLvoid
"""
@classmethod
def defaultEmitters( cls ):
"""Produce the set of default emitter classes
"""
return [
OpenGLFunction(),
OpenGLConstant(),
] + cls._super.defaultEmitters()
@classmethod
def importAble( cls, name, value ):
"""Determine whether this name/object should be imported from known symbols"""
return (
isinstance( value, type ) or
isinstance( value, constant.Constant ) or
value.__class__.__name__.endswith( 'CFunctionType') # this should be available *somewhere*!
)
def filter_items( self, items, expressions=None,symbols=None, types=None ):
"""Filter out PFN functions"""
items = [
i for i in items
# skip the pointer-to-function meta-types...
if not getattr( i,'name','').startswith( 'PFN' )
]
return self._super.filter_items( self, items, expressions=expressions, symbols=symbols, types=types )
def get_sharedlib(self, dllname, cc):
"""Override so that all references to shared libraries go through "platform" module"""
if dllname in ('libGL','GL','libGL.so.1'):
return 'platform.PLATFORM.GL'
elif dllname in ('libGLU','GLU','libGLU.so.1'):
return 'platform.PLATFORM.GLU'
elif dllname in ('libglut','glut','libglut.so.3'):
return 'platform.PLATFORM.GLUT'
elif dllname in ('libgle','gle','libgle.so.3' ):
return 'platform.PLATFORM.GLE'
else:
raise NotImplementedError( """Haven't done %s yet!"""%(dllname) )
def cmpitems( self, a, b ):
"""Dumb sorting helper to order by name instead of position"""
try:
return cmp( (a.name,getattr(a, "location", -1), a.__class__), (b.name,getattr(b, "location", 1),b.__class__))
except (AttributeError,TypeError,ValueError), err:
return cmp( a, b )
class OpenGLFunction( codegenerator.Function ):
"""Replaces the ctypes default code generator for functions"""
TEMPLATE = """%(location)s%(name)s = platform.createBaseFunction(
%(name)r, dll=%(libname)s, resultType=%(returnType)s,
argTypes=[%(argTypes)s],
doc=%(documentation)r,
argNames=%(argNames)r,
)
"""
def emit(self, generator, func):
"""Produce a function via a call to platform-provided function"""
result = []
libname = self.libName( generator, func )
if libname:
self.increment()
result.append( self.generateHeader( generator, func ))
args = self.getArgs( generator, func )
argTypes = ",".join( args )
argNames = self.getArgNames( generator, func )
location = self.locationComment( generator, func )
name = func.name
returnType = generator.type_name(func.returns)
documentation = self.documentFunction( generator, func )
generator.names.add(func.name)
result.append( self.TEMPLATE %locals() )
return result
elif not func.name.startswith( '__builtin_' ):
log.warning( """Could not find DLL name for function: %r""", func.name )
return ''
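    # Illustrative sketch of the binding emit() renders from TEMPLATE for one entry
    # point; the function name and argument types below are assumptions, not real
    # generator output:
    #
    # glVertex3f = platform.createBaseFunction(
    #     'glVertex3f', dll=platform.PLATFORM.GL, resultType=None,
    #     argTypes=[GLfloat, GLfloat, GLfloat],
    #     doc='glVertex3f( GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
    #     argNames=('x', 'y', 'z'),
    # )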
def arrayTypeName( self, generator, argType ):
"""Retrieve the array type name for argType or None"""
if generator.type_name(argType).startswith( 'POINTER' ):
# side effect should be to make the type available,
# but doesn't work with GLvoid
typeName = generator.type_name(argType.typ)
if typeName in self.CTYPE_TO_ARRAY_TYPE:
return 'arrays.%s'%(self.CTYPE_TO_ARRAY_TYPE[typeName])
elif (typeName == 'GLvoid'):
# normal to not have pointers to it...
log.info( 'GLvoid pointer %r, using POINTER(%s)', typeName, typeName )
else:
log.warning( 'No mapping for %r, using POINTER(%s)', typeName, typeName )
return None
def getArgs( self, generator, func ):
"""Retrieve arg type-names for all arguments in function typedef"""
return [
self.arrayTypeName( generator, a ) or generator.type_name(a)
for a in func.iterArgTypes()
]
def documentFunction( self, generator, func ):
"""Customisation point for documenting a given function"""
args = self.getArgs(generator,func)
argnames = self.getArgNames( generator, func )
return str("%s( %s ) -> %s"%(
func.name,
", ".join(
[ '%s(%s)'%( name, typ) for (name,typ) in zip(args,argnames) ]
),
generator.type_name(func.returns),
))
SUFFIX_TO_ARRAY_DATATYPE = [
('ub','GLGL_1_0.GL_UNSIGNED_BYTE'),
('us','GLGL_1_0.GL_UNSIGNED_SHORT'),
('ui','GLGL_1_0.GL_UNSIGNED_INT'),
('f','GLGL_1_0.GL_FLOAT'),
('d','GLGL_1_0.GL_DOUBLE'),
('i','GLGL_1_0.GL_INT'),
('s','GLGL_1_0.GL_SHORT'),
('b','GLGL_1_0.GL_BYTE'),
]
CTYPE_TO_ARRAY_TYPE = {
'GLfloat': 'GLfloatArray',
'float': 'GLfloatArray',
'GLclampf': 'GLclampfArray',
'GLdouble': 'GLdoubleArray',
'double': 'GLdoubleArray',
'int': 'GLintArray',
'GLint': 'GLintArray',
'GLuint': 'GLuintArray',
'unsigned int':'GLuintArray',
'unsigned char': 'GLbyteArray',
'uint': 'GLuintArray',
'GLshort': 'GLshortArray',
'GLushort': 'GLushortArray',
'short unsigned int':'GLushortArray',
'GLubyte': 'GLubyteArray',
'GLbyte': 'GLbyteArray',
'char': 'GLbyteArray',
'gleDouble': 'GLdoubleArray',
# following should all have special sub-classes that enforce dimensions
'gleDouble * 4': 'GLdoubleArray',
'gleDouble * 3': 'GLdoubleArray',
'gleDouble * 2': 'GLdoubleArray',
'c_float * 3': 'GLfloatArray',
'gleDouble * 3 * 2': 'GLdoubleArray',
}
class OpenGLConstant( codegenerator.Variable ):
"""Override to produce OpenGL.constant.Constant instances"""
TEMPLATE = """%(name)s = Constant( %(name)r, %(value)r)"""
def emit( self, generator, typedef ):
"""Filter out constants that don't have all-uppercase names"""
if typedef.name.upper() != typedef.name:
return ""
return super( OpenGLConstant, self ).emit( generator, typedef )
class OpenGLDecorator( OpenGLFunction ):
"""Produces decorated versions of the functions in a separate module
This is passed in as an emitter for a separate pass, so that only the
annotations get into the separate module.
"""
def isPointer( self, generator, arg ):
"""Is given arg-type a pointer?"""
return generator.type_name( arg ).startswith( 'POINTER' )
def hasPointer( self, generator, args ):
"""Given set of arg-types, is one a pointer?"""
return [ arg for arg in args if self.isPointer( generator, arg ) ]
def emit( self, generator, func ):
"""Emit code to create a copy of the function with pointer-size annotations"""
name = func.name
size = None
typ = None
if not self.hasPointer( generator, func.iterArgTypes() ):
return None
libname = self.libName( generator, func )
if not libname:
return None
base = name
if name.endswith( 'ARB' ):
base = base[:-3]
if base.endswith( 'v' ):
base = base[:-1]
found = 0
for suffix,typ in self.SUFFIX_TO_ARRAY_DATATYPE:
if base.endswith(suffix):
found = 1
base = base[:-len(suffix)]
try:
size = int(base[-1])
except ValueError, err:
size = None
break
elif base[:-1].endswith( 'Matrix' ):
# glLoadMatrix, glMultMatrix
for suffix,typ in self.SUFFIX_TO_ARRAY_DATATYPE:
if name.endswith( suffix ):
size = 16
break
result = ''
for index,(arg,argName) in enumerate( zip(func.iterArgTypes(),func.iterArgNames()) ):
type = self.arrayTypeName( generator, arg )
argName = str(argName )
if type:
generator.names.add(func.name)
if result:
previous = indent( result, '\t' )
else:
previous = '\traw.%(name)s'%locals()
if type and size is None:
# should only print this if it's a normal array type...
result = """arrays.setInputArraySizeType(
%(previous)s,
None, # XXX Could not determine size of argument %(argName)s for %(name)s %(type)s
%(type)s,
%(argName)r,
)
"""%locals()
elif type:
result = """arrays.setInputArraySizeType(
%(previous)s,
%(size)s,
%(type)s,
%(argName)r,
)
"""%locals()
if result:
return '%(name)s = %(result)s'%locals()
return None
if __name__ == "__main__":
import sys, logging
logging.basicConfig()
codegenerator.Generator = OpenGLGenerator
sys.exit(xml2py.main())
|
idaes/surrogate/helmet/Helmet.py | eyoung55/idaes-pse | 112 | 11172988 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
HELMholtz Energy Thermodynamics (HELMET)
Main capabilities of HELMET
default HELMET use
"""
__author__ = "<NAME> <<EMAIL>>"
import os
import platform
import subprocess
from idaes.surrogate import alamopy
from . import AncillaryEquations # , Certainty
from . import Plotting, DataImport, DataManipulation
from . import GAMSWrite, BasisFunctions
from matplotlib import cm
# global R
R = 8.314472 # kJ mol^-1 K^-1
# global molecule, filename, gamsname, data_name
molecule, filename, gamsname, data_name = None, None, None, None
# global sample, sample_ratio
sample = False
sample_ratio = 5
# global critT, critP, critD, M, triple, acc
critT, critP, critD, M, triple, acc = 0, 0, 0, 0, 0, 0
# global max_time, num_terms
max_time = 500
num_terms = 12
# global props
props = []
# global has_alamo
has_alamo = None
# global flag_dirty
flag_dirty = False
def initialize(**kwargs):
"""
filename - location of data
gamsname - name of the gams file made
molecule - name of the molecule/compound
data_name - name of the data
fluid data - [critT, critP, critD, M, triple, acentric factor]
R - gas constant value
"""
global R, molecule, filename, gamsname, data_name
global critT, critP, critD, M, triple, acc
global max_time, num_terms, props
global sample, sample_ratio
global flag_dirty
global has_alamo
k_dict = {
"R": R,
"filename": filename,
"gamsname": gamsname,
"molecule": molecule,
"fluid_data": (critT, critP, critD, M, triple, acc),
"max_time": max_time,
"num_terms": num_terms,
"props": props,
"sample": 1,
}
for arg in k_dict:
if arg in kwargs:
if arg == "R":
R = kwargs[arg]
elif arg == "filename":
filename = kwargs[arg]
elif arg == "gamsname":
gamsname = kwargs[arg]
elif arg == "molecule":
molecule = kwargs[arg]
elif arg == "fluid_data":
(critT, critP, critD, M, triple, acc) = kwargs[arg]
elif arg == "max_time":
max_time = kwargs[arg]
elif arg == "num_terms":
num_terms = kwargs[arg]
elif arg == "props":
props = kwargs[arg]
elif arg == "sample":
sample = True
sample_ratio = kwargs[arg]
else:
raise Exception("Not a keyword argument")
if has_alamo is None:
has_alamo = alamopy.multos.has_alamo()
if not has_alamo:
print("No ALAMO software found.")
updateModelSettings()
flag_dirty = True
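# Illustrative sketch of a typical initialize() call; the file name, fluid data
# values and property list below are hypothetical placeholders:
#
# initialize(
#     filename="fluid_data.csv",
#     gamsname="H2O",
#     molecule="H2O",
#     fluid_data=(647.096, 22.064, 17.87, 18.015, 273.16, 0.344),  # (critT, critP, critD, M, triple, acc)
#     max_time=600,
#     num_terms=14,
#     props=["PVT", "CV", "CP", "SND"],
# )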
def updateModelSettings():
"""
Settings of the model based on the chemical passed to the
different python methods
"""
global R, molecule, filename, gamsname, data_name
global critT, critP, critD, M, triple, acc
global max_time, num_terms, props
global flag_dirty
# SoaveDensity.molData((critT, critP, critD, M, triple, acc), molecule, R)
Plotting.molData((critT, critP, critD, M, triple, acc), molecule, R)
Plotting.props = props
DataImport.molData((critT, critP, critD, M, triple, acc), R)
DataImport.filename = filename
DataManipulation.molData((critT, critP, critD, M, triple, acc), molecule, R)
AncillaryEquations.molecule = molecule
AncillaryEquations.max_time = max_time
GAMSWrite.molData(
(critT, critP, critD, M, triple, acc), molecule, data_name, num_terms, max_time
)
BasisFunctions.molData((critT, critP, critD, M, triple, acc), molecule, R)
flag_dirty = False
def prepareAncillaryEquations(plot=False, keepFiles=False):
"""
Develops ancillary equations of state using ALAMOPY
DL - saturated liquid density
DV - saturated vapor density
PV - vapor pressure
Dependent on ALAMO
"""
global has_alamo
if has_alamo:
AncillaryEquations.DL()
AncillaryEquations.DV()
AncillaryEquations.PV()
if plot:
Plotting.viewAnc()
if not keepFiles:
for p in ["DL", "DV", "PV"]:
os.remove("%s%s" % (molecule, p))
os.remove("%s%s.lst" % (molecule, p))
else:
if plot:
Plotting.viewAnc()
print("Couldn't regress ancillary equations. ALAMO executable not found")
def viewPropertyData():
"""
Plot imported data
"""
Plotting.viewData()
def setupRegression(numTerms=14, gams=False, pyomo=False):
"""
setup gams regression
"""
global props, sample, sample_ratio
GAMSWrite.num_terms = numTerms
GAMSWrite.props = props
GAMSWrite.sample = sample
    GAMSWrite.sample_ratio = sample_ratio
GAMSWrite.importData()
GAMSWrite.runFile = "gdx"
GAMSWrite.GenerateGDXGamsFiledtlmv()
GAMSWrite.runFile = "main"
GAMSWrite.GenerateGamsShell()
def runRegression(gams=False, pyomo=False):
    """
    Runs the gdx and main regression gams file
    """
    GAMSWrite.runFile = "gdx"
    command = "gams %s%s.gms" % (molecule, GAMSWrite.runFile)
    subprocess.check_call(command, shell=True)
    GAMSWrite.runFile = "main"
    command = "gams %s%s.gms" % (molecule, GAMSWrite.runFile)
    subprocess.check_call(command, shell=True)
def getFlag():
"""
Returns flag, marks a change in the construction of the model
"""
return flag_dirty
def viewResults(lstFile=None, plot=False, report=False, surface=cm.coolwarm):
"""
Plot results from gams or pyomo
lstFile - gams listing file
surface - colormapping color eg. cm.coolwarm
"""
    Plotting.sseCombo(lstFile=lstFile, plot=plot, report=report, surface=surface)
def viewMultResults(lstFile, numTerms=0):
"""
View mutliple results from a lst file
"""
# PYLINT-TODO-FIX the multSSECombo function doesn't seem exist in the Plotting module
# pylint: disable=no-member
Plotting.multSSECombo(lstFile, numTerms)
def deletefile(*fname):
"""
Deletes files
"""
tos = platform.platform()
if "Windows" in tos:
for name in fname:
os.system("del " + name)
else:
for name in fname:
os.system("rm " + name)
|
pynes/cartridge.py | timgates42/pyNES | 1,046 | 11172996 |
class Cartridge:
def __init__(self):
self.banks = {}
self.bank_id = 0
self.pc = 0
self.inespgr = 1
self.ineschr = 1
self.inesmap = 1
self.inesmir = 1
self.rs = 0
self.path = ''
def nes_id(self):
# NES
return [0x4e, 0x45, 0x53, 0x1a]
def nes_get_header(self):
id = self.nes_id()
unused = [0, 0, 0, 0, 0, 0, 0, 0]
header = []
header.extend(id)
header.append(self.inespgr)
header.append(self.ineschr)
header.append(self.inesmir)
header.append(self.inesmap)
header.extend(unused)
return header
def set_iNES_prg(self, inespgr):
self.inespgr = inespgr
def set_iNES_chr(self, ineschr):
self.ineschr = ineschr
def set_iNES_map(self, inesmap):
self.inesmap = inesmap
def set_iNES_mir(self, inesmir):
self.inesmir = inesmir
def set_bank_id(self, id):
if id not in self.banks:
self.banks[id] = dict(code=[], start=None, size=(1024 * 8))
self.bank_id = id
def set_org(self, org):
if self.bank_id not in self.banks:
self.set_bank_id(self.bank_id)
if not self.banks[self.bank_id]['start']:
self.banks[self.bank_id]['start'] = org
self.pc = org
else:
while self.pc < org:
self.append_code([0xff])
self.pc = org
def append_code(self, code):
if self.bank_id not in self.banks:
self.set_bank_id(self.bank_id)
for c in code:
assert c <= 0xff
self.banks[self.bank_id]['code'].extend(code)
self.pc += len(code)
def get_code(self):
if self.bank_id not in self.banks:
self.set_bank_id(self.bank_id)
return self.banks[self.bank_id]['code']
def get_ines_code(self):
if self.bank_id not in self.banks:
self.set_bank_id(self.bank_id)
bin = []
nes_header = self.nes_get_header()
bin.extend(nes_header)
for i in self.banks:
for j in range(len(self.banks[i]['code']), self.banks[i]['size']):
self.banks[i]['code'].append(0xff)
bin.extend(self.banks[i]['code'])
return bin
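# Illustrative usage sketch (the org address and opcode bytes are made up):
#
# cart = Cartridge()
# cart.set_iNES_prg(1)
# cart.set_iNES_chr(1)
# cart.set_bank_id(0)
# cart.set_org(0xC000)             # first call just records the bank start address
# cart.append_code([0xA9, 0x00])   # e.g. LDA #$00
# rom = cart.get_ines_code()       # 16-byte iNES header + banks padded to size with 0xFF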
|
test/run/t291.py | timmartin/skulpt | 2,671 | 11173005 |
print -3 % 2
print 3 % 2
print -3 % 3
print 3 % 3
print
print -3 % -2
print 3 % -2
print -3 % -3
print 3 % -3
print
print 0 % 1
print 0 % -1
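# Expected output: Python's % uses floored division, so a non-zero result takes
# the sign of the divisor:
#   -3 % 2 -> 1,   3 % 2 -> 1,   -3 % 3 -> 0,  3 % 3 -> 0
#   -3 % -2 -> -1, 3 % -2 -> -1, -3 % -3 -> 0, 3 % -3 -> 0
#   0 % 1 -> 0,    0 % -1 -> 0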
|
pycharm2020.1.3/script/TcpConn.py | LaudateCorpus1/realtime-server | 465 | 11173013 | from __future__ import annotations
import time
import asyncio
import struct
import typing
from asyncio import transports
from asyncio.exceptions import CancelledError
from ConnBase import HEARTBEAT_TIMEOUT, HEARTBEAT_INTERVAL, ConnBase, RPC_HANDLER_ID_LEN, RECONNECT_MAX_TIMES, \
RECONNECT_INTERVAL, CONN_STATE_CONNECTED, CONN_STATE_CONNECTING, CONN_STATE_DISCONNECTED
from ConnMgr import ROLE_TYPE_ACTIVE
from core.common.protocol_def import PROTO_TYPE_TCP, TcpProtocol
from common import gv
from core.common.IdManager import IdManager
from core.util.TimerHub import TimerHub
from core.util.UtilApi import wait_or_not
if typing.TYPE_CHECKING:
from RpcHandler import RpcHandler
# from common import gr
# from core.common import MsgpackSupport
# from core.common.EntityFactory import EntityFactory
from core.mobilelog.LogManager import LogManager
class TcpConn(ConnBase):
def __init__(
self,
role_type: int,
addr: typing.Tuple[str, int],
# asyncio_writer: asyncio.StreamWriter,
# asyncio_reader: asyncio.StreamReader,
rpc_handler: RpcHandler = None,
close_cb: typing.Callable = lambda: None,
is_proxy: bool = False,
transport: transports.BaseTransport = None
):
super(TcpConn, self).__init__(role_type, addr, rpc_handler, close_cb, is_proxy, transport)
self._proto_type = PROTO_TYPE_TCP
async def try_connect(self) -> bool:
self.set_connection_state(CONN_STATE_CONNECTING)
while self._try_connect_times < RECONNECT_MAX_TIMES:
try:
self._try_connect_times += 1
transport, protocol = await gv.get_ev_loop().create_connection(
lambda: TcpProtocol(ROLE_TYPE_ACTIVE),
self._addr[0], self._addr[1])
except Exception as e:
self._logger.error(str(e))
await asyncio.sleep(RECONNECT_INTERVAL)
# if self._try_connect_times < RECONNECT_MAX_TIMES:
self._logger.warning(f"try reconnect tcp: {str(self._addr)} ... {self._try_connect_times}")
else:
self._try_connect_times = 0
self.set_connection_state(CONN_STATE_CONNECTED)
self._transport = transport
return True
else:
            self._logger.error(f"tried {RECONNECT_MAX_TIMES} times, still can't connect to tcp remote addr: {self._addr}")
self._try_connect_times = 0
self.set_connection_state(CONN_STATE_DISCONNECTED)
return False
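# Illustrative sketch of driving a connection attempt (the address is hypothetical
# and the rpc handler wiring is omitted):
#
# conn = TcpConn(ROLE_TYPE_ACTIVE, ("127.0.0.1", 20000))
# ok = await conn.try_connect()  # True on CONN_STATE_CONNECTED, False after RECONNECT_MAX_TIMES failures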
|
dynaconf/vendor/box/from_file.py | RonnyPfannschmidt/dynaconf | 2,293 | 11173032 | from json import JSONDecodeError
from pathlib import Path
from typing import Union
from dynaconf.vendor.toml import TomlDecodeError
from dynaconf.vendor.ruamel.yaml import YAMLError
from .exceptions import BoxError
from .box import Box
from .box_list import BoxList
__all__=['box_from_file']
def _to_json(data):
try:return Box.from_json(data)
except JSONDecodeError:raise BoxError('File is not JSON as expected')
except BoxError:return BoxList.from_json(data)
def _to_yaml(data):
try:return Box.from_yaml(data)
except YAMLError:raise BoxError('File is not YAML as expected')
except BoxError:return BoxList.from_yaml(data)
def _to_toml(data):
try:return Box.from_toml(data)
except TomlDecodeError:raise BoxError('File is not TOML as expected')
def box_from_file(file,file_type=None,encoding='utf-8',errors='strict'):
C=file_type;A=file
if not isinstance(A,Path):A=Path(A)
if not A.exists():raise BoxError(f'file "{A}" does not exist')
B=A.read_text(encoding=encoding,errors=errors)
if C:
if C.lower()=='json':return _to_json(B)
if C.lower()=='yaml':return _to_yaml(B)
if C.lower()=='toml':return _to_toml(B)
raise BoxError(f'"{C}" is an unknown type, please use either toml, yaml or json')
if A.suffix in('.json','.jsn'):return _to_json(B)
if A.suffix in('.yaml','.yml'):return _to_yaml(B)
if A.suffix in('.tml','.toml'):return _to_toml(B)
raise BoxError(f"Could not determine file type based off extension, please provide file_type") |
src/models/backbones_3d/pfe/__init__.py | reinforcementdriving/SA-Det3D | 134 | 11173074 |
from .voxel_set_abstraction import VoxelSetAbstraction
from .sa_voxel_set_abstraction import SAVoxelSetAbstraction
from .def_voxel_set_abstraction import DefVoxelSetAbstraction
__all__ = {
'VoxelSetAbstraction': VoxelSetAbstraction,
'DefVoxelSetAbstraction': DefVoxelSetAbstraction,
'SAVoxelSetAbstraction': SAVoxelSetAbstraction,
}
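# Typical lookup sketch (the config object and its NAME field are assumptions about
# how this registry is consumed by the detector builder):
#
# pfe_cls = __all__[model_cfg.PFE.NAME]   # e.g. 'VoxelSetAbstraction'
# pfe_module = pfe_cls(model_cfg=model_cfg.PFE, ...)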
|
tests/browser_base.py | petrklus/pyquery | 1,758 | 11173091 |
class TextExtractionMixin():
def _prepare_dom(self, html):
self.last_html = '<html><body>' + html + '</body></html>'
def _simple_test(self, html, expected_sq, expected_nosq, **kwargs):
raise NotImplementedError
def test_inline_tags(self):
self._simple_test(
'Phas<em>ell</em>us<i> eget </i>sem <b>facilisis</b> justo',
'Phasellus eget sem facilisis justo',
'Phasellus eget sem facilisis justo',
)
self._simple_test(
'Phasellus <span> eget </span> sem <b>facilisis\n</b> justo',
'Phasellus eget sem facilisis justo',
'Phasellus eget sem facilisis\n justo',
)
self._simple_test(
('Phasellus <span>\n eget\n '
'sem\n\tfacilisis</span> justo'),
'Phasellus eget sem facilisis justo',
'Phasellus \n eget\n sem\n\tfacilisis justo'
)
def test_block_tags(self):
self._simple_test(
'Phas<p>ell</p>us<div> eget </div>sem <h1>facilisis</h1> justo',
'Phas\nell\nus\neget\nsem\nfacilisis\njusto',
'Phas\nell\nus\n eget \nsem \nfacilisis\n justo',
)
self._simple_test(
'<p>In sagittis</p> <p>rutrum</p><p>condimentum</p>',
'In sagittis\nrutrum\ncondimentum',
'In sagittis\n \nrutrum\n\ncondimentum',
)
self._simple_test(
'In <p>\nultricies</p>\n erat et <p>\n\n\nmaximus\n\n</p> mollis',
'In\nultricies\nerat et\nmaximus\nmollis',
'In \n\nultricies\n\n erat et \n\n\n\nmaximus\n\n\n mollis',
)
self._simple_test(
('Integer <div><div>\n <div>quis commodo</div></div> '
'</div> libero'),
'Integer\nquis commodo\nlibero',
'Integer \n\n\n \nquis commodo\n\n \n libero',
)
self._simple_test(
'Heading<ul><li>one</li><li>two</li><li>three</li></ul>',
'Heading\none\ntwo\nthree',
'Heading\n\none\n\ntwo\n\nthree',
)
def test_separators(self):
self._simple_test(
'Some words<br>test. Another word<br><br> <br> test.',
'Some words\ntest. Another word\n\n\ntest.',
'Some words\ntest. Another word\n\n \n test.',
)
self._simple_test(
'Inline <span> splitted by\nbr<br>tag</span> test',
'Inline splitted by br\ntag test',
'Inline splitted by\nbr\ntag test',
)
self._simple_test(
'Some words<hr>test. Another word<hr><hr> <hr> test.',
'Some words\ntest. Another word\ntest.',
'Some words\n\ntest. Another word\n\n\n\n \n\n test.',
)
def test_strip(self):
self._simple_test(
' text\n',
'text',
' text\n',
)
def test_ul_li(self):
self._simple_test(
'<ul> <li> </li> </ul>',
'',
' \n \n '
)
|
segmentation/models/model.py | dataflowr/evaluating_bdl | 110 | 11173106 | # code-checked
# server-checked
import torch.nn as nn
from torch.nn import functional as F
import torch
import os
import sys
import numpy as np
from torch.autograd import Variable
import functools
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
from resnet_block import conv3x3, Bottleneck
from aspp import ASPP
sys.path.append(os.path.join(BASE_DIR, '../inplace_abn'))
from bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes):
print ("model.py")
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=False)
self.conv2 = conv3x3(64, 64)
self.bn2 = BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=False)
self.conv3 = conv3x3(64, 128)
self.bn3 = BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # NOTE! (ceil_mode=True will do that x (batch_size, 128, h/4, w/4) e.g. has shape (batch_size, 128, 33, 33) instead of (batch_size, 128, 32, 32) if h == w == 256)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1))
self.aspp = ASPP()
self.cls = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
downsample = None
if stride != 1 or self.inplanes != planes*block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes*block.expansion, affine=True))
layers = []
generate_multi_grid = lambda index, grids: grids[index%len(grids)] if isinstance(grids, tuple) else 1
layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid)))
self.inplanes = planes*block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))
return nn.Sequential(*layers)
def forward(self, x):
# (x has shape: (batch_size, 3, h, w))
x = self.relu1(self.bn1(self.conv1(x))) # (shape: (batch_size, 64, h/2, w/2))
x = self.relu2(self.bn2(self.conv2(x))) # (shape: (batch_size, 64, h/2, w/2))
x = self.relu3(self.bn3(self.conv3(x))) # (shape: (batch_size, 128, h/2, w/2))
x = self.maxpool(x) # (shape: (batch_size, 128, h/4, w/4))
x = self.layer1(x) # (shape: (batch_size, 256, h/4, w/4))
x = self.layer2(x) # (shape: (batch_size, 512, h/8, w/8))
x = self.layer3(x) # (shape: (batch_size, 1024, h/8, w/8))
x = self.layer4(x) # (shape: (batch_size, 2048, h/8, w/8))
x = self.aspp(x) # (shape: (batch_size, 512, h/8, h/8))
x = self.cls(x) # (shape: (batch_size, num_classes, h/8, w/8))
return x
def get_model(num_classes=19):
model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes)
return model
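# Illustrative shape check (sketch; requires the compiled InPlaceABNSync op):
#
# model = get_model(num_classes=19).cuda()
# x = torch.randn(2, 3, h, w).cuda()
# logits = model(x)  # (2, 19, ~h/8, ~w/8), matching the shape comments in forward()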
|
thumbsup/tests.py | annevandalfsen/screenbird | 121 | 11173109 |
import logging
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from videos.models import Video
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger('thumbsup_test_suite')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class VideosTest(TestCase):
fixtures = ['test_site_data.json',
'test_user_data.json',
'test_channel_data.json',
'test_videos_data.json']
def setUp(self):
self.user1 = User.objects.get(pk = 2) #Normal User
self.video1 = Video.objects.get(pk = 1)
#Login first for other views to work
client = self.client
url = reverse('login')
#Connect to View and attempt to login
response = client.post(url,
{'username': self.user1.username, 'password': 'password'})
def test_vote(self):
#Test the vote() view
client = self.client
url = reverse('vote_ajax')
#Check video thumbs up and down count (should be 0 for both)
self.assertTrue(self.video1.thumbs.thumbs_up_count() == 0)
self.assertTrue(self.video1.thumbs.thumbs_down_count() == 0)
#Like the video
response = client.post(url,
{
'id': 1,
'type': 'up',
'action': ''
})
#Recheck video thumbs up count
video = Video.objects.get(pk=1)
self.assertTrue(video.thumbs.thumbs_up_count() == 1)
#Dislike the video
response = client.post(url,
{
'id': 1,
'type': 'down',
'action': ''
})
#Recheck video thumbs down count
video = Video.objects.get(pk=1)
self.assertTrue(video.thumbs.thumbs_down_count() == 1)
|
tests/sam.py | SantoshSrinivas79/aws-lambda-r-runtime | 134 | 11173118 | import logging
from subprocess import Popen
import boto3
import botocore
from tests import wait_for_port
class LocalApi:
def __init__(self,
host: str = '127.0.0.1',
port: int = 3000,
template_path: str = None,
parameter_overrides: dict = None,
):
self.host = host
self.port = port
command = ['sam', 'local', 'start-api', '--host', self.host, '--port', str(self.port)]
if template_path:
command += ['--template', template_path]
if parameter_overrides:
command += ['--parameter-overrides', create_parameter_overrides(parameter_overrides)]
self.process = Popen(command)
def kill(self):
self.process.kill()
return_code = self.process.wait()
logging.info('Killed server with code %s', return_code)
def wait(self, interval: int = 10, retries: int = 6):
wait_for_port(self.port, self.host, interval=interval, retries=retries)
def get_uri(self) -> str:
return 'http://{}:{}'.format(self.host, self.port)
def start_local_api(host: str = '127.0.0.1',
port: int = 3000,
template_path: str = None,
parameter_overrides: dict = None) -> LocalApi:
server = LocalApi(host=host,
port=port,
template_path=template_path,
parameter_overrides=parameter_overrides)
server.wait()
return server
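# Illustrative usage sketch (the template path is hypothetical):
#
# api = start_local_api(template_path='template.yaml')   # blocks until the port answers
# base_url = api.get_uri()                               # e.g. 'http://127.0.0.1:3000'
# ...                                                    # exercise the endpoints, then:
# api.kill()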
def create_parameter_overrides(parameter_overrides):
return "'" + ' '.join(['ParameterKey={},ParameterValue={}'.format(key, value) for key, value in
parameter_overrides.items()]) + "'"
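# For example (hypothetical key): create_parameter_overrides({'Version': '3_6_0'})
# returns "'ParameterKey=Version,ParameterValue=3_6_0'".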
class LocalLambdaServer:
def __init__(self,
host: str = '127.0.0.1',
port: int = 3001,
template_path: str = None,
parameter_overrides: dict = None,
):
self.host = host
self.port = port
command = ['sam', 'local', 'start-lambda', '--host', self.host, '--port', str(self.port)]
if template_path:
command += ['--template', template_path]
if parameter_overrides:
command += ['--parameter-overrides', create_parameter_overrides(parameter_overrides)]
self.process = Popen(command)
def get_client(self):
config = botocore.client.Config(signature_version=botocore.UNSIGNED,
read_timeout=900,
retries={'max_attempts': 0},
)
return boto3.client('lambda',
endpoint_url="http://{}:{}".format(self.host, self.port),
use_ssl=False,
verify=False,
config=config,
)
def kill(self):
self.process.kill()
return_code = self.process.wait()
logging.info('Killed server with code %s', return_code)
def wait(self, interval: int = 10, retries: int = 6):
wait_for_port(self.port, self.host, interval=interval, retries=retries)
def start_local_lambda(host: str = '127.0.0.1',
port: int = 3001,
template_path: str = None,
parameter_overrides: dict = None) -> LocalLambdaServer:
server = LocalLambdaServer(host=host,
port=port,
template_path=template_path,
parameter_overrides=parameter_overrides)
server.wait()
return server
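# Illustrative invocation sketch (function name and template path are hypothetical):
#
# lambda_server = start_local_lambda(template_path='template.yaml')
# client = lambda_server.get_client()
# result = client.invoke(FunctionName='ExampleFunction', Payload=b'{}')
# lambda_server.kill()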
|
rotkehlchen/exchanges/poloniex.py | rotkehlchenio/rotkehlchen | 137 | 11173131 |
import csv
import hashlib
import hmac
import logging
import os
from collections import defaultdict
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional, Tuple, Union
from urllib.parse import urlencode
import gevent
import requests
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import asset_from_poloniex
from rotkehlchen.constants.assets import A_LEND
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE, QUERY_RETRY_TIMES
from rotkehlchen.errors.asset import UnknownAsset, UnprocessableTradePair, UnsupportedAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Loan,
MarginPosition,
Trade,
TradeType,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeQueryBalances
from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_asset_amount_force_positive,
deserialize_fee,
deserialize_timestamp,
deserialize_timestamp_from_poloniex_date,
get_pair_position_str,
)
from rotkehlchen.types import (
ApiKey,
ApiSecret,
AssetMovementCategory,
Fee,
Location,
Timestamp,
TradePair,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.misc import create_timestamp, ts_now_in_ms
from rotkehlchen.utils.mixins.cacheable import cache_response_timewise
from rotkehlchen.utils.mixins.lockable import protect_with_lock
from rotkehlchen.utils.serialization import jsonloads_dict, jsonloads_list
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_poloniex(poloniex_trade: Dict[str, Any], pair: TradePair) -> Trade:
"""Turn a poloniex trade returned from poloniex trade history to our common trade
history format
Throws:
- UnsupportedAsset due to asset_from_poloniex()
- DeserializationError due to the data being in unexpected format
- UnprocessableTradePair due to the pair data being in an unexpected format
"""
try:
trade_type = TradeType.deserialize(poloniex_trade['type'])
amount = deserialize_asset_amount(poloniex_trade['amount'])
rate = deserialize_price(poloniex_trade['rate'])
perc_fee = deserialize_fee(poloniex_trade['fee'])
base_currency = asset_from_poloniex(get_pair_position_str(pair, 'first'))
quote_currency = asset_from_poloniex(get_pair_position_str(pair, 'second'))
timestamp = deserialize_timestamp_from_poloniex_date(poloniex_trade['date'])
except KeyError as e:
raise DeserializationError(
f'Poloniex trade deserialization error. Missing key entry for {str(e)} in trade dict',
) from e
cost = rate * amount
if trade_type == TradeType.BUY:
fee = Fee(amount * perc_fee)
fee_currency = quote_currency
elif trade_type == TradeType.SELL:
fee = Fee(cost * perc_fee)
fee_currency = base_currency
else:
raise DeserializationError(f'Got unexpected trade type "{trade_type}" for poloniex trade')
if poloniex_trade['category'] == 'settlement':
if trade_type == TradeType.BUY:
trade_type = TradeType.SETTLEMENT_BUY
else:
trade_type = TradeType.SETTLEMENT_SELL
log.debug(
'Processing poloniex Trade',
timestamp=timestamp,
order_type=trade_type,
base_currency=base_currency,
quote_currency=quote_currency,
amount=amount,
fee=fee,
rate=rate,
)
return Trade(
timestamp=timestamp,
location=Location.POLONIEX,
# Since in Poloniex the base currency is the cost currency, iow in poloniex
# for BTC_ETH we buy ETH with BTC and sell ETH for BTC, we need to turn it
# into the Rotkehlchen way which is following the base/quote approach.
base_asset=quote_currency,
quote_asset=base_currency,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee,
fee_currency=fee_currency,
link=str(poloniex_trade['globalTradeID']),
)
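# Illustrative input sketch; the field values are made up but the keys mirror the
# ones read above:
#
# trade_from_poloniex(
#     {
#         'type': 'buy', 'amount': '2.0', 'rate': '0.05', 'fee': '0.0015',
#         'date': '2022-01-02 03:04:05', 'category': 'exchange', 'globalTradeID': 123456,
#     },
#     TradePair('BTC_ETH'),
# )
# # -> Trade with base_asset=ETH, quote_asset=BTC, trade_type=BUY, amount=2.0, rate=0.05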
def process_polo_loans(
msg_aggregator: MessagesAggregator,
data: List[Dict],
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Loan]:
"""Takes in the list of loans from poloniex as returned by the return_lending_history
api call, processes it and returns it into our loan format
"""
new_data = []
for loan in reversed(data):
log.debug('processing poloniex loan', **loan)
try:
close_time = deserialize_timestamp_from_poloniex_date(loan['close'])
open_time = deserialize_timestamp_from_poloniex_date(loan['open'])
if open_time < start_ts:
continue
if close_time > end_ts:
continue
our_loan = Loan(
location=Location.POLONIEX,
open_time=open_time,
close_time=close_time,
currency=asset_from_poloniex(loan['currency']),
fee=deserialize_fee(loan['fee']),
earned=deserialize_asset_amount(loan['earned']),
amount_lent=deserialize_asset_amount(loan['amount']),
)
except UnsupportedAsset as e:
msg_aggregator.add_warning(
f'Found poloniex loan with unsupported asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except UnknownAsset as e:
msg_aggregator.add_warning(
f'Found poloniex loan with unknown asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
msg_aggregator.add_error(
'Deserialization error while reading a poloniex loan. Check '
'logs for more details. Ignoring it.',
)
log.error(
'Deserialization error while reading a poloniex loan',
loan=loan,
error=msg,
)
continue
new_data.append(our_loan)
new_data.sort(key=lambda loan: loan.open_time)
return new_data
def _post_process(before: Dict) -> Dict:
"""Poloniex uses datetimes so turn them into timestamps here"""
after = before
if 'return' in after:
if isinstance(after['return'], list):
for x in range(0, len(after['return'])):
if isinstance(after['return'][x], dict):
if('datetime' in after['return'][x] and
'timestamp' not in after['return'][x]):
after['return'][x]['timestamp'] = float(
create_timestamp(after['return'][x]['datetime']),
)
return after
class Poloniex(ExchangeInterface): # lgtm[py/missing-call-to-init]
def __init__(
self,
name: str,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super().__init__(
name=name,
location=Location.POLONIEX,
api_key=api_key,
secret=secret,
database=database,
)
self.uri = 'https://poloniex.com/'
self.public_uri = self.uri + 'public?command='
self.session.headers.update({'Key': self.api_key})
self.msg_aggregator = msg_aggregator
def first_connection(self) -> None:
if self.first_connection_made:
return
self.first_connection_made = True
def edit_exchange_credentials(
self,
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
) -> bool:
changed = super().edit_exchange_credentials(api_key, api_secret, passphrase)
if api_key is not None:
self.session.headers.update({'Key': self.api_key})
return changed
def validate_api_key(self) -> Tuple[bool, str]:
try:
self.return_fee_info()
except RemoteError as e:
error = str(e)
if 'Invalid API key' in error:
return False, 'Provided API Key or secret is invalid'
# else reraise
raise
return True, ''
def api_query_dict(self, command: str, req: Optional[Dict] = None) -> Dict:
result = self._api_query(command, req)
if not isinstance(result, Dict):
raise RemoteError(
f'Poloniex query for {command} did not return a dict result. Result: {result}',
)
return result
def api_query_list(self, command: str, req: Optional[Dict] = None) -> List:
result = self._api_query(command, req)
if not isinstance(result, List):
raise RemoteError(
f'Poloniex query for {command} did not return a list result. Result: {result}',
)
return result
def _single_query(self, command: str, req: Dict[str, Any]) -> Optional[requests.Response]:
"""A single api query for poloniex
Returns the response if all went well or None if a recoverable poloniex
        error occurred, such as a 504.
Can raise:
- RemoteError if there is a problem with the response
- ConnectionError if there is a problem connecting to poloniex.
"""
if command in ('returnTicker', 'returnCurrencies'):
log.debug(f'Querying poloniex for {command}')
response = self.session.get(self.public_uri + command, timeout=DEFAULT_TIMEOUT_TUPLE)
else:
req['command'] = command
req['nonce'] = ts_now_in_ms()
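            # Private (trading) endpoints: the urlencoded POST body is signed
            # with HMAC-SHA512 using the API secret and sent in the 'Sign' header.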
post_data = str.encode(urlencode(req))
sign = hmac.new(self.secret, post_data, hashlib.sha512).hexdigest()
self.session.headers.update({'Sign': sign})
response = self.session.post(
'https://poloniex.com/tradingApi',
req,
timeout=DEFAULT_TIMEOUT_TUPLE,
)
if response.status_code == 504:
# backoff and repeat
return None
if response.status_code != 200:
raise RemoteError(
f'Poloniex query responded with error status code: {response.status_code}'
f' and text: {response.text}',
)
# else all is good
return response
def _api_query(self, command: str, req: Optional[Dict] = None) -> Union[Dict, List]:
"""An api query to poloniex. May make multiple requests
Can raise:
- RemoteError if there is a problem reaching poloniex or with the returned response
"""
if req is None:
req = {}
log.debug(
'Poloniex API query',
command=command,
post_data=req,
)
tries = QUERY_RETRY_TIMES
while tries >= 0:
try:
response = self._single_query(command, req)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Poloniex API request failed due to {str(e)}') from e
if response is None:
if tries >= 1:
backoff_seconds = 20 / tries
log.debug(
f'Got a recoverable poloniex error. '
f'Backing off for {backoff_seconds}',
)
gevent.sleep(backoff_seconds)
tries -= 1
continue
else:
break
if response is None:
raise RemoteError(
f'Got a recoverable poloniex error and did not manage to get a '
f'request through even after {QUERY_RETRY_TIMES} '
f'incremental backoff retries',
)
result: Union[Dict, List]
try:
if command == 'returnLendingHistory':
result = jsonloads_list(response.text)
else:
# For some reason poloniex can also return [] for an empty trades result
if response.text == '[]':
result = {}
else:
result = jsonloads_dict(response.text)
result = _post_process(result)
except JSONDecodeError as e:
raise RemoteError(f'Poloniex returned invalid JSON response: {response.text}') from e
if isinstance(result, dict) and 'error' in result:
raise RemoteError(
'Poloniex query for "{}" returned error: {}'.format(
command,
result['error'],
))
return result
def return_currencies(self) -> Dict:
response = self.api_query_dict('returnCurrencies')
return response
def return_fee_info(self) -> Dict:
response = self.api_query_dict('returnFeeInfo')
return response
def return_lending_history(
self,
start_ts: Optional[Timestamp] = None,
end_ts: Optional[Timestamp] = None,
limit: Optional[int] = None,
) -> List:
"""Default limit for this endpoint seems to be 500 when I tried.
So to be sure all your loans are included put a very high limit per call
and also check if the limit was reached after each call.
Also maximum limit seems to be 12660
"""
req: Dict[str, Union[int, Timestamp]] = {}
if start_ts is not None:
req['start'] = start_ts
if end_ts is not None:
req['end'] = end_ts
if limit is not None:
req['limit'] = limit
response = self.api_query_list('returnLendingHistory', req)
return response
def return_trade_history(
self,
start: Timestamp,
end: Timestamp,
) -> Dict[str, List[Dict[str, Any]]]:
"""If `currency_pair` is all, then it returns a dictionary with each key
being a pair and each value a list of trades. If `currency_pair` is a specific
pair then a list is returned"""
limit = 10000
pair = 'all'
data: DefaultDict[str, List[Dict[str, Any]]] = defaultdict(list)
while True:
new_data = self.api_query_dict('returnTradeHistory', {
'currencyPair': pair,
'start': start,
'end': end,
'limit': limit,
})
results_length = 0
for _, v in new_data.items():
results_length += len(v)
if data == {} and results_length < limit:
return new_data # simple case - only one query needed
latest_ts = start
# add results to data and prepare for next query
for market, trades in new_data.items():
                existing_ids = {x['globalTradeID'] for x in data[market]}
for trade in trades:
try:
timestamp = deserialize_timestamp_from_poloniex_date(trade['date'])
latest_ts = max(latest_ts, timestamp)
# since we query again from last ts seen make sure no duplicates make it in
if trade['globalTradeID'] not in existing_ids:
data[market].append(trade)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_warning(
'Error deserializing a poloniex trade. Check the logs for details',
)
log.error(
'Error deserializing poloniex trade',
trade=trade,
error=msg,
)
continue
if results_length < limit:
break # last query has less than limit. We are done.
# otherwise we query again from the last ts seen in the last result
start = latest_ts
continue
return data
def return_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Dict:
response = self.api_query_dict(
'returnDepositsWithdrawals',
{'start': start_ts, 'end': end_ts},
)
return response
# ---- General exchanges interface ----
@protect_with_lock()
@cache_response_timewise()
def query_balances(self) -> ExchangeQueryBalances:
try:
resp = self.api_query_dict('returnCompleteBalances', {"account": "all"})
except RemoteError as e:
msg = (
'Poloniex API request failed. Could not reach poloniex due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
assets_balance: Dict[Asset, Balance] = {}
for poloniex_asset, v in resp.items():
try:
available = deserialize_asset_amount(v['available'])
on_orders = deserialize_asset_amount(v['onOrders'])
except DeserializationError as e:
self.msg_aggregator.add_error(
f'Could not deserialize amount from poloniex due to '
f'{str(e)}. Ignoring its balance query.',
)
continue
if available != ZERO or on_orders != ZERO:
try:
asset = asset_from_poloniex(poloniex_asset)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found unsupported poloniex asset {e.asset_name}. '
f' Ignoring its balance query.',
)
continue
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found unknown poloniex asset {e.asset_name}. '
f' Ignoring its balance query.',
)
continue
except DeserializationError:
log.error(
f'Unexpected poloniex asset type. Expected string '
f' but got {type(poloniex_asset)}',
)
self.msg_aggregator.add_error(
'Found poloniex asset entry with non-string type. '
' Ignoring its balance query.',
)
continue
if asset == A_LEND: # poloniex mistakenly returns LEND balances
continue # https://github.com/rotki/rotki/issues/2530
try:
usd_price = Inquirer().find_usd_price(asset=asset)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing poloniex balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
amount = available + on_orders
usd_value = amount * usd_price
assets_balance[asset] = Balance(
amount=amount,
usd_value=usd_value,
)
log.debug(
'Poloniex balance query',
currency=asset,
amount=amount,
usd_value=usd_value,
)
return assets_balance, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Tuple[List[Trade], Tuple[Timestamp, Timestamp]]:
raw_data = self.return_trade_history(
start=start_ts,
end=end_ts,
)
results_length = 0
for _, v in raw_data.items():
results_length += len(v)
log.debug('Poloniex trade history query', results_num=results_length)
our_trades = []
for pair, trades in raw_data.items():
for trade in trades:
category = trade.get('category', None)
try:
if category in ('exchange', 'settlement'):
timestamp = deserialize_timestamp_from_poloniex_date(trade['date'])
if timestamp < start_ts or timestamp > end_ts:
continue
our_trades.append(trade_from_poloniex(trade, TradePair(pair)))
elif category == 'marginTrade':
# We don't take poloniex margin trades into account at the moment
continue
else:
self.msg_aggregator.add_error(
f'Error deserializing a poloniex trade. Unknown trade '
f'category {category} found.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found poloniex trade with unsupported asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found poloniex trade with unknown asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except (UnprocessableTradePair, DeserializationError) as e:
self.msg_aggregator.add_error(
'Error deserializing a poloniex trade. Check the logs '
'and open a bug report.',
)
log.error(
'Error deserializing poloniex trade',
trade=trade,
error=str(e),
)
continue
return our_trades, (start_ts, end_ts)
def parse_loan_csv(self) -> List:
"""Parses (if existing) the lendingHistory.csv and returns the history in a list
It can throw OSError, IOError if the file does not exist and csv.Error if
the file is not proper CSV"""
# the default filename, and should be (if at all) inside the data directory
path = os.path.join(self.db.user_data_dir, "lendingHistory.csv")
lending_history = []
with open(path, 'r') as csvfile:
history = csv.reader(csvfile, delimiter=',', quotechar='|')
next(history) # skip header row
for row in history:
try:
lending_history.append({
'currency': asset_from_poloniex(row[0]),
'earned': deserialize_asset_amount(row[6]),
'amount': deserialize_asset_amount(row[2]),
'fee': deserialize_asset_amount(row[5]),
'open': row[7],
'close': row[8],
})
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
                        f'Found poloniex loan with unsupported asset'
                        f' {e.asset_name}. Ignoring it.',
)
continue
except DeserializationError as e:
self.msg_aggregator.add_warning(
f'Failed to deserialize amount from loan due to {str(e)}. Ignoring it.',
)
continue
return lending_history
def query_loan_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
from_csv: Optional[bool] = False,
) -> List:
"""
WARNING: Querying from returnLendingHistory endpoint instead of reading from
the CSV file can potentially return unexpected/wrong results.
That is because the `returnLendingHistory` endpoint has a hidden limit
of 12660 results. In our code we use the limit of 12000 but poloniex may change
        the endpoint to have a lower limit, in which case this code will break.
To be safe compare results of both CSV and endpoint to make sure they agree!
"""
try:
if from_csv:
return self.parse_loan_csv()
except (OSError, csv.Error):
pass
loans_query_return_limit = 12000
result = self.return_lending_history(
start_ts=start_ts,
end_ts=end_ts,
limit=loans_query_return_limit,
)
data = list(result)
log.debug('Poloniex loan history query', results_num=len(data))
# since I don't think we have any guarantees about order of results
# using a set of loan ids is one way to make sure we get no duplicates
# if poloniex can guarantee me that the order is going to be ascending/descending
# per open/close time then this can be improved
id_set = set()
while len(result) == loans_query_return_limit:
# Find earliest timestamp to re-query the next batch
min_ts = end_ts
for loan in result:
ts = deserialize_timestamp_from_poloniex_date(loan['close'])
min_ts = min(min_ts, ts)
id_set.add(loan['id'])
result = self.return_lending_history(
start_ts=start_ts,
end_ts=min_ts,
limit=loans_query_return_limit,
)
log.debug('Poloniex loan history query', results_num=len(result))
for loan in result:
if loan['id'] not in id_set:
data.append(loan)
return data
def query_exchange_specific_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Optional[Any]:
"""The exchange specific history for poloniex is its loans"""
return self.query_loan_history(
start_ts=start_ts,
end_ts=end_ts,
from_csv=True, # TODO: Change this and make them queriable
)
def _deserialize_asset_movement(
self,
movement_type: AssetMovementCategory,
movement_data: Dict[str, Any],
) -> Optional[AssetMovement]:
"""Processes a single deposit/withdrawal from polo and deserializes it
Can log error/warning and return None if something went wrong at deserialization
"""
try:
if movement_type == AssetMovementCategory.DEPOSIT:
fee = Fee(ZERO)
uid_key = 'depositNumber'
transaction_id = get_key_if_has_val(movement_data, 'txid')
else:
fee = deserialize_fee(movement_data['fee'])
uid_key = 'withdrawalNumber'
split = movement_data['status'].split(':')
if len(split) != 2:
transaction_id = None
else:
transaction_id = split[1].lstrip()
if transaction_id == '':
transaction_id = None
asset = asset_from_poloniex(movement_data['currency'])
return AssetMovement(
location=Location.POLONIEX,
category=movement_type,
address=deserialize_asset_movement_address(movement_data, 'address', asset),
transaction_id=transaction_id,
timestamp=deserialize_timestamp(movement_data['timestamp']),
asset=asset,
amount=deserialize_asset_amount_force_positive(movement_data['amount']),
fee_asset=asset,
fee=fee,
link=str(movement_data[uid_key]),
)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found {str(movement_type)} of unsupported poloniex asset '
f'{e.asset_name}. Ignoring it.',
)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found {str(movement_type)} of unknown poloniex asset '
f'{e.asset_name}. Ignoring it.',
)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Unexpected data encountered during deserialization of a poloniex '
'asset movement. Check logs for details and open a bug report.',
)
log.error(
f'Unexpected data encountered during deserialization of poloniex '
f'{str(movement_type)}: {movement_data}. Error was: {msg}',
)
return None
def query_online_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[AssetMovement]:
result = self.return_deposits_withdrawals(start_ts, end_ts)
log.debug(
'Poloniex deposits/withdrawal query',
results_num=len(result['withdrawals']) + len(result['deposits']),
)
movements = []
for withdrawal in result['withdrawals']:
asset_movement = self._deserialize_asset_movement(
movement_type=AssetMovementCategory.WITHDRAWAL,
movement_data=withdrawal,
)
if asset_movement:
movements.append(asset_movement)
for deposit in result['deposits']:
asset_movement = self._deserialize_asset_movement(
movement_type=AssetMovementCategory.DEPOSIT,
movement_data=deposit,
)
if asset_movement:
movements.append(asset_movement)
return movements
def query_online_margin_history(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[MarginPosition]:
return [] # noop for poloniex
def query_online_income_loss_expense(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[LedgerAction]:
return [] # noop for poloniex
|
examples/eigenvector_localization.py | jafluri/pygsp | 341 | 11173146 | r"""
Localization of Fourier modes
=============================
The Fourier modes (the eigenvectors of the graph Laplacian) can be localized in
the spatial domain. As a consequence, graph signals can be localized in both
space and frequency (which is impossible for Euclidean domains or manifolds, by
Heisenberg's uncertainty principle).
This example demonstrates that the more isolated a node is, the more a Fourier
mode will be localized on it.
The mutual coherence between the basis of Kronecker deltas and the basis formed
by the eigenvectors of the Laplacian, :attr:`pygsp.graphs.Graph.coherence`, is
a measure of the localization of the Fourier modes. The larger the value, the
more localized the eigenvectors can be.
See `Global and Local Uncertainty Principles for Signals on Graphs
<https://arxiv.org/abs/1603.03030>`_ for details.
"""
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import pygsp as pg
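# A quick sketch (not part of the original example): the mutual coherence of two
# orthonormal bases is the largest absolute inner product between their elements,
# so against the Kronecker-delta basis it should simply equal the largest
# absolute entry of the Fourier basis U.
_g = pg.graphs.Graph([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
_g.compute_fourier_basis()
print(f'coherence = {_g.coherence:.3f}, max |U| = {np.abs(_g.U).max():.3f}')  # expected to agree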
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
for w, ax in zip([10, 1, 0.1, 0.01], axes.flatten()):
adjacency = [
[0, w, 0, 0],
[w, 0, 1, 0],
[0, 1, 0, 1],
[0, 0, 1, 0],
]
graph = pg.graphs.Graph(adjacency)
graph.compute_fourier_basis()
# Plot eigenvectors.
ax.plot(graph.U)
ax.set_ylim(-1, 1)
ax.set_yticks([-1, 0, 1])
    ax.legend([rf'$u_{i}(v)$, $\lambda_{i}={graph.e[i]:.1f}$' for i in
               range(graph.n_vertices)], loc='upper right')
    ax.text(0, -0.9, f'coherence = {graph.coherence:.2f}'
            rf'$\in [{1/np.sqrt(graph.n_vertices)}, 1]$')
# Plot vertices.
ax.set_xticks(range(graph.n_vertices))
ax.set_xticklabels([f'$v_{i}$' for i in range(graph.n_vertices)])
# Plot graph.
x, y = np.arange(0, graph.n_vertices), -1.20*np.ones(graph.n_vertices)
line = mpl.lines.Line2D(x, y, lw=3, color='k', marker='.', markersize=20)
line.set_clip_on(False)
ax.add_line(line)
# Plot edge weights.
for i in range(graph.n_vertices - 1):
j = i+1
ax.text(i+0.5, -1.15, f'$w_{{{i}{j}}} = {adjacency[i][j]}$',
horizontalalignment='center')
fig.tight_layout()
|
tools/FixIncompleteCrashReport/fixer.py | JiPRA/openlierox | 192 | 11173161 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
if len(sys.argv) != 2:
print "usage:", sys.argv[0], " <mail>"
exit(1)
mail = open(sys.argv[1])
if not mail:
print "cannot open", sys.argv[1]
exit(1)
import os
symsdir = ""
import re
import string
def findSymfile(module):
fn = symsdir + "/" + module + ".x86.sym"
if os.path.exists(fn):
return fn
return None
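# The *.x86.sym files appear to follow the Breakpad text symbol format:
#   FILE <number> <source file>
#   FUNC <address> <size> <parameter size> <function name>
#   <address> <size> <line number> <file number>
# findFunction() walks these records to resolve a relative address into
# "function (file:line)".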
def findFunction(searchaddr, symfile):
f = open(symfile)
curfunc = None
files = {}
for line in f.readlines():
line = string.strip(line, "\n")
file = re.match("FILE (?P<nr>[0-9]+) (?P<file>.*)", line)
if file:
files[int(file.group("nr"))] = os.path.basename(file.group("file").replace("\\", "/"))
continue
func = re.match("FUNC (?P<addr>[0-9a-f]+) (?P<size>[0-9a-f]+) [0-9a-f]+ (?P<func>.*)", line)
if func:
# if there was an earlier curfunc, we didn't find the exact line
if curfunc: break
addr = int(func.group("addr"), 16)
size = int(func.group("size"), 16)
# checks if we are already ahead
if addr > searchaddr: break
# check if out-of-range
if addr + size <= searchaddr: continue
# this is the right function!
curfunc = func.group("func")
continue
if not curfunc: continue
line = re.match("(?P<addr>[0-9a-f]+) (?P<size>[0-9a-f]+) (?P<line>[0-9]+) (?P<filenum>[0-9]+)", line)
if not line: break
addr = int(line.group("addr"), 16)
size = int(line.group("size"), 16)
# checks if we are already ahead
if addr > searchaddr: break
# check if out-of-range
if addr + size < searchaddr: continue
# this is the right line!
return curfunc + " (%s:%s)" % (files[int(line.group("filenum"))], line.group("line"))
return curfunc
inthread = False
for line in mail.readlines():
line = string.strip(line, "\n")
if not inthread:
print line
subj = re.match("^Subject: (\[.*\])? OpenLieroX (?P<ver>.*) crash report", line)
if subj:
symsdir = subj.group("ver").replace(" ", "-")
continue
opsys = re.match("Operating system: (?P<os>\S+)", line)
if opsys:
symsdir = symsdir + "-" + opsys.group("os") + "-syms"
if not os.path.exists(symsdir):
print "cannot find symsdir", symsdir
quit(1)
inthread = re.match("^Thread [0-9]+.*$", line)
else:
m = re.match("\s*(?P<tid>[0-9]+)\s+(?P<mod>\S+)\s+([0-9.]+\s*)?0x[0-9a-f]+\s*\(0x(?P<reladdr>[0-9a-f]+)\)\s*(?P<funcname>\S+)", line)
if not m:
print line
inthread = re.match("\s*(?P<tid>[0-9]+)", line)
else:
fallbackstr = "%s %s %s %s" % ( m.group("tid").rjust(2), m.group("mod").ljust(20), ("(0x" + m.group("reladdr") + ")").rjust(8), "??")
if m.group("funcname") == "??":
symfile = findSymfile(m.group("mod"))
if not symfile: print fallbackstr; continue
func = findFunction(int(m.group("reladdr"),16), symfile)
if not func: print fallbackstr; continue
print "%s %s %s %s" % ( m.group("tid").rjust(2), m.group("mod").ljust(20), ("(0x" + m.group("reladdr") + ")").rjust(8), func)
else:
print fallbackstr
|
tests/math/soft_one_hot_test.py | claycurry34/e3nn | 385 | 11173163 | <filename>tests/math/soft_one_hot_test.py
import pytest
import torch
from e3nn.math import soft_one_hot_linspace
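# soft_one_hot_linspace(x, start, end, number, basis, cutoff) embeds each scalar
# in x into `number` smooth basis functions spanning [start, end]; with
# cutoff=True the embedding is expected to vanish (or nearly so, for the
# gaussian basis) outside that interval, which is what these tests check.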
@pytest.mark.parametrize('basis', ['gaussian', 'cosine', 'fourier', 'bessel', 'smooth_finite'])
def test_zero_out(basis):
x1 = torch.linspace(-2.0, -1.1, 20)
x2 = torch.linspace(2.1, 3.0, 20)
x = torch.cat([x1, x2])
y = soft_one_hot_linspace(x, -1.0, 2.0, 5, basis, cutoff=True)
if basis == 'gaussian':
assert y.abs().max() < 0.22
else:
assert y.abs().max() == 0.0
@pytest.mark.parametrize('basis', ['gaussian', 'cosine', 'fourier', 'smooth_finite'])
@pytest.mark.parametrize('cutoff', [True, False])
def test_normalized(basis, cutoff):
x = torch.linspace(-14.0, 105.0, 50)
y = soft_one_hot_linspace(x, -20.0, 120.0, 12, basis, cutoff)
assert 0.4 < y.pow(2).sum(1).min()
assert y.pow(2).sum(1).max() < 2.0
|