ext | sha | content
---|---|---
py | 1a31d896acc586e613caec39befa2075affdb6b3 | # Generated by Django 3.1.2 on 2020-11-18 11:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('metadata', '0017_add_gov_organisations'),
]
operations = [
migrations.AlterModelOptions(
name='organisation',
options={'ordering': ('name',)},
),
migrations.AlterUniqueTogether(
name='organisation',
unique_together={('name', 'organisation_type')},
),
]
|
py | 1a31d922f43f5a9429ab747c5796d942f141ee7c | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.pattern.equalsmatch import EqualsMatch
from programy.parser.exceptions import ParserException
class PatternBotNode(PatternNode):
def __init__(self, attribs, text, userid='*'):
PatternNode.__init__(self, userid)
if 'name' in attribs:
self._property = attribs['name']
elif 'property' in attribs:
self._property = attribs['property']
elif text:
self._property = text
else:
raise ParserException("Invalid bot node, neither name or property specified as attribute or text")
def is_bot(self):
return True
@property
def property(self):
return self._property
def to_xml(self, client_context, include_user=False):
string = ""
if include_user is True:
string += '<bot userid="%s" property="%s">\n'%(self.userid, self.property)
else:
string += '<bot property="%s">\n' % self.property
string += super(PatternBotNode, self).to_xml(client_context)
string += "</bot>"
return string
def to_string(self, verbose=True):
if verbose is True:
return "BOT [%s] [%s] property=[%s]" % (self.userid, self._child_count(verbose), self.property)
return "BOT property=[%s]" % (self.property)
def equivalent(self, other):
if other.is_bot():
if self.userid == other.userid:
if self.property == other.property:
return True
return False
def equals(self, client_context, words, word_no):
word = words.word(word_no)
if self.userid != '*':
if self.userid != client_context.userid:
return EqualsMatch(False, word_no)
if client_context.brain.properties.has_property(self.property):
if word == client_context.brain.properties.property(self.property):
YLogger.debug(client_context, "Found word [%s] as bot property", word)
return EqualsMatch(True, word_no, word)
return EqualsMatch(False, word_no)
|
py | 1a31d963385be0023f0261bf5abb9d83780fde88 | from django.test import TestCase
from django.utils.timezone import now, timedelta
from polls.models import Choice, Question
from django.contrib.auth.models import User
from django.urls import reverse
# Create your tests here.
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
time = now() + timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
time = now() - timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
time = now() - timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
class IndexViewTests(TestCase):
def test_get_no_question(self):
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
# self.assertContains(response, 'No polls are available.')
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_get_question(self):
Question.objects.create(question_text='Demo question.')
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['latest_question_list'],
['<Question: Demo question.>'])
class DetailViewTests(TestCase):
def setUp(self) -> None:
self.user = User.objects.create_user(username='libin', password='123')
self.question = Question.objects.create(
question_text='unit_test question?')
self.choice_good = Choice.objects.create(question=self.question,
choice_text='good',
votes=0)
self.choice_soso = Choice.objects.create(question=self.question,
choice_text='soso',
votes=0)
self.choice_bad = Choice.objects.create(question=self.question,
choice_text='bad',
votes=0)
def tearDown(self) -> None:
self.question.delete()
self.user.delete()
def test_get(self):
self.client.login(username='libin', password='123')
response = self.client.get(
reverse('polls:detail', kwargs={'id': self.question.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(str(response.context['question']),
self.question.question_text)
def test_post(self):
self.client.login(username='libin', password='123')
response = self.client.post(reverse('polls:detail',
kwargs={'id': self.question.id}),
data={
'choice': self.choice_good.id,
})
self.assertEqual(response.status_code, 302)
good_choice_votes = Choice.objects.get(id=self.choice_good.id).votes
self.assertEqual(good_choice_votes, 1)
|
py | 1a31d9daa2d4011e485ebe521e0acce1e2b8a0b5 | """
make_training_data.py
Executing functions for creating npz files containing the training data
Functions will create training data for either
- Patchwise sampling
- Fully convolutional training of single image conv-nets
- Fully convolutional training of movie conv-nets
Files should be placed in training directories with each separate
dataset getting its own folder
@author: David Van Valen
"""
"""
Import packages
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import glob
import os
import skimage as sk
import scipy as sp
from scipy import ndimage
from skimage import feature
from sklearn.utils import class_weight
from deepcell import get_image
from deepcell import format_coord as cf
from skimage import morphology as morph
from skimage.transform import resize
from deepcell import make_training_data
# Define maximum number of training examples
max_training_examples = 1e6
window_size = 30
# Load data
direc_name = '/data/training_data/nuclei_broad/'
file_name_save = os.path.join('/data/training_data_npz/nuclei_broad/', 'nuclei_broad_same_disc_61x61.npz')
training_direcs = ['set1', 'set2', 'set3', 'set4', 'set5']
channel_names = ['nuclear']
# Specify the number of feature masks that are present
num_of_features = 2
# Specify which feature is the edge feature
edge_feature = [1,0,0]
# Create the training data
make_training_data(max_training_examples = max_training_examples, window_size_x = window_size, window_size_y = window_size,
direc_name = direc_name,
file_name_save = file_name_save,
training_direcs = training_direcs,
channel_names = channel_names,
num_of_features = 2,
edge_feature = edge_feature,
dilation_radius = 0,
border_mode = "same",
sample_mode = "all",
output_mode = "disc",
reshape_size = 512,
display = False,
verbose = True,
process_std = False)
|
py | 1a31da73103c7018bd84d5455d1dd17ba47ad9ba | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 [email protected]
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from sexpr import sexp
import pprint
import copy
import hexdump
DEBUG = 0
def u8(x):
return x & 0xff
def i16(x):
return x & 0xffff
class LEDVMError(Exception):
pass
class OpCodeInfo(object):
def __init__(self, name, data_len, arg_type):
self.name = name
self.data_len = data_len
self.arg_type = arg_type
ARG_NONE = 0
    ARG_REFERENCES = 1
class OpCode(object):
SHOW_HSV = 0x00
SHOW_RGB = 0x01
LOAD_PIXEL = 0x02
ADD_VEC3 = 0x03
SUB_VEC3 = 0x04
IF_EQ = 0x05
    OP_CODE_TABLE = {
        # CODE       , MNEMONIC                   , DATA_SIZE , ARG_TYPE
        SHOW_HSV   : OpCodeInfo("SHOW_HSV"   , 0 , OpCodeInfo.ARG_NONE)       ,
        SHOW_RGB   : OpCodeInfo("SHOW_RGB"   , 0 , OpCodeInfo.ARG_NONE)       ,
        LOAD_PIXEL : OpCodeInfo("LOAD_PIXEL" , 3 , OpCodeInfo.ARG_REFERENCES) ,
        ADD_VEC3   : OpCodeInfo("ADD_VEC3"   , 3 , OpCodeInfo.ARG_REFERENCES) ,
        SUB_VEC3   : OpCodeInfo("SUB_VEC3"   , 3 , OpCodeInfo.ARG_REFERENCES) ,
        IF_EQ      : OpCodeInfo("IF_EQ"      , 3 , OpCodeInfo.ARG_REFERENCES) ,
    }
@staticmethod
def to_string(code):
if code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
return "{}<{}>".format(name, code)
else:
return "{}<{}>".format("UnknownOpCode", code)
def __init__(self, name, data_len=0):
self.name = name
self.data_len = data_len
class Register(object):
# Register codes
PIXEL_NUM = 0
OUTPUT_TYPE = 1
KEY_STATE = 2
MOUSE_X = 3
MOUSE_Y = 4
OUTPUT_TYPE_RGB = 0
OUTPUT_TYPE_HSV = 1
def __init__(self, name, default_value=0):
self.name = name
self.value = default_value
self.default_value = default_value
class LEDEffectVM(object):
REGISTER_TABLE = {
Register.PIXEL_NUM : Register("PIXEL_NUM", 0),
Register.OUTPUT_TYPE : Register("OUTPUT_TYPE", 0),
Register.KEY_STATE : Register("KEY_STATE", 0),
Register.MOUSE_X : Register("MOUSE_X", 0),
Register.MOUSE_Y : Register("MOUSE_Y", 0),
}
def __init__(self, led_program_table={'main': []}, num_pixels=None):
self.pixels = [(0, 0, 0)] * num_pixels
self.led_program_table = led_program_table
        self.set_active_program('main')
self.instr_ptr = 0
self.registers = {}
for reg in self.REGISTER_TABLE:
self.registers[reg] = self.REGISTER_TABLE[reg].default_value
    def set_active_program(self, name):
self._current_program_name = name
self.current_program = self.led_program_table[name]
def goto_start(self):
self.instr_ptr = 0
def rel_jump(self, offset):
self.instr_ptr += (offset)
def get_next_word(self):
if self.instr_ptr >= len(self.current_program):
return None
result = self.current_program[self.instr_ptr]
self.instr_ptr += 1
return result
def read_op_code(self):
code = self.get_next_word()
        if code is None:
return None, None
self.vm_assert(code in OpCode.OP_CODE_TABLE, "Invalid OpCode: {}".format(code))
op_code = OpCode.OP_CODE_TABLE[code]
data = []
for i in range(op_code.data_len):
data.append(self.get_next_word())
# if DEBUG >= 1
if DEBUG >= 5:
print("Instruction: {}".format(self.instr_ptr))
print("Current code: {}, data:{}".format(
OpCode.to_string(code), data
)
)
return code, data
REFERENCE_TYPE_IMMEDIATE = 0
REFERENCE_TYPE_REGISTER = 1
REFERENCE_TYPE_PIXEL = 2
    def lookup_refrence(self, ref):
        # References either an immediate value, a register, or a component
        # of the current pixel.  Format of reference values (in hex):
        # * 00xx -> single byte immediate value xx
        # * 01xx -> value of register number xx
        # * 02xx -> component xx of the current pixel (0=r/h, 1=g/s, 2=b/v)
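        # Added worked example for clarity: ref = 0x0102 decodes to
        # ref_type = 0x01 (register) and value = 0x02, i.e. the KEY_STATE
        # register; ref = 0x0005 is just the immediate value 5, and
        # ref = 0x0200 selects the r/h component of the current pixel.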
value = (ref >> 0) & 0xff
ref_type = (ref >> 8) & 0xff
if ref_type == self.REFERENCE_TYPE_IMMEDIATE:
return value
elif ref_type == self.REFERENCE_TYPE_PIXEL:
assert(value < 3)
return self.get_current_pixel()[value]
elif ref_type == self.REFERENCE_TYPE_REGISTER:
assert(value in self.REGISTER_TABLE)
return self.registers[value]
def get_pixel(self, pixel_num):
return self.pixels[pixel_num]
def get_pixel_type(self, pixel_num):
return self.registers[Register.OUTPUT_TYPE]
def get_current_pixel(self):
return self.pixels[self.registers[Register.PIXEL_NUM]]
def set_current_pixel(self, x, y, z):
self.pixels[self.registers[Register.PIXEL_NUM]] = (x, y, z)
def execute_op_code(self, code, data):
"""
Return True if the program has finished executing
"""
if code == OpCode.SHOW_HSV:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_HSV
return True
elif code == OpCode.SHOW_RGB:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_RGB
return True
elif code == OpCode.LOAD_PIXEL:
self.set_current_pixel(
self.lookup_refrence(data[0]),
self.lookup_refrence(data[1]),
self.lookup_refrence(data[2])
)
elif code == OpCode.ADD_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] + self.lookup_refrence(data[0])),
u8(old_value[1] + self.lookup_refrence(data[1])),
u8(old_value[2] + self.lookup_refrence(data[2]))
)
elif code == OpCode.SUB_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] - self.lookup_refrence(data[0])),
u8(old_value[1] - self.lookup_refrence(data[1])),
u8(old_value[2] - self.lookup_refrence(data[2]))
)
elif code == OpCode.IF_EQ:
lhs = self.lookup_refrence(data[0])
rhs = self.lookup_refrence(data[1])
jmp_pos = self.lookup_refrence(data[2])
if DEBUG >= 5:
print("lhs, rhs, == :", lhs, rhs, lhs == rhs)
if lhs != rhs:
self.rel_jump(jmp_pos)
else:
raise LEDVMError("Unknown opcode {}".format(code))
return False
def execute_program(self, program_name):
        self.set_active_program(program_name)
for (pixel_i, _) in enumerate(self.pixels):
self.execute_program_pixel(pixel_i)
def execute_program_pixel(self, pixel_number):
self.goto_start()
self.registers[Register.PIXEL_NUM] = pixel_number
is_running = True
if DEBUG:
print("Starting program for pixel: {}".format(pixel_number))
while is_running:
(code, data) = self.read_op_code()
            if code is None:
                break
if DEBUG:
print("(OpCode {}, Data {})".format(code, data))
is_running = not self.execute_op_code(code, data)
def vm_assert(self, exp, msg=""):
        if not exp:
            self.print_core_dump(msg)
            if msg == "":
                raise LEDVMError("LEDVMError: unspecified error")
            else:
                raise LEDVMError("LEDVMError: {}".format(msg))
def print_core_dump(self, error_msg):
print(
"\n"
"Core dump while executing program '{}':\n"
"Error message: {}\n"
"instr_ptr: {}\n"
"program: {}\n"
.format(
self._current_program_name,
error_msg,
self.instr_ptr,
self.current_program
)
)
class LEDEffectVMParser(object):
def __init__(self):
# The Parser needs the inverse mappings of the op_code/register lookup
# tables, so generate them here
self.op_code_lookup_table = {}
for code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
self.op_code_lookup_table[name] = code
self.register_lookup_table = {}
for reg in LEDEffectVM.REGISTER_TABLE:
name = LEDEffectVM.REGISTER_TABLE[reg].name
self.register_lookup_table[name] = reg
# def exp_as_arrays(self, exp):
# print(exp)
# arr = exp[0]
# result = []
# for child in arr:
# result.append(self.exp_as_arrays(child))
# return result
def parse_asm(self, program_str):
sexpression = sexp.parseString(program_str, parseAll=True)
if DEBUG:
print(sexpression)
pprint.pprint(sexpression)
# sexpression = self.exp_as_arrays(sexpression)
byte_code = []
byte_code += self.parse_program(sexpression)
return byte_code
def generate_ref(self, ref):
if isinstance(ref, int):
assert(ref <= 255)
ref_type = LEDEffectVM.REFERENCE_TYPE_IMMEDIATE
value = ref
elif isinstance(ref, str):
if ref in self.register_lookup_table:
ref_type = LEDEffectVM.REFERENCE_TYPE_REGISTER
value = self.register_lookup_table[ref]
elif ref in ('r', 'g', 'b', 'h', 's', 'v'):
ref_type = LEDEffectVM.REFERENCE_TYPE_PIXEL
value = {
'r': 0,
'h': 0,
'g': 1,
's': 1,
'b': 2,
'v': 2,
}[ref]
else:
raise LEDVMError("Unknown reference: {}".format(ref))
else:
return None
lo_byte = (value << 0)
hi_byte = (ref_type << 8)
return [lo_byte | hi_byte]
def parse_instruction(self, exp):
if DEBUG:
print("Parse Instruction: ", exp)
name = exp[0]
result = []
        if name not in self.op_code_lookup_table:
            raise LEDVMError("Unknown opcode mnemonic: {}".format(name))
op_code = self.op_code_lookup_table[name]
op_info = OpCode.OP_CODE_TABLE[op_code]
# Add the op_code to the result
result += [op_code]
OP_CODE_POS = 1
data = exp[OP_CODE_POS:]
if len(data) != op_info.data_len:
raise LEDVMError("Expected {} arguments to opcode {}, got {}".format(
op_info.data_len,
name,
len(data)
)
)
if op_code == OpCode.IF_EQ:
print(data)
print(data[0], data[1], data[2])
LHS_POS = 0
RHS_POS = 1
JUMP_POS = 2
result += self.generate_ref(data[LHS_POS])
result += self.generate_ref(data[RHS_POS])
if_block_exp = data[JUMP_POS]
ref_data = self.generate_ref(if_block_exp)
            if ref_data is not None:
result += ref_data
else:
print('ifblock:', if_block_exp)
if_block = self.parse_instruction_list(if_block_exp)
jmp_offset = i16(len(if_block))
result += [jmp_offset]
result += if_block
print('ifBlockResult:', result)
elif op_info.arg_type == OpCodeInfo.ARG_NONE:
pass # Don't need to add data
        elif op_info.arg_type == OpCodeInfo.ARG_REFERENCES:
for ref in data:
result += self.generate_ref(ref)
return result
def parse_instruction_list(self, instruction_list):
result = []
for instruction in instruction_list:
result += self.parse_instruction(instruction)
return result
def parse_program(self, exp):
if DEBUG:
print("Parse program: ", exp)
exp = exp[0]
# pprint.pprint(exp)
return self.parse_instruction_list(exp)
if __name__ == "__main__":
init_prog = """
(
(LOAD_PIXEL PIXEL_NUM 255 200)
)
"""
# main_prog = """
# (
# (LOAD_PIXEL r 255 200)
# (ADD_VEC3 1 0 0)
# (IF_EQ v 199
# (
# (ADD_VEC3 1 0 0)
# )
# )
# (IF_EQ v 200
# (
# (SUB_VEC3 1 0 0)
# )
# )
# (SHOW_HSV)
# )
# """
main_prog = """
(
(IF_EQ h 0
(
(LOAD_PIXEL h 255 199)
)
)
(IF_EQ h 255
(
(LOAD_PIXEL h 255 200)
)
)
(IF_EQ v 200
(
(SUB_VEC3 1 0 0)
)
)
(IF_EQ v 199
(
(ADD_VEC3 1 0 0)
)
)
(SHOW_HSV)
)
"""
vm_parser = LEDEffectVMParser()
led_programs = {
"init": vm_parser.parse_asm(init_prog),
"main": vm_parser.parse_asm(main_prog),
}
vm = LEDEffectVM(led_programs, num_pixels=64)
for prog in led_programs:
print(prog, led_programs[prog])
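        # Added note: each 16-bit program word is packed little-endian
        # (low byte first) purely so the byte code can be shown with hexdump.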
byte_code_as_bytes = bytes([])
for word in led_programs[prog]:
byte_code_as_bytes += bytes([word & 0xff, word>>8 & 0xff])
hexdump.hexdump(byte_code_as_bytes)
vm.execute_program('init')
for i in range(300):
vm.execute_program('main')
print(vm.pixels)
|
py | 1a31da7f366c37d563f3fb991a9abedab6a7a260 | import torch.nn as nn
import torch.nn.functional as F
import torch
class Classifier(nn.Module):
def __init__(self, input_nc=3, ndf=64, norm_layer=nn.BatchNorm2d):
super(Classifier, self).__init__()
kw = 3
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(3):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2),
norm_layer(ndf * nf_mult, affine=True),
nn.LeakyReLU(0.2, True)
]
self.before_linear = nn.Sequential(*sequence)
sequence = [
nn.Linear(ndf * nf_mult, 1024),
nn.Linear(1024, 10)
]
self.after_linear = nn.Sequential(*sequence)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.criterionCLS = torch.nn.modules.CrossEntropyLoss()
def forward(self, x, lbl=None, ita=1.5):
bs = x.size(0)
out = self.after_linear(self.before_linear(x).view(bs, -1))
x = out
        P = F.softmax(x, dim=1)         # [B, 10] class probabilities
        logP = F.log_softmax(x, dim=1)  # [B, 10]
        PlogP = P * logP                # [B, 10]
        ent = -1.0 * PlogP.sum(dim=1)   # [B] entropy per sample
        ent = ent / 2.9444  # normalize by ln(19) ~= 2.9444; change when the number of classes is not 19
# compute robust entropy
ent = ent ** 2.0 + 1e-8
ent = ent ** ita
self.loss_ent = ent.mean()
if lbl is not None:
self.loss_cls = self.criterionCLS(x, lbl)
return x
def get_lr_params(self):
b = []
b.append(self.before_linear.parameters())
b.append(self.after_linear.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, args):
return [{'params': self.get_lr_params(), 'lr': args.learning_rate}]
def adjust_learning_rate(self, args, optimizer, i):
lr = args.learning_rate * ( (1-float(i)/args.num_steps) ** (args.power) )
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def CLSNet(restore_from=None):
model = Classifier()
if restore_from is not None:
model.load_state_dict(torch.load(restore_from + '.pth', map_location=lambda storage, loc: storage))
return model
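# Illustrative usage sketch (not part of the original file): with kernel size 3,
# stride 2 and no padding, the four conv layers reduce a 32x32 input to 1x1,
# which matches the Linear(ndf * nf_mult, 1024) head:
#
#     model = CLSNet()
#     logits = model(torch.randn(4, 3, 32, 32))  # -> tensor of shape [4, 10]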
|
py | 1a31dce6b15d6270fd35407bc5a13067733c68b8 | from Common import *
from save_to_mysql import Save_MySQL
# file_date_time = "2019-10-17"
# stif_time = "201910170900"
# Generate the individual (person) customer table
def make_stan_person(num):
"""字段列表
"busi_reg_no":"客户号",
"ctnm":"客户名称",
"cten":"拼音/英文名称",
"client_tp":"客户类别",1客户,2商户
"account_tp":"账户分类",1/2/3代表1、2、3类账号
"busi_type":"业务类型",
"smid":"主体特约商户编号",
"citp":"证件类型",
"citp_ori":"证件类型原值",
"citp_nt":"证件类型说明",
"ctid":"证件号码",
"ctid_edt":"证件有效期",
"sex":"性别",
"country":"国籍",
"nation":"民族",
"birthday":"出生日期",
"education":"学历",
"ctvc":"主体的职业类别",
"picm":"个人年收入",
"ficm":"家庭年收入",
"marriage":"婚姻状况",
"ceml":"电子邮件",
"rgdt":"开户日期",
"cls_dt":"销户日期",
"remark":"备注",
"indu_code":"行业代码",
"stat_flag_ori":"客户状态原值",
"stat_flag":"客户状态",
"mer_prov":"省",
"mer_city":"市",
"mer_area":"区县",
"address":"详细地址",
"tel":"联系电话",
"mer_unit":"管理机构",
"is_line":"是否线上{注册}",
"certification ":"建立渠道",
"cer_num":"通过身份验证渠道数量",
"con_acc_name":"经营名称",
"bord_flag":"境内外标识",
"web_info":"网络支付商户网址信息",
"con_nation":"商户所属国家或地区",
"bind_card":"银行绑定标识",
"ip_code":"注册地IP地址",
"mac_info":"注册设备MAC或IMEI地址",
"self_acc_no":"特约商户收单结算账号",
"acc_type1":"账户类型",
"bank_acc_name":"银行账户名称",
"reals":"客户真实有效性",
"batch_pay":"批量代付标识",
"statement_type":"结算类型"
:return:
"""
busi_reg_no = "p_{}".format(num)
ctnm = make_name_data()
cten = word_to_pinyin(ctnm)
client_tp = random.choice(["1", "2"])
busi_type = make_busi_type_data()
account_tp = make_account_tp_data(busi_type)
if client_tp == "2":
        smid = random.randint(1000, 9999)  # value of this field still to be confirmed
smid = str(smid)
else:
smid = ""
citp = make_citp_data()
    citp_ori = citp  # provisional value
citp_nt = "有效证件"
ctid = make_ctid_data()
ctid_edt = make_Card_valid_date(ctid)
sex = make_sex(ctid)
country = choice_contry()
nation = str(random.randint(1, 57))
birthday = ctid[6:14]
education = str(random.randint(1, 7))
ctvc = random.choice(["1A", "1B", "1C", "1D", "1E", "1F", "1G", "1H"])
picm = "300000"
ficm = "500000"
marriage = make_marriage_data(ctid)
ceml = make_email_data()
rgdt = make_register_date()
cls_dt = make_cls_dt_data(busi_reg_no)
remark = "这是一个备注"
indu_code = make_indu_code_data()
stat_flag_ori = "888888"
stat_flag = make_stat_flag_data(cls_dt)
# mer_prov = get_province_data(ctid[:6])
mer_prov = get_province_code_data(ctid[:6])
# mer_city = make_province_city_data(ctid[:6])[0]
mer_city = make_province_city_code_data(ctid[:6])
# mer_area = make_province_city_data(ctid[:6])[-1]
mer_area = ctid[:6]
address = make_address(ctid[:6])
tel = make_tel_num()
mer_unit = make_mer_unit_data()
is_line = random.choice(["0", "1"])
certification = random.choice(["1", "2", "3"])
cer_num = str(random.randint(0, 6))
con_acc_name = "默认经营名称" # 网络支付、预付卡、银行卡收单必须填写,暂为空
bord_flag = make_bord_flag_data()
web_info = make_web_info_data(busi_type) # 非网络支付业务,无网址用户可不填
con_nation = make_con_nation_data(bord_flag)
bind_card = make_bind_card_data(busi_type) # 仅需网络支付填写
ip_code = make_ip_data(busi_type) # 仅需网络支付填写
mac_info = make_mac_info_data(busi_type) # PC机填写MAC,移动终端填写IMEI(需网络支付,预付卡填写), 暂为空
self_acc_no = make_self_acc_no_data(client_tp) # 非商户不填,网络支付、预付卡、银行卡收单必须填写
acc_type1 = make_acc_type1_data(client_tp) # 非商户不填,网络支付、预付卡、银行卡收单必须填写
bank_acc_name = make_bank_acc_name_data(acc_type1)
reals = make_reals_data()
batch_pay = make_batch_pay_data(busi_type, client_tp)
statement_type = make_statement_type_data(client_tp)
# print(busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, citp_nt, ctid, ctid_edt, sex,
# country, nation, birthday, education, ctvc, picm, ficm, marriage, ceml, rgdt, cls_dt, remark, indu_code,
# stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area, address, tel, mer_unit, is_line, certification,
# cer_num, con_acc_name, bord_flag, web_info, con_nation, bind_card, ip_code, mac_info, self_acc_no, acc_type1,
# bank_acc_name, reals, batch_pay, statement_type)
# contect_data = make_connect_data([
# busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, citp_nt, ctid, ctid_edt, sex,
# country, nation, birthday, education, ctvc, picm, ficm, marriage, ceml, rgdt, cls_dt, remark, indu_code,
# stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area, address, tel, mer_unit, is_line, certification,
# cer_num, con_acc_name, bord_flag, web_info, con_nation, bind_card, ip_code, mac_info, self_acc_no, acc_type1,
# bank_acc_name, reals, batch_pay, statement_type
# ])
contect_data = "busi_reg_no,ctnm,cten,client_tp,account_tp,busi_type,smid,citp,citp_ori,citp_nt,ctid,ctid_edt,sex,country,nation,birthday,education,ctvc,picm,ficm,marriage,ceml,rgdt,cls_dt,remark,indu_code,stat_flag_ori,stat_flag,mer_prov,mer_city,mer_area,address,tel,mer_unit,is_line,certification,cer_num,con_acc_name,bord_flag,web_info,con_nation,bind_card,ip_code,mac_info,self_acc_no,acc_type1,bank_acc_name,reals,batch_pay,statement_type"
return {
"busi_reg_no": busi_reg_no,
"ctnm": ctnm,
"cten": cten,
"client_tp": client_tp,
"account_tp": account_tp,
"busi_type": busi_type,
"smid": smid,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"ctid_edt": ctid_edt,
"sex": sex,
"country": country,
"nation": nation,
"birthday": birthday,
"education": education,
"ctvc": ctvc,
"picm": picm,
"ficm": ficm,
"marriage": marriage,
"ceml": ceml,
"rgdt": rgdt,
"cls_dt": cls_dt,
"remark": remark,
"indu_code": indu_code,
"stat_flag_ori": stat_flag_ori,
"stat_flag": stat_flag,
"mer_prov": mer_prov,
"mer_city": mer_city,
"mer_area": mer_area,
"address": address,
"tel": tel,
"mer_unit": mer_unit,
"is_line": is_line,
"certification": certification,
"cer_num": cer_num,
"con_acc_name": con_acc_name,
"bord_flag": bord_flag,
"web_info": web_info,
"con_nation": con_nation,
"bind_card": bind_card,
"ip_code": ip_code,
"mac_info": mac_info,
"self_acc_no": self_acc_no,
"acc_type1": acc_type1,
"bank_acc_name": bank_acc_name,
"reals": reals,
"batch_pay": batch_pay,
"statement_type": statement_type
}, contect_data
# Generate the organization customer table
def make_stan_org(num):
"""
    busi_reg_no: customer number
    ctnm: customer name
    cten: pinyin/English name
    client_tp: customer category
    account_tp: account classification
    busi_type: business type
    smid: subject's contracted-merchant number
    citp: ID document type (reported)
    citp_ori: original ID document type value
    ctid: ID document number
    ctid_edt: ID document expiry date
    citp_nt: ID document type description
    id_type: ID document type (on-site inspection)
    org_no: organization code
    linkman: contact person name
    linktel: contact person mobile number
    linkjob: contact person job title
    linkmail: contact person email
    linkphone: contact person landline number
    ceml: email address
    ctvc: subject's industry category
    crnm: legal representative's name
    crit: legal representative's ID document type
    crit_ori: original value of legal representative's ID document type
    crit_nt: legal representative's ID document type description
    crid: legal representative's ID document number
    crid_edt: legal representative's ID document expiry date
    rgdt: account opening date
    cls_dt: account closing date
    scale: enterprise scale
    country: country of registration
    crp_type: organization category
    fud_date: founding date
    reg_cptl: registered capital
    remark_ctvc: business scope
    agency_ctnm: agent's name
    agency_citp: agent's ID document type
    agency_ctid: agent's ID document number
    agency_edt: agent's ID document expiry date
    remark: remarks
    indu_code: industry code
    stat_flag_ori: original customer status value
    stat_flag: customer status
    mer_prov: province
    mer_city: city
    mer_area: district/county
    address: detailed address
    tel: contact phone number
    mer_unit: managing institution
    is_line: whether registered online
    certification : establishment channel
    cer_num: number of identity-verification channels passed
    con_acc_name: business name
    bord_flag: domestic/overseas flag
    web_info: online-payment merchant website info
    con_nation: merchant's country or region
    majority_shareholder_ctnm: controlling shareholder or actual controller's name
    majority_shareholder_citp: controlling shareholder or actual controller's ID document type
    majority_shareholder_citp_ori: original value of controlling shareholder or actual controller's ID document type
    majority_shareholder_ctid: controlling shareholder or actual controller's ID document number
    majority_shareholder_edt: controlling shareholder or actual controller's ID document expiry date
    reg_cptl_code: registered capital currency
    bind_card: bank binding flag
    ip_code: registration IP address
    mac_info: MAC or IMEI of the registration device
    self_acc_no: contracted merchant's acquiring settlement account
    acc_type1: account type
    bank_acc_name: bank account name
    reals: customer authenticity
    complex: structural complexity of the non-natural person
    clear: identifiability of the non-natural person's shareholding structure
    batch_pay: batch payout flag
    statement_type: settlement type
:return:
"""
busi_reg_no = "o_{}".format(num)
ctnm = make_name_data()
cten = word_to_pinyin(ctnm)
client_tp = random.choice(["1", "2"])
busi_type = make_busi_type_data()
account_tp = make_account_tp_data(busi_type)
if client_tp == "2":
        smid = make_random_str(20)  # value of this field still to be confirmed
else:
smid = ""
citp = random.choice(["21", "29"])
    citp_ori = citp  # provisional value
ctid = make_ctid_data()
ctid_edt = make_Card_valid_date(ctid)
if citp == "29":
citp_nt = random.choice(["营业执照", "统一社会信用代码"])
else:
citp_nt = "证件类型"
if citp_ori == "营业执照":
id_type = "11"
else:
id_type = "12"
    org_no = make_random_num(9)  # unified social credit code is 9-17 digits
linkman = make_name_data()
linktel = make_tel_num()
linkjob = "联系人职务"
linkmail = make_email_data()
linkphone = make_random_num(9)
ceml = make_email_data()
ctvc = make_org_ctvc_data()
crnm = make_name_data()
crit = make_citp_data()
crit_ori = "证件原值"
if crit == "19":
crit_nt = "证件类型说明"
else:
crit_nt = ""
crid = make_ctid_data()
crid_edt = make_Card_valid_date(crid)
rgdt = make_register_date()
cls_dt = make_cls_dt_data(busi_reg_no)
scale = make_scale_data()
country = make_country_data()
crp_type = make_crp_type_data()
fud_date = "20151111" # 成立日期,暂时写死
reg_cptl = "1000000.00" # 注册资金,暂时写死
remark_ctvc = "经营范围"
agency_ctnm = make_name_data()
agency_citp = make_citp_data()
agency_ctid = make_ctid_data()
agency_edt = make_Card_valid_date(agency_ctid)
remark = "备注,暂时不填"
    indu_code = make_indu_code_data()  # payment-institution industry code, defaults to 11111 for now
    stat_flag_ori = "11111"  # original customer status value; the payment-system code table may be used, adjust to the customer's business system
stat_flag = make_stat_flag_data(busi_reg_no)
mer_prov = get_province_code_data(ctid[:6])
mer_city = make_province_city_code_data(ctid[:6])
mer_area = ctid[:6]
address = make_address(ctid[:6])
tel = make_tel_num()
mer_unit = make_mer_unit_data()
is_line = random.choice(["0", "1"])
certification = random.choice(["1", "2", "3"])
cer_num = str(random.randint(0, 6))
con_acc_name = "默认经营名称" # 网络支付、预付卡、银行卡收单必须填写,暂为空
bord_flag = make_bord_flag_data() # 网络支付、预付卡、银行卡收单必须填写
web_info = make_web_info_data(busi_type) # 非网络支付业务,无网址用户可不填
con_nation = make_con_nation_data(bord_flag) # 网络支付、预付卡、银行卡收单必须填写
majority_shareholder_ctnm = make_name_data()
majority_shareholder_citp = make_citp_data()
majority_shareholder_citp_ori = "控股股东或实际控制人证件类型原值"
majority_shareholder_ctid = make_ctid_data()
majority_shareholder_edt = make_Card_valid_date(majority_shareholder_ctid)
reg_cptl_code = "CNY"
    bind_card = make_bind_card_data(busi_type)  # only required for online payment
    ip_code = make_ip_data(busi_type)  # only required for online payment
    mac_info = make_mac_info_data(busi_type)  # MAC for PCs, IMEI for mobile devices (online payment and prepaid card); left empty for now
    self_acc_no = make_self_acc_no_data(client_tp)  # not filled for non-merchants; required for online payment, prepaid card and card acquiring
    acc_type1 = make_acc_type1_data(client_tp)  # not filled for non-merchants; required for online payment, prepaid card and card acquiring
    bank_acc_name = make_bank_acc_name_data(acc_type1)  # filled when acc_type1 == 12: name of the bank account (required for online payment, prepaid card and card acquiring)
reals = str(random.randint(1, 5))
complex = make_complex_data()
clear = make_clear_data()
batch_pay = make_batch_pay_data(busi_type, client_tp)
statement_type = random.choice(["0", "1"])
# print(busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, ctid, ctid_edt, citp_nt,
# id_type, org_no, linkman, linktel, linkjob, linkmail, linkphone, ceml, ctvc, crnm, crit, crit_ori, crit_nt,
# crid, crid_edt, rgdt, cls_dt, scale, country, crp_type, fud_date, reg_cptl, remark_ctvc, agency_ctnm,
# agency_citp, agency_ctid, agency_edt, remark, indu_code, stat_flag_ori, stat_flag, mer_prov, mer_city,
# mer_area, address, tel, mer_unit, is_line, certification, cer_num, con_acc_name, bord_flag, web_info,
# con_nation, majority_shareholder_ctnm, majority_shareholder_citp, majority_shareholder_citp_ori,
# majority_shareholder_ctid, majority_shareholder_edt, reg_cptl_code, bind_card, ip_code, mac_info, self_acc_no,
# acc_type1, bank_acc_name, reals, complex, clear, batch_pay, statement_type)
# contect_data = make_connect_data([
# busi_reg_no, ctnm, cten, client_tp, account_tp, busi_type, smid, citp, citp_ori, ctid, ctid_edt, citp_nt,
# id_type, org_no, linkman, linktel, linkjob, linkmail, linkphone, ceml, ctvc, crnm, crit, crit_ori, crit_nt,
# crid, crid_edt, rgdt, cls_dt, scale, country, crp_type, fud_date, reg_cptl, remark_ctvc, agency_ctnm,
# agency_citp, agency_ctid, agency_edt, remark, indu_code, stat_flag_ori, stat_flag, mer_prov, mer_city, mer_area,
# address, tel, mer_unit, is_line, certification, cer_num, con_acc_name, bord_flag, web_info, con_nation,
# majority_shareholder_ctnm, majority_shareholder_citp, majority_shareholder_citp_ori, majority_shareholder_ctid,
# majority_shareholder_edt, reg_cptl_code, bind_card, ip_code, mac_info, self_acc_no, acc_type1, bank_acc_name,
# reals, complex, clear, batch_pay, statement_type
# ])
contect_data = "busi_reg_no,ctnm,cten,client_tp,account_tp,busi_type,smid,citp,citp_ori,ctid,ctid_edt,citp_nt,id_type,org_no,linkman,linktel,linkjob,linkmail,linkphone,ceml,ctvc,crnm,crit,crit_ori,crit_nt,crid,crid_edt,rgdt,cls_dt,scale,country,crp_type,fud_date,reg_cptl,remark_ctvc,agency_ctnm,agency_citp,agency_ctid,agency_edt,remark,indu_code,stat_flag_ori,stat_flag,mer_prov,mer_city,mer_area,address,tel,mer_unit,is_line,certification,cer_num,con_acc_name,bord_flag,web_info,con_nation,majority_shareholder_ctnm,majority_shareholder_citp,majority_shareholder_citp_ori,majority_shareholder_ctid,majority_shareholder_edt,reg_cptl_code,bind_card,ip_code,mac_info,self_acc_no,acc_type1,bank_acc_name,reals,complex,clear,batch_pay,statement_type"
return {
"busi_reg_no": busi_reg_no,
"ctnm": ctnm,
"cten": cten,
"client_tp": client_tp,
"account_tp": account_tp,
"busi_type": busi_type,
"smid": smid,
"citp": citp,
"citp_ori": citp_ori,
"ctid": ctid,
"ctid_edt": ctid_edt,
"citp_nt": citp_nt,
"id_type": id_type,
"org_no": org_no,
"linkman": linkman,
"linktel": linktel,
"linkjob": linkjob,
"linkmail": linkmail,
"linkphone": linkphone,
"ceml": ceml,
"ctvc": ctvc,
"crnm": crnm,
"crit": crit,
"crit_ori": crit_ori,
"crit_nt": crit_nt,
"crid": crid,
"crid_edt": crid_edt,
"rgdt": rgdt,
"cls_dt": cls_dt,
"scale": scale,
"country": country,
"crp_type": crp_type,
"fud_date": fud_date,
"reg_cptl": reg_cptl,
"remark_ctvc": remark_ctvc,
"agency_ctnm": agency_ctnm,
"agency_citp": agency_citp,
"agency_ctid": agency_ctid,
"agency_edt": agency_edt,
"remark": remark,
"indu_code": indu_code,
"stat_flag_ori": stat_flag_ori,
"stat_flag": stat_flag,
"mer_prov": mer_prov,
"mer_city": mer_city,
"mer_area": mer_area,
"address": address,
"tel": tel,
"mer_unit": mer_unit,
"is_line": is_line,
"certification": certification,
"cer_num": cer_num,
"con_acc_name": con_acc_name,
"bord_flag": bord_flag,
"web_info": web_info,
"con_nation": con_nation,
"majority_shareholder_ctnm": majority_shareholder_ctnm,
"majority_shareholder_citp": majority_shareholder_citp,
"majority_shareholder_citp_ori": majority_shareholder_citp_ori,
"majority_shareholder_ctid": majority_shareholder_ctid,
"majority_shareholder_edt": majority_shareholder_edt,
"reg_cptl_code": reg_cptl_code,
"bind_card": bind_card,
"ip_code": ip_code,
"mac_info": mac_info,
"self_acc_no": self_acc_no,
"acc_type1": acc_type1,
"bank_acc_name": bank_acc_name,
"reals": reals,
"complex": complex,
"clear": clear,
"batch_pay": batch_pay,
"statement_type": statement_type
}, contect_data
# Customer ID-document table
def make_stan_cert(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    citp: ID document type
    citp_ori: original ID document type value
    citp_nt: ID document type description
    ctid: ID document number
    iss_unt: issuing authority
    address: address on the document
    ctid_edt: subject's ID document expiry date
    iss_dt: document issue date
    iss_ctry: issuing country
    is_rp: whether this is the primary document
:return:
"""
ctif_id = infos.get("busi_reg_no") # 取值
ctif_tp = "1"
citp = infos.get("citp") # 取值
citp_ori = infos.get("citp_ori") # 取值
citp_nt = infos.get("citp_nt") # 取值
ctid = infos.get("ctid") # 取值
iss_unt = make_province_city_process_data(ctid[:6])[:16] + "公安局" # 取值户籍所在地县级公安局
address = infos.get("address") # 取值
ctid_edt = infos.get("ctid_edt") # 取值,
iss_dt = make_iss_dt_data(ctid_edt)
iss_ctry = infos.get("country") # 取值,
is_rp = "1" # 考虑添加副证件
# print(ctif_id, ctif_tp, citp, citp_ori, citp_nt, ctid, iss_unt, address, ctid_edt, iss_dt, iss_ctry, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, citp, citp_ori, citp_nt, ctid, iss_unt, address, ctid_edt, iss_dt, iss_ctry, is_rp
# ])
contect_data = "ctif_id,ctif_tp,citp,citp_ori,citp_nt,ctid,iss_unt,address,ctid_edt,iss_dt,iss_ctry,is_rp"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"iss_unt": iss_unt,
"address": address,
"ctid_edt": ctid_edt,
"iss_dt": iss_dt,
"iss_ctry": iss_ctry,
"is_rp": is_rp
}, contect_data
# Customer address table
def make_stan_address(infos, ctif_tp_data):
"""
    ctif_id: customer number
    ctif_tp: subject type
    address_tp: address type
    address: detailed address
    ctry: country code
    county: administrative division code
    prvc: province
    city: city
    area: district/county
    postcode: postal code
    exp_dt: address expiry date
    is_rp: whether this is the primary address
:return:
"""
ctif_id = infos.get("busi_reg_no") # 取值
ctif_tp = ctif_tp_data # 取值
address_tp = make_address_tp_data()
address = infos.get("address") #
ctry = infos.get("country") # 取值
# county = make_make_province_city_process_data(infos.get("ctid")[:6]) # 已从最新接口文档中移除
prvc = infos.get("mer_prov") # 取值
city = infos.get("mer_city") # 取值
area = infos.get("mer_area") # 取值
postcode = ""
exp_dt = ""
is_rp = "1"
# print(ctif_id, ctif_tp, address_tp, address, ctry, prvc, city, area, postcode, exp_dt, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, address_tp, address, ctry, prvc, city, area, postcode, exp_dt, is_rp
# ])
contect_data = "ctif_id,ctif_tp,address_tp,address,ctry,prvc,city,area,postcode,exp_dt,is_rp"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"address_tp": address_tp,
"address": address,
"ctry": ctry,
"prvc": prvc,
"city": city,
"area": area,
"postcode": postcode,
"exp_dt": exp_dt,
"is_rp": is_rp
}, contect_data
# Customer contact-information table
def make_stan_tel(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    tel_tp: phone type
    tel: contact phone number
    is_rp: whether this is the primary phone number
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
tel_tp = random.choice(["11", "12", "21", "22", "23"])
tel = make_tel_num()
is_rp = "1"
# print(ctif_id, ctif_tp, tel_tp, tel, is_rp)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, tel_tp, tel, is_rp
# ])
contect_data = 'ctif_id,ctif_tp,tel_tp,tel,is_rp'
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"tel_tp": tel_tp,
"tel": tel,
"is_rp": is_rp
}, contect_data
# Related-party information table
def make_stan_relation(infos):
"""
    Customer relationships
    ctif_id: customer number
    ctif_tp: subject type
    rel_tp: relationship type
    rel_layer: related-party level
    rel_ctif: related party's customer number
    rel_cstp: related-party category
    rel_name: related party's name
    rcnt: related party's nationality/country
    citp: related party's ID document type
    citp_ori: original value of related party's ID document type
    ctid: related party's ID document number
    citp_nt: related party's ID document type description
    hold_per: shareholding ratio
    hold_amt: shareholding amount
    ctid_edt: related party's ID document expiry date
    rel_prov: related party's province
    rel_city: related party's city
    rel_area: related party's district/county
    rear: related party's detailed address
    retl: related party's contact phone number
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
rel_tp = make_rel_tp_data()
rel_layer = random.choice(["-1", "0", "1", "2", "3", "4", "5"])
rel_ctif = make_random_num(6)
rel_cstp = random.choice(["1", "2"])
rel_name = make_name_data()
rcnt = "CHE" # make_country_data() 默认中国
citp = make_citp_data()
citp_ori = "证件类型原值"
ctid = make_ctid_data()
citp_nt = "证件类型说明"
hold_per = 0.05 # 持股比例
hold_amt = 0.05 # 持股金额
ctid_edt = make_Card_valid_date(ctid)
rel_prov = get_province_code_data(ctid[:6])
rel_city = make_province_city_code_data(ctid[:6])
rel_area = ctid[:6]
rear = make_address(ctid[:6])
retl = make_tel_num()
# print(ctif_id, ctif_tp, rel_tp, rel_layer, rel_ctif, rel_cstp, rel_name, rcnt, citp, citp_ori, ctid, citp_nt,
# hold_per, hold_amt, ctid_edt, rel_prov, rel_city, rel_area, rear, retl)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, rel_tp, rel_layer, rel_ctif, rel_cstp, rel_name, rcnt, citp, citp_ori, ctid, citp_nt,
# hold_per, hold_amt, ctid_edt, rel_prov, rel_city, rel_area, rear, retl
# ])
contect_data = "ctif_id,ctif_tp,rel_tp,rel_layer,rel_ctif,rel_cstp,rel_name,rcnt,citp,citp_ori,ctid,citp_nt,hold_per,hold_amt,ctid_edt,rel_prov,rel_city,rel_area,rear,retl"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"rel_tp": rel_tp,
"rel_layer": rel_layer,
"rel_ctif": rel_ctif,
"rel_cstp": rel_cstp,
"rel_name": rel_name,
"rcnt": rcnt,
"citp": citp,
"citp_ori": citp_ori,
"ctid": ctid,
"citp_nt": citp_nt,
"hold_per": hold_per,
"hold_amt": hold_amt,
"ctid_edt": ctid_edt,
"rel_prov": rel_prov,
"rel_city": rel_city,
"rel_area": rel_area,
"rear": rear,
"retl": retl
}, contect_data
# Payment account table
def make_stan_pact(infos):
"""
    ctif_id: customer number
    ctif_tp: subject type
    act_tp: account type
    act_cd: payment account number
    act_typ: account category
    act_limit: payment account transaction limit
    is_self_acc: whether this is the contracted merchant's acquiring settlement account
    sales_name: prepaid-card purchaser
    cst_sex: prepaid-card purchaser's gender
    nation: prepaid-card purchaser's nationality
    occupation: prepaid-card purchaser's occupation
    id_type: prepaid-card purchaser's ID document type
    id_type_ori: original value of prepaid-card purchaser's ID document type
    id_no: prepaid-card purchaser's ID document number
    id_deadline: prepaid-card purchaser's ID document expiry date
    contact: prepaid-card purchaser's contact information
    address: prepaid-card purchaser's residence or workplace address
    sales_flag: prepaid-card agency/direct-sale flag
    bind_mob: bound mobile phone number
    mer_unit: managing institution
    cls_dt: account status
    rgdt: account opening date
    cls_stat: account closing date
:param infos:
:return:
"""
if infos.get("busi_type") == "02":
ctif_id = ""
ctif_tp = ""
act_tp = ""
act_cd = ""
act_typ = ""
act_limit = 0
is_self_acc = ""
sales_name = ""
cst_sex = ""
nation = ""
occupation = ""
id_type = ""
id_type_ori = ""
id_no = ""
id_deadline = ""
contact = ""
address = ""
sales_flag = ""
bind_mob = ""
mer_unit = ""
cls_dt = ""
rgdt = ""
cls_stat = ""
else:
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
act_tp = random.choice(['11', "211", "212"])
act_cd = make_act_cd_data(act_tp)
act_typ = make_act_type_data(act_tp)
act_limit = make_act_limit_data(act_tp, act_typ)
is_self_acc = random.choice(["0", "1"])
sales_name, cst_sex, nation, occupation, id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag \
= make_prepaid_card_data(infos)
bind_mob = make_bind_mob_data(infos)
mer_unit = make_mer_unit_data()
cls_dt = make_cls_dt_data(infos.get("busi_reg_no"))
rgdt = make_register_date()
if cls_dt == "C":
cls_stat = make_register_date()
else:
cls_stat = ""
# print(ctif_id, ctif_tp, act_tp, act_cd, act_typ, act_limit, is_self_acc, sales_name, "性别:", cst_sex, nation,
# occupation, id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag, bind_mob, mer_unit,
# cls_dt, rgdt, cls_stat)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, act_tp, act_cd, act_typ, act_limit, is_self_acc, sales_name, cst_sex, nation, occupation,
# id_type, id_type_ori, id_no, id_deadline, contact, address, sales_flag, bind_mob, mer_unit, cls_dt, rgdt,
# cls_stat
# ])
contect_data = "ctif_id,ctif_tp,act_tp,act_cd,act_typ,act_limit,is_self_acc,sales_name,cst_sex,nation,occupation,id_type,id_type_ori,id_no,id_deadline,contact,address,sales_flag,bind_mob,mer_unit,cls_dt,rgdt,cls_stat"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"act_tp": act_tp,
"act_cd": act_cd,
"act_typ": act_typ,
"act_limit": act_limit,
"is_self_acc": is_self_acc,
"sales_name": sales_name,
"cst_sex": cst_sex,
"nation": nation,
"occupation": occupation,
"id_type": id_type,
"id_type_ori": id_type_ori,
"id_no": id_no,
"id_deadline": id_deadline,
"contact": contact,
"address": address,
"sales_flag": sales_flag,
"bind_mob": bind_mob,
"mer_unit": mer_unit,
"cls_dt": cls_dt,
"rgdt": rgdt,
"cls_stat": cls_stat
}, contect_data
# Bank account table
def make_stan_bact(infos, t_stan_pact):
"""
    ctif_id: customer number
    ctif_tp: subject type
    act_tp: bank account type
    act_flag: bank account type (on-site inspection)
    act_cd: bank account number
    cabm: name of the bank where the account was opened
    pay_id: linked payment account
    is_self_acc: whether this is the contracted merchant's acquiring settlement account
    bank_acc_name: bank account name
    mer_unit: managing institution
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = "1"
act_tp = make_bank_act_tp_data(ctif_tp)
act_flag = random.choice(["11", "12"])
act_cd = "62" + make_random_num(17)
cabm = make_cabm_data(infos.get("ctid")[:6])
pay_id = make_pay_id_data(infos.get("busi_type"), t_stan_pact.get("act_cd"))
is_self_acc = t_stan_pact.get("is_self_acc")
bank_acc_name = "" # 没明白是什么,暂空
mer_unit = t_stan_pact.get("mer_unit")
# print(ctif_id, ctif_tp, act_tp, act_flag, act_cd, cabm, pay_id, is_self_acc, bank_acc_name, mer_unit)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, act_tp, act_flag, act_cd, cabm, pay_id, is_self_acc, bank_acc_name, mer_unit
# ])
contect_data = "ctif_id,ctif_tp,act_tp,act_flag,act_cd,cabm,pay_id,is_self_acc,bank_acc_name,mer_unit"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"act_tp": act_tp,
"act_flag": act_flag,
"act_cd": act_cd,
"cabm": cabm,
"pay_id": pay_id,
"is_self_acc": is_self_acc,
"bank_acc_name": bank_acc_name,
"mer_unit": mer_unit
}, contect_data
# Standard transaction table
def make_stan_stif(infos, stan_bact, ctif_tp_num, stif_time):
"""
    ctif_id: subject's customer number
    ctif_tp: subject type
    client_tp: customer category
    smid: subject's contracted-merchant code
    ctnm: subject's name
    citp: subject's ID document/certificate type
    citp_ori: original value of subject's ID document/certificate type
    citp_nt: description of subject's ID document/certificate type
    ctid: subject's ID document/certificate number
    cbat: type of subject's bank account
    cbac: subject's bank account number
    cabm: name of the bank where the subject's account was opened
    ctat: type of subject's transaction account
    ctac: subject's transaction account number
    cpin: name of the subject's payment institution
    cpba: bank account of the subject's payment institution
    cpbn: name of the bank holding the subject's payment institution's account
    ctip: subject's transaction IP address
    tstm: transaction time
    cttp: funds transfer method
    tsdr: funds receipt/payment flag
    crpp: purpose of funds
    crtp: transaction currency
    crat: transaction amount
    tcif_id: counterparty ID
    tcnm: counterparty name
    tsmi: counterparty's contracted-merchant code
    tcit: counterparty's ID document/certificate type
    tcit_ori: original value of counterparty's ID document/certificate type
    tcit_nt: description of counterparty's ID document/certificate type
    tcid: counterparty's ID document/certificate number
    tcat: type of counterparty's bank account
    tcba: counterparty's bank account number
    tcbn: name of the bank where the counterparty's account was opened
    tctt: type of counterparty's transaction account
    tcta: counterparty's transaction account number
    tcpn: name of the counterparty's payment institution
    tcpa: bank account of the counterparty's payment institution
    tpbn: name of the bank holding the counterparty's payment institution's account
    tcip: counterparty's transaction IP address
    tmnm: name of the traded goods
    bptc: business transaction code between the bank and the payment institution
    pmtc: business transaction code between the payment institution and the merchant
    ticd: business identifier
    busi_type: business type
    trans_type: transaction type
    pos_dev_id: terminal number, IMEI or other device identifier
    trans_stat: transaction status
    bank_stat: bank status
    mer_prov: region province
    mer_area: region county
    pos_prov: transaction province
    pos_area: transaction county
    mer_unit: managing institution
    extend1: conversion flag
    iofg: domestic/overseas flag
    trans_channel: transaction channel
    ctmac: MAC address where the transaction occurred
    balance: balance of the subject's payment account
    acc_flag: counterparty account type
    ctid_edt: expiry date of the subject's ID document/certificate
    tran_flag: counterparty account flag
    trans_order: transaction order number
    trans_cst_type: transaction type (customer-defined)
    crat_u: transaction amount in USD equivalent
    crat_c: transaction amount in CNY equivalent
    trans_way: transaction method
    agency_ctnm: agent's name
    agency_citp: agent's ID document (certificate) type
    agency_ctid: agent's ID document (certificate) number
    agency_country: agent's nationality
:param infos:
:return:
"""
ctif_id = infos.get("busi_reg_no")
ctif_tp = ctif_tp_num
client_tp = infos.get("client_tp")
smid = infos.get("smid")
ctnm = infos.get("ctnm")
citp = infos.get("citp")
citp_ori = infos.get("citp_ori")
citp_nt = infos.get("citp_nt")
ctid = infos.get("ctid")
cbat = stan_bact.get("act_tp")
cbac = stan_bact.get("act_cd")
cabm = stan_bact.get("cabm")
busi_type = make_busi_type_data()
ctat = make_ctat_data(busi_type)
ctac = make_random_num(17)
cpin = "默认机构名称"
cpba = make_random_num(17)
cpbn = make_cabm_data(make_province_code_data())
ctip = make_ip_data(busi_type)
# tstm = make_trade_time_data()
tstm = stif_time
cttp = make_cttp_data()
tsdr = random.choice(["01", "02"])
crpp = "资金用途"
crtp = "CNY"
crat = make_crat_data()
tcif_id = make_tcif_id_data(busi_type)
tcnm = make_name_data()
tsmi = make_random_num(20)
tcit = make_cert_type_data()
tcit_ori = "证件原值,需提供支付系统码表?"
tcit_nt = "证件类型说明"
tcid = make_random_num(20)
tcat = random.choice(["01", "02", "03"])
tcba = make_random_num(19)
tcbn = make_cabm_data(make_province_code_data())
tctt = random.choice(["01", "02"])
tcta = make_random_num(19)
tcpn = "默认支付机构名称"
tcpa = make_random_num(19)
tpbn = make_cabm_data(make_province_code_data())
tcip = make_ip_data(busi_type)
tmnm = "默认商品名称"
bptc = make_random_num(25)
pmtc = make_random_num(25)
ticd = make_ticd_data()
trans_type = make_trans_type_data(busi_type)
pos_dev_id = make_pos_dev_id_data(busi_type)
trans_stat = "交易状态" # 交易状态,需提供支付系统码表
bank_stat = "银行状态" # 银行状态,需提供支付系统码表
province_code = make_province_code_data()
mer_prov = province_code
mer_area = make_province_city_code_data(province_code)
province_code2 = make_province_code_data()
pos_prov = province_code2
pos_area = make_province_city_code_data(province_code2)
mer_unit = make_mer_unit_data() # 需提供支付系统代码表
extend1 = ""
# rate_rmb = "" # 老接口字段
# rate_usa = "" # 老接口字段
iofg = "0" # 暂时默认境内交易
trans_channel = make_trans_channel_data()
ctmac = make_mac_info_data(busi_type)
balance = "10000"
acc_flag = make_acc_flag_data(busi_type)
ctid_edt = infos.get("ctid_edt")
tran_flag = make_tran_flag_data(busi_type)
trans_order = make_trans_order_data(busi_type)
trans_cst_type = make_trans_cst_type_data()
crat_u = make_crat_u_data(crat)
crat_c = make_crat_r_data(crat)
    trans_way = make_random_str(6)  # see the transaction-method code table (PBoC interface file not yet received; provisionally 6 characters)
agency_ctnm = make_name_data()
agency_citp = make_citp_data()
agency_ctid = make_ctid_data()
agency_country = "CHN"
# print(ctif_id, ctif_tp, client_tp, smid, ctnm, citp, citp_ori, citp_nt, ctid, cbat, cbac, cabm, ctat, ctac, cpin,
# cpba, cpbn, ctip, tstm, cttp, tsdr, crpp, crtp, crat, tcif_id, tcnm, tsmi, tcit, tcit_ori, tcit_nt, tcid,
# tcat,
# tcba, tcbn, tctt, tcta, tcpn, tcpa, tpbn, tcip, tmnm, bptc, pmtc, ticd, busi_type, trans_type, pos_dev_id,
# trans_stat, bank_stat, mer_prov, mer_area, pos_prov, pos_area, mer_unit, extend1, iofg, trans_channel, ctmac,
# balance, acc_flag, ctid_edt, tran_flag, trans_order, trans_cst_type, crat_u, crat_c, trans_way, agency_ctnm,
# agency_citp, agency_ctid, agency_country)
# contect_data = make_connect_data([
# ctif_id, ctif_tp, client_tp, smid, ctnm, citp, citp_ori, citp_nt, ctid, cbat, cbac, cabm, ctat, ctac, cpin,
# cpba, cpbn, ctip, tstm, cttp, tsdr, crpp, crtp, crat, tcif_id, tcnm, tsmi, tcit, tcit_ori, tcit_nt, tcid, tcat,
# tcba, tcbn, tctt, tcta, tcpn, tcpa, tpbn, tcip, tmnm, bptc, pmtc, ticd, busi_type, trans_type, pos_dev_id,
# trans_stat, bank_stat, mer_prov, mer_area, pos_prov, pos_area, mer_unit, extend1, iofg, trans_channel, ctmac,
# balance, acc_flag, ctid_edt, tran_flag, trans_order, trans_cst_type, crat_u, crat_c, trans_way, agency_ctnm,
# agency_citp, agency_ctid, agency_country
# ])
contect_data = "ctif_id,ctif_tp,client_tp,smid,ctnm,citp,citp_ori,citp_nt,ctid,cbat,cbac,cabm,ctat,ctac,cpin,cpba,cpbn,ctip,tstm,cttp,tsdr,crpp,crtp,crat,tcif_id,tcnm,tsmi,tcit,tcit_ori,tcit_nt,tcid,tcat,tcba,tcbn,tctt,tcta,tcpn,tcpa,tpbn,tcip,tmnm,bptc,pmtc,ticd,busi_type,trans_type,pos_dev_id,trans_stat,bank_stat,mer_prov,mer_area,pos_prov,pos_area,mer_unit,extend1,iofg,trans_channel,ctmac,balance,acc_flag,ctid_edt,tran_flag,trans_order,trans_cst_type,crat_u,crat_c,trans_way,agency_ctnm,agency_citp,agency_ctid,agency_country"
return {
"ctif_id": ctif_id,
"ctif_tp": ctif_tp,
"client_tp": client_tp,
"smid": smid,
"ctnm": ctnm,
"citp": citp,
"citp_ori": citp_ori,
"citp_nt": citp_nt,
"ctid": ctid,
"cbat": cbat,
"cbac": cbac,
"cabm": cabm,
"ctat": ctat,
"ctac": ctac,
"cpin": cpin,
"cpba": cpba,
"cpbn": cpbn,
"ctip": ctip,
"tstm": tstm,
"cttp": cttp,
"tsdr": tsdr,
"crpp": crpp,
"crtp": crtp,
"crat": crat,
"tcif_id": tcif_id,
"tcnm": tcnm,
"tsmi": tsmi,
"tcit": tcit,
"tcit_ori": tcit_ori,
"tcit_nt": tcit_nt,
"tcid": tcid,
"tcat": tcat,
"tcba": tcba,
"tcbn": tcbn,
"tctt": tctt,
"tcta": tcta,
"tcpn": tcpn,
"tcpa": tcpa,
"tpbn": tpbn,
"tcip": tcip,
"tmnm": tmnm,
"bptc": bptc,
"pmtc": pmtc,
"ticd": ticd,
"busi_type": busi_type,
"trans_type": trans_type,
"pos_dev_id": pos_dev_id,
"trans_stat": trans_stat,
"bank_stat": bank_stat,
"mer_prov": mer_prov,
"mer_area": mer_area,
"pos_prov": pos_prov,
"pos_area": pos_area,
"mer_unit": mer_unit,
"extend1": extend1,
"iofg": iofg,
"trans_channel": trans_channel,
"ctmac": ctmac,
"balance": balance,
"acc_flag": acc_flag,
"ctid_edt": ctid_edt,
"tran_flag": tran_flag,
"trans_order": trans_order,
"trans_cst_type": trans_cst_type,
"crat_u": crat_u,
"crat_c": crat_c,
"trans_way": trans_way,
"agency_ctnm": agency_ctnm,
"agency_citp": agency_citp,
"agency_ctid": agency_ctid,
"agency_country": agency_country
}, contect_data
def person(num, connect, stif_num, stif_time):
# print("个人")
t_stan_person, stan_person_connect = make_stan_person(num)
t_stan_cert, stan_cert_connect = make_stan_cert(t_stan_person)
t_stan_address, stan_address_connect = make_stan_address(t_stan_person, "1")
t_stan_tel, stan_tel_connect = make_stan_tel(t_stan_person)
t_stan_pact, stan_pact_connect = make_stan_pact(t_stan_person)
t_stan_bact, stan_bact_connect = make_stan_bact(t_stan_person, t_stan_pact)
t_stan_relation, stan_relation_connect = make_stan_relation(t_stan_person)
    # transaction-table rows are written separately, 10 rows per subject
# for num in range(10):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_person, t_stan_bact, '1')
# # data = eval("t_stan_stif"[2:] + "_connect")
# data = stan_stif_connect
# file_name = "t_stan_stif".split("_")[-1] + "_" + file_date_time
# print(stan_stif_connect)
# write_to_csv(file_name + ".csv", data)
# for num in range(stif_num):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_person, t_stan_bact, '1', stif_time)
# print("t_stan_stif", t_stan_stif)
# connect.save("t_stan_stif", stan_stif_connect, t_stan_stif)
# connect.commit()
# print("stan_person_connect", stan_person_connect)
# print("stan_cert_connect", stan_cert_connect)
# print("stan_address_connect", stan_address_connect)
# print("stan_tel_connect", stan_tel_connect)
# print("stan_pact_connect", stan_pact_connect)
# print("stan_bact_connect", stan_bact_connect)
# print("stan_relation_connect", stan_relation_connect)
name = ["t_stan_person", "t_stan_cert", "t_stan_address", "t_stan_tel", "t_stan_relation", "t_stan_pact",
"t_stan_bact"]
for file_name in name:
data = eval(file_name[2:] + "_connect")
# file_name = file_name.split("_")[-1] + "_" + file_date_time
# write_to_csv(file_name + ".csv", data)
# write_to_csv(file_name + ".txt", data)
connect.save(file_name, data, eval(file_name))
connect.commit()
def org(num, connect,stif_num, stif_time):
# print("机构")
t_stan_org, stan_org_connect = make_stan_org(num)
t_stan_cert, stan_cert_connect = make_stan_cert(t_stan_org)
t_stan_address, stan_address_connect = make_stan_address(t_stan_org, "2")
t_stan_tel, stan_tel_connect = make_stan_tel(t_stan_org)
t_stan_pact, stan_pact_connect = make_stan_pact(t_stan_org)
t_stan_bact, stan_bact_connect = make_stan_bact(t_stan_org, t_stan_pact)
t_stan_relation, stan_relation_connect = make_stan_relation(t_stan_org)
    # transaction-table rows are written separately, 10 rows per subject
# for num in range(stif_num):
# t_stan_stif, stan_stif_connect = make_stan_stif(t_stan_org, t_stan_bact, '2', stif_time)
# print("t_stan_stif", t_stan_stif)
# # data = eval("t_stan_stif"[2:] + "_connect")
# data = stan_stif_connect
# file_name = "t_stan_stif".split("_")[-1] + "_" + file_date_time
# print(stan_stif_connect)
# write_to_csv(file_name + ".csv", data)
# connect.save("t_stan_stif", stan_stif_connect, t_stan_stif)
# connect.commit()
# print("stan_org_connect", stan_org_connect)
# print("stan_cert_connect", stan_cert_connect)
# print("stan_address_connect", stan_address_connect)
# print("stan_tel_connect", stan_tel_connect)
# print("stan_pact_connect", stan_pact_connect)
# print("stan_bact_connect", stan_bact_connect)
# print("stan_relation_connect", stan_relation_connect)
name = ["t_stan_org", "t_stan_cert", "t_stan_address", "t_stan_tel", "t_stan_relation", "t_stan_pact",
"t_stan_bact"]
for file_name in name:
data = eval(file_name[2:] + "_connect")
# file_name = file_name.split("_")[-1] + "_" + file_date_time
# write_to_csv(file_name + ".csv", data)
# write_to_csv(file_name + ".txt", data)
connect.save(file_name, data, eval(file_name))
connect.commit()
# def main(num):
# person(num)
# org(num)
def main(begin, end, stif_num, stif_time):
connect = Save_MySQL()
for num in range(begin, end):
person(num, connect, stif_num, stif_time)
org(num, connect, stif_num, stif_time)
connect.quit()
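# Illustrative call (not part of the original source), generating subjects 0-99
# with stif_num=10 and the sample timestamp used elsewhere in this script:
# main(0, 100, 10, "201910170900")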
# modification date
# trade_date
if __name__ == "__main__":
# pinyin = word_to_pinyin("张三")
# print(pinyin)
# res = make_ctid_data()
# print(res)
# res = read_province_data()
# print(res)
# add = make_make_province_city_process_data("150722")
# print(add)
# address = make_address("230183")
# print(address)
# trade_data = make_trade_amount_data()
# print(trade_data)
# ticd = make_ticd_data()
# print(ticd)
# data = make_make_province_city_process_data(make_province_code_data())
# make_province_city_data(data)[-1]
# data2 = make_province_code_data()
# province = get_province_data(data2[:2])
# print(province)
# read_excel()
# header = "&#@".join(t_stan_tel_header)
# write_to_csv("t_stan_tel.csv", header)
# date = time.strftime("%Y-%m-%d", time.localtime())
    # ------------------------- multithreading
from threading import Thread
# make_trade_time_data()
start_time = time.time()
# threads = []
# for count in range(10):
# t = Thread(target=main, args=(count*10, (count+1)*10))
# t.start()
# threads.append(t)
# for t in threads:
# t.join()
    # ------------------------- single-threaded
# file_date_time = "2019-10-17"
# stif_time = "201910170900"
# main(1000, 1500)
end_time = time.time()
print(end_time-start_time) # 13
# for i in range(100):
# # tt = make_register_date()
    # ss = random.choice([
    #     "01",  # internet payment
    #     "02",  # bank card acquiring
    #     "03",  # prepaid card issuance and acceptance
    #     "04",  # mobile phone payment
    #     "05",  # fixed-line phone payment
    #     "06",  # digital TV payment
    #     "07"   # currency exchange
    # ])
# print(ss)
# tt = make_tcif_id_data(ss)
#
# print(tt)
# ctid_edt = "20170506"
# ctid_edt = "99991231"
# tt = make_iss_dt_data(ctid_edt)
# print(tt)
#
# dd = make_country_data()
# print(dd)
# tt = make_province_city_process_data("412825")
# print(tt)
|
py | 1a31dd5293a62f7af801144492f4e5872b790d04 | import platform
import sys
from helper import executable_exists
PackageManager = {
"macos": "brew install",
"linux": {
"readhat": "sudo yum",
"arch": "sudo packman -S",
"gentoo": "sudo emerge --ask --verbose",
"suse": "sudo zypper install",
"debian": "sudo apt-get install"
}
}
LinuxDistroRecognition = {
"yum": "redhat",
"packman": "arch",
"emerge": "gentoo",
"zypper": "suse",
"apt-get": "debian"
}
PortAudio = {
"name": "Voice Recorder",
"pip": [
'SpeechRecognition',
"pyaudio --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib'"],
"package_guess": {
"macos": 'portaudio',
"linux": {
'redhat': 'python3-pyaudio python3-devel',
'arch': 'python-pyaudio',
'gentoo': 'pyaudio',
'suse': 'python3-PyAudio python3-devel',
'debian': 'python3-pyaudio python3-dev'
}},
"description": "Required for voice control",
"instruction": """\
Please install the Python binding 'pyaudio' manually.
For more details see the link below:
https://people.csail.mit.edu/hubert/pyaudio/"""}
RequestsSecurity = {
"name": "Requests security",
"pip": ['requests[security]'],
"description": "Better/saver https",
"instruction": "https://stackoverflow.com/questions/31811949/pip-install-requestssecurity-vs-pip-install-requests-difference"
}
NativeNotification = {
"name": "Notification",
"executable": ['notify-send'],
"description": "Native linux notifications",
"instruction": "Please install 'notify-send' manually using your local package manager!",
"package_guess": {
"linux": {
'redhat': 'libnotify',
'arch': 'libnotify',
'gentoo': 'eselect-notify-send',
'suse': 'libnotify-tools',
'debian': 'libnotify-bin'
}
}
}
FFMPEG = {
"name": "ffmpeg",
"executable": ['ffmpeg'],
"description": "Download music as .mp3 instead .webm",
"instruction": "Please install 'ffmpeg' manually using your local package manager!",
"package_guess": {
"macos": "ffmpeg",
"linux": {
'redhat': 'ffmpeg',
'arch': 'ffmpeg',
'gentoo': 'ffmpeg',
'suse': 'ffmpeg',
'debian': 'ffmpeg'
}
}
}
ESPEAK = {
"name": "espeak",
"executable": ['espeak'],
"description": "Text To Speech for Jarvis to talk out loud (alternatives: sapi5 or nsss will work, too)",
"instruction": "Please install 'espeak' manually using your local package manager!",
"package_guess": {
"linux": {
'redhat': 'espeak',
'arch': 'espeak',
'gentoo': 'espeak',
'suse': 'espeak',
'debian': 'espeak'
}
}
}
OPTIONAL_REQUIREMENTS = [PortAudio, RequestsSecurity, FFMPEG, ESPEAK]
if not sys.platform == "darwin":
OPTIONAL_REQUIREMENTS += [NativeNotification]
def get_guess(data):
if sys.platform == "darwin":
if 'macos' in data:
return data['macos']
else:
return False
elif platform.system().lower() == "linux":
if 'linux' in data:
data = data['linux']
else:
return False
for executable, distro in LinuxDistroRecognition.items():
if executable_exists(executable):
if distro in data:
return data[distro]
return False
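# Minimal usage sketch (added example): print_install_hint is a hypothetical helper,
# not part of the original module; it only prints a best-effort install command based
# on the guessed package name and the detected package manager.
def print_install_hint(requirement):
    guess = get_guess(requirement.get("package_guess", {}))
    if guess:
        if sys.platform == "darwin":
            manager = PackageManager["macos"]
        else:
            manager = None
            for exe, distro in LinuxDistroRecognition.items():
                if executable_exists(exe) and distro in PackageManager["linux"]:
                    manager = PackageManager["linux"][distro]
                    break
        if manager:
            print("Try: {} {}".format(manager, guess))
            return
    print(requirement["instruction"])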
|
py | 1a31ddbac48a1d99174e8eb35f3c24440ccd213f | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
default_msg = api.Message(attributes = {'format': 'pandas', 'name': 'test','process_list':[]}, body=df)
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': '','sdi_utils':''}
operator_description = "Sample from Dataframe"
operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
add_readme = dict()
add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
sample_size = 0.1
config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
random_state = 1
config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
invariant_column = ''
config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
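    # Added illustration: with invariant_column='date' and sample_size=0.5 on rows dated
    # [d1, d1, d2, d2, d3, d3], whole dates are sampled as units, e.g. all rows of d1 and
    # d3 are kept while every d2 row is dropped, so the realised sample size can deviate
    # from the nominal 50%.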
def process(msg) :
att_dict = msg.attributes
att_dict['operator'] = 'sample'
if api.config.debug_mode == True:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
else:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
logger.info("Process started")
time_monitor = tp.progress()
# start custom process definition
# test if body refers to a DataFrame type
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
###### start calculation
sample_size = api.config.sample_size
if sample_size < 1 :
sample_size = int(sample_size * df.shape[0])
if sample_size < 1 :
sample_size = 1
logger.warning("Fraction of sample size too small. Set sample size to 1.")
elif sample_size > df.shape[0]:
logger.warning("Sample size larger than number of rows")
logger.debug("Samples_size: {}/() ({})".format(sample_size,df.shape[0],sample_size/df.shape[0]))
random_state = api.config.random_state
invariant_column = tfp.read_value(api.config.invariant_column)
if invariant_column and sample_size < df.shape[0]:
# get the average number of records for each value of invariant
sc_df = df.groupby(invariant_column)[invariant_column].count()
sample_size_invariant = int(sample_size / sc_df.mean())
sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant # ensure minimum
sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
# sample the df by merge 2 df
df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
df.drop(columns=['sum'], inplace=True)
else:
df = df.sample(n=sample_size, random_state=random_state)
# end custom process definition
if df.empty :
raise ValueError('DataFrame is empty')
logger.debug('Columns: {}'.format(str(df.columns)))
logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0],df.shape[1]))
logger.debug('Memory: {} kB'.format(df.memory_usage(deep=True).sum() / 1024 ** 2))
example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
for i in range(0, example_rows):
logger.debug('Row {}: {}'.format(i,str([str(i)[:10].ljust(10) for i in df.iloc[i, :].tolist()])))
progress_str = '<BATCH ENDED><1>'
if 'storage.fileIndex' in att_dict and 'storage.fileCount' in att_dict and 'storage.endOfSequence' in att_dict:
if att_dict['storage.fileIndex'] + 1 == att_dict['storage.fileCount']:
progress_str = '<BATCH ENDED><{}>'.format(att_dict['storage.fileCount'])
else:
progress_str = '<BATCH IN-PROCESS><{}/{}>'.format(att_dict['storage.fileIndex'] + 1,
att_dict['storage.fileCount'])
att_dict['process_list'].append(att_dict['operator'])
logger.debug('Process ended: {} - {} '.format(progress_str, time_monitor.elapsed_time()))
logger.debug('Past process steps: {}'.format(att_dict['process_list']))
return log_stream.getvalue(), api.Message(attributes=att_dict,body=df)
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
#api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
if __name__ == '__main__':
main()
#gs.gensolution(os.path.realpath(__file__), config, inports, outports)
|
py | 1a31df18b3df1f6d0aeb91eb1a9182ee17ccde5f | import os
import sys
import time
from circus.process import Process, RUNNING
from circus.tests.support import TestCircus, skipIf, EasyTestSuite
import circus.py3compat
from circus.py3compat import StringIO, PY3
RLIMIT = """\
import resource, sys
with open(sys.argv[1], 'w') as f:
for limit in ('NOFILE', 'NPROC'):
res = getattr(resource, 'RLIMIT_%s' % limit)
f.write('%s=%s\\n' % (limit, resource.getrlimit(res)))
"""
VERBOSE = """\
import sys
for i in range(1000):
for stream in (sys.stdout, sys.stderr):
stream.write(str(i))
stream.flush()
"""
def _nose_no_s():
if PY3:
return isinstance(sys.stdout, StringIO)
else:
return not hasattr(sys.stdout, 'fileno')
class TestProcess(TestCircus):
def test_base(self):
cmd = sys.executable
args = "-c 'import time; time.sleep(2)'"
process = Process('test', cmd, args=args, shell=False)
try:
info = process.info()
self.assertEqual(process.pid, info['pid'])
age = process.age()
self.assertTrue(age > 0.)
self.assertFalse(process.is_child(0))
finally:
process.stop()
def test_rlimits(self):
script_file = self.get_tmpfile(RLIMIT)
output_file = self.get_tmpfile()
cmd = sys.executable
args = [script_file, output_file]
rlimits = {'nofile': 20,
'nproc': 20}
process = Process('test', cmd, args=args, rlimits=rlimits)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
finally:
process.stop()
with open(output_file, 'r') as f:
output = {}
for line in f.readlines():
limit, value = line.rstrip().split('=', 1)
output[limit] = value
def srt2ints(val):
return [circus.py3compat.long(key) for key in val[1:-1].split(',')]
wanted = [circus.py3compat.long(20), circus.py3compat.long(20)]
self.assertEqual(srt2ints(output['NOFILE']), wanted)
self.assertEqual(srt2ints(output['NPROC']), wanted)
def test_comparison(self):
cmd = sys.executable
args = ['import time; time.sleep(2)', ]
p1 = Process('1', cmd, args=args)
p2 = Process('2', cmd, args=args)
self.assertTrue(p1 < p2)
self.assertFalse(p1 == p2)
self.assertTrue(p1 == p1)
p1.stop()
p2.stop()
def test_process_parameters(self):
# all the options passed to the process should be available by the
# command / process
p1 = Process('1', 'make-me-a-coffee',
'$(circus.wid) --type $(circus.env.type)',
shell=False, spawn=False, env={'type': 'macchiato'})
self.assertEqual(['make-me-a-coffee', '1', '--type', 'macchiato'],
p1.format_args())
p2 = Process('1', 'yeah $(CIRCUS.WID)', spawn=False)
self.assertEqual(['yeah', '1'], p2.format_args())
os.environ['coffee_type'] = 'american'
p3 = Process('1', 'yeah $(circus.env.type)', shell=False, spawn=False,
env={'type': 'macchiato'})
self.assertEqual(['yeah', 'macchiato'], p3.format_args())
os.environ.pop('coffee_type')
@skipIf(_nose_no_s(), 'Nose runs without -s')
def test_streams(self):
script_file = self.get_tmpfile(VERBOSE)
cmd = sys.executable
args = [script_file]
# 1. streams sent to /dev/null
process = Process('test', cmd, args=args, close_child_stdout=True,
close_child_stderr=True)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
# the pipes should be empty
self.assertEqual(process.stdout.read(), b'')
self.assertEqual(process.stderr.read(), b'')
finally:
process.stop()
# 2. streams sent to /dev/null, no PIPEs
process = Process('test', cmd, args=args, close_child_stdout=True,
close_child_stderr=True, pipe_stdout=False,
pipe_stderr=False)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
            # the pipes should be nonexistent
self.assertTrue(process.stdout is None)
self.assertTrue(process.stderr is None)
finally:
process.stop()
# 3. streams & pipes open
process = Process('test', cmd, args=args)
try:
# wait for the process to finish
while process.status == RUNNING:
time.sleep(1)
            # this time the pipes should contain the captured output
self.assertEqual(len(process.stdout.read()), 2890)
self.assertEqual(len(process.stderr.read()), 2890)
finally:
process.stop()
test_suite = EasyTestSuite(__name__)
|
py | 1a31df51b6ddf6d66a8096ccd8f96662c075ac6f | """
# Mobius Software LTD
# Copyright 2015-2018, Mobius Software LTD
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
"""
from iot.classes.IoTClient import *
from iot.mqtt.MQParser import MQParser
from iot.mqtt.mqtt_classes.MQConnackCode import *
from iot.mqtt.mqtt_classes.MQTopic import *
from iot.mqtt.mqtt_classes.Will import *
from iot.mqtt.mqtt_messages.MQConnect import *
from iot.mqtt.mqtt_messages.MQDisconnect import *
from iot.mqtt.mqtt_messages.MQPuback import *
from iot.mqtt.mqtt_messages.MQPubcomp import *
from iot.mqtt.mqtt_messages.MQPubrec import *
from iot.mqtt.mqtt_messages.MQPubrel import *
from iot.mqtt.mqtt_messages.MQSubscribe import *
from iot.mqtt.mqtt_messages.MQUnsubscribe import *
from iot.network.TCPClient import *
from iot.timers.TimersMap import *
class MQTTclient(IoTClient):
def __init__(self, account, client):
self.account = account
self.clientGUI = client
self.parser = MQParser(None)
self.resendperiod = 3000
self.connectionState = None
self.data = None
self.timers = TimersMap(self)
self.publishPackets = {}
self.can_connect = True
def send(self, message):
if self.connectionState == ConnectionState.CONNECTION_ESTABLISHED:
self.parser.setMessage(message)
message = self.parser.encode()
self.clientFactory.send(message)
else:
return False
def dataReceived(self, data):
messages = []
index = 1
while len(data) - index > 0:
length = self.parser.next(data, index)
if length < 0:
break
part = data[index - 1:index + length]
message = self.parser.decode(part)
messages.append(message)
index += length
for message in messages:
process_messageType_method(self, message.getType(), message)
def setState(self, ConnectionState):
self.connectionState = ConnectionState
def isConnected(self):
return self.connectionState == ConnectionState.CONNECTION_ESTABLISHED
def closeChannel(self):
if self.client is not None:
self.client.stop()
def goConnect(self):
self.setState(ConnectionState.CONNECTING)
if self.account.willTopic is not None:
topic = MQTopic(self.account.willTopic, self.account.qos)
will = Will(topic, self.account.will, self.account.isRetain)
else:
will = None
connect = MQConnect(self.account.username, self.account.password, self.account.clientID, self.account.cleanSession, self.account.keepAlive, will)
if self.timers is not None:
self.timers.stopAllTimers()
self.timers.goConnectTimer(connect)
self.parser.setMessage(connect)
self.clientFactory = ClientFactory(self.parser.encode(), self)
if self.account.isSecure:
ctx = CtxFactory(self.account.certificate, self.account.certPasw)
self.connector = reactor.connectSSL(self.account.serverHost, self.account.port, self.clientFactory, ctx)
else:
self.connector = reactor.connectTCP(self.account.serverHost, self.account.port, self.clientFactory)
def publish(self, name, qos, content, retain, dup):
topic = MQTopic(name, qos)
publish = MQPublish(0, topic, content, retain, dup)
if (qos == 0):
self.send(publish)
else:
if (qos in [1, 2]):
self.timers.goMessageTimer(publish)
def unsubscribeFrom(self, topicName):
listTopics = []
listTopics.append(topicName)
unsubscribe = MQUnsubscribe(0, listTopics)
self.timers.goMessageTimer(unsubscribe)
def subscribeTo(self, name, qos):
topic = MQTopic(name, qos)
listMQTopics = [topic]
subscribe = MQSubscribe(0, listMQTopics)
self.timers.goMessageTimer(subscribe)
def pingreq(self):
self.send(MQPingreq())
def disconnectWith(self, duration):
self.send(MQDisconnect())
self.timers.stopAllTimers()
self.clientFactory.client_close_connection()
def timeoutMethod(self):
if self.can_connect:
self.can_connect = False
self.timers.stopAllTimers()
reactor.callFromThread(self.clientGUI.timeout)
def connectTimeoutMethod(self):
if self.can_connect:
self.can_connect = False
self.timers.stopAllTimers()
reactor.callFromThread(self.clientGUI.show_error_message, "Connect Error", "Connection timeout")
reactor.callFromThread(self.clientGUI.timeout)
def ConnectionLost(self):
if self.can_connect:
self.can_connect = False
if self.timers is not None:
self.timers.stopAllTimers()
self.connector.disconnect()
reactor.callFromThread(self.clientGUI.errorReceived)
# _____________________________________________________________________________________
def processConnack(self, message):
self.timers.stopConnectTimer()
if message.returnCode == 0: # MQ_ACCEPTED
self.setState(ConnectionState.CONNECTION_ESTABLISHED)
self.timers.goPingTimer(MQPingreq(), self.account.keepAlive)
self.clientGUI.connackReceived(message.returnCode)
else:
messagebox.showinfo("Connect error", MQConnackCode(message.returnCode).readable_name())
self.clientGUI.errorReceived()
def processSuback(self, message):
subscribe = self.timers.removeTimer(message.packetID)
if subscribe is not None:
size = len(subscribe.listMQTopics)
topic = subscribe.listMQTopics[size - 1]
qos = topic.getQoS()
self.clientGUI.subackReceived(topic, qos, 0)
def processUnsuback(self, message):
unsubscribe = self.timers.removeTimer(message.packetID)
if unsubscribe is not None:
self.clientGUI.unsubackReceived(unsubscribe.listTopics)
def processPublish(self, message):
publisherQoS = message.topic.qos.getValue()
if publisherQoS.getValue() == 0:
self.clientGUI.publishReceived(message.topic, publisherQoS, message.content, message.dup, message.retain)
if publisherQoS.getValue() == 1: # AT_LEAST_ONCE
puback = MQPuback(message.packetID)
self.send(puback)
self.clientGUI.publishReceived(message.topic, publisherQoS, message.content, message.dup, message.retain)
if publisherQoS.getValue() == 2: # EXACTLY_ONCE
pubrec = MQPubrec(message.packetID)
self.send(pubrec)
self.publishPackets[message.packetID] = message
def processPuback(self, message):
publish = self.timers.removeTimer(message.packetID)
if publish is not None:
self.clientGUI.pubackReceived(publish.topic, publish.topic.getQoS(), publish.content, publish.dup, publish.retain, 0)
def processPubrec(self, message):
publish = self.timers.removeTimer(message.packetID)
if publish is not None:
self.timers.goMessageTimer(MQPubrel(publish.packetID))
self.publishPackets[publish.packetID] = publish
def processPubrel(self, message):
publish = self.publishPackets.get(message.packetID)
if publish is not None:
self.clientGUI.publishReceived(publish.topic, publish.topic.getQoS().getValue(), publish.content, publish.dup, publish.retain)
self.send(MQPubcomp(message.packetID))
def processPubcomp(self, message):
pubrel = self.timers.removeTimer(message.packetID)
if pubrel is not None:
publish = self.publishPackets.get(message.packetID)
self.clientGUI.pubackReceived(publish.topic, publish.topic.getQoS(), publish.content, publish.dup, publish.retain, 0)
def processPingresp(self, message):
self.clientGUI.pingrespReceived(False)
def processSubscribe(self, message):
self.clientGUI.errorReceived('received invalid message subscribe')
def processConnect(self, message):
self.clientGUI.errorReceived('received invalid message connect')
def processPingreq(self, message):
self.clientGUI.errorReceived('received invalid message pingreq')
def processDisconnect(self, message):
self.timers.stopAllTimers()
self.clientGUI.disconnectReceived()
def processUnsubscribe(self, message):
raise ValueError('received invalid message unsubscribe')
switcherProcess = {
1: processConnect,
2: processConnack,
3: processPublish,
4: processPuback,
5: processPubrec,
6: processPubrel,
7: processPubcomp,
8: processSubscribe,
9: processSuback,
10: processUnsubscribe,
11: processUnsuback,
12: processPingreq,
13: processPingresp,
14: processDisconnect,
}
def process_messageType_method(self, argument, message):
return switcherProcess[argument].__call__(self, message)
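# Rough usage sketch (added): 'account' and 'client' stand for the application's account
# settings object and GUI/callback object, which are defined outside this module; QoS is
# passed as a plain integer here to match the checks inside publish().
#
#     mqtt = MQTTclient(account, client)
#     mqtt.goConnect()                                    # opens the TCP or SSL connection
#     mqtt.subscribeTo("sensors/#", 1)                    # subscribe with QoS 1
#     mqtt.publish("sensors/temp", 1, "21.5", False, False)
#     mqtt.disconnectWith(0)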
|
py | 1a31df5ac28fb98892a3e9adeb64d22370c6daba | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
def define_user_agent():
'''
This method defines crawler's User Agent.
'''
return {'User-Agent': 'Crawly 1.0',
'From': '[email protected]'}
def download(url):
'''
Sends a get request to a url and downloads the response.
'''
return requests.get(url, headers=define_user_agent(), timeout=10)
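# Example usage (added; the URL is only a placeholder):
if __name__ == '__main__':
    response = download('https://example.com')
    print(response.status_code, len(response.text))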
|
py | 1a31df969987bdd42a5e70e45ba4b7025a96893f | # Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def add_subparser(subparsers):
_parser = subparsers.add_parser("countdown", description="QuantRocket cron service CLI", help="Manage crontabs")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Upload a new crontab, or return the current crontab.
Examples:
Upload a new crontab to a service called countdown-australia (replaces
current crontab):
quantrocket countdown crontab mycron.crontab -s countdown-australia
Show current crontab for a service called countdown-australia:
quantrocket countdown crontab -s countdown-australia
"""
parser = _subparsers.add_parser(
"crontab",
help="upload a new crontab, or return the current crontab",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"filename",
nargs="?",
metavar="FILENAME",
help="the crontab file to upload (if omitted, return the current crontab)")
parser.add_argument(
"-s", "--service",
metavar="SERVICE_NAME",
help="the name of the countdown service (default 'countdown')")
parser.set_defaults(func="quantrocket.countdown._load_or_show_crontab")
examples = """
Set or show the countdown service timezone.
Examples:
Set the timezone of the countdown service to America/New_York:
quantrocket countdown timezone America/New_York
Show the current timezone of the countdown service:
quantrocket countdown timezone
Show the timezone for a service called countdown-australia:
quantrocket countdown timezone -s countdown-australia
"""
parser = _subparsers.add_parser(
"timezone",
help="set or show the countdown service timezone",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"tz",
nargs="?",
metavar="TZ",
help="the timezone to set (pass a partial timezone string such as 'newyork' "
"or 'europe' to see close matches, or pass '?' to see all choices)")
parser.add_argument(
"-s", "--service",
metavar="SERVICE_NAME",
help="the name of the countdown service, (default 'countdown')")
parser.set_defaults(func="quantrocket.countdown._cli_get_or_set_timezone")
|
py | 1a31df9bc1d35143c2fe17325cc1357e8e87f7a2 | __author__ = "Junhee Yoon"
__version__ = "1.0.0"
__maintainer__ = "Junhee Yoon"
__email__ = "[email protected]"
"""
Description: This is batch job for transforming data to DESeq input
"""
import pandas as pd
import numpy as np
import os
import glob
import argparse
from libraries.botoClass import botoHandler
from libraries.externalHandler import handlers as dataHandler
## argparse setting
parser = argparse.ArgumentParser(prog='step1_get_DESeq2_input.py')
parser.add_argument('-c','--ctype', type=str, dest='celltype', required=True,\
    choices=['CD4','CD8','CD14'], help='Cell type for extraction (CD4, CD8 or CD14)')
parser.add_argument('-v','--condcolumn', type=str, dest='condcolumn', required=True,\
help='Column name which is using for condition value')
parser.add_argument('-x','--cond1', type=str, dest='cond1', required=True,\
help='condition1 for metadata')
parser.add_argument('-y','--cond2', type=str, dest='cond2', required=True,\
help='condition2 for metadata')
args = parser.parse_args()
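# Example invocation (added; the condition column and its values are hypothetical):
#   python step1_get_DESeq2_input.py -c CD8 -v DiseaseCourse -x RR -y CIS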
# Main function
if __name__ == "__main__":
### Get ENV variables
mainDataBucket = os.environ['mainbucket'] # openkbc-ms-maindata-bucket
metaName = os.environ['metafile'] # EPIC_HCvB_metadata_baseline_updated-share.csv
outputPath = os.environ['efspoint'] # /output/
### Error handling here
### Data prepration
s3 = botoHandler(mainDataBucket) # Call boto3
COUNT_PATH = "/data/" # Main data path
META_PATH = s3.getFile([metaName]) ## This is FIXED parameter
s3.getDirFiles('rsem_counts/', destpath=COUNT_PATH) # Download all count files
filelist = glob.glob(COUNT_PATH+"*-"+args.celltype+".genes.results") # File path
filelist = [os.path.basename(cursor) for cursor in filelist] # Extracting base file name
sampleName = dataHandler.get_samplename(filelist)
result_arr = [] # result array
# sampleName and filelist have same order, and appending to result array
for filename in filelist:
sampleValues = dataHandler.get_column(COUNT_PATH+filename, 'expected_count')
result_arr.append(sampleValues)
result_df = pd.concat(result_arr, axis=1)
result_df.columns = sampleName # Change column name by using sample names
metadata = pd.read_csv(META_PATH) # read meta data
# get meta result
meta_result_df = dataHandler.get_condtionMatrix_by_category(metadata, 'HCVB_ID', args.condcolumn, [args.cond1, args.cond2])
overlapped_samples = list(set(meta_result_df.index.tolist()).intersection(set(result_df.columns.tolist()))) # Overlapped samples
# Extract overlapped samples
meta_result_df = meta_result_df.loc[overlapped_samples]
result_df = result_df[overlapped_samples]
result_df.astype(int).to_csv(outputPath+args.celltype+"_output.csv") # Output
meta_result_df.to_csv(outputPath+args.celltype+"_meta_output.csv") |
py | 1a31dfb3bfd082a0cb9484df30ee8d0f026e844d | from rest_framework.views import *
from apps.core.exceptions import CustomAPIException
from apps.utils.response import simple_response
from apps.core import response_code
##
# Override the DRF exception handler so that error responses follow the
# project's existing response format and are easier to work with.
###
def custom_exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, Http404):
exc = exceptions.NotFound()
elif isinstance(exc, PermissionDenied):
exc = exceptions.PermissionDenied()
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'detail': exc.detail}
set_rollback()
if exc.status_code == 400:
code, message = response_code.ERR_PARAM_ERROR
elif exc.status_code == 401:
code, message = response_code.ERR_AUTH_ERROR
elif exc.status_code == 403:
code, message = response_code.ERR_PERMISSION_ERROR
elif exc.status_code == 404:
code, message = response_code.ERR_NOT_FOUND_ERROR
elif exc.status_code == 500:
code, message = response_code.ERR_SERVER_ERROR
elif exc.status_code == 405:
code, message = response_code.ERR_METHOD_NOT_ALLOWED
else:
code, message = response_code.ERR_UNKNOWN_ERROR
return simple_response(code=code, data=data, message=message, headers=headers)
elif isinstance(exc, CustomAPIException): # 捕获自定义的异常
set_rollback()
return simple_response(code=exc.get_code(), message=exc.get_message(), data=exc.get_data())
return None
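# To activate this handler, point DRF at it in settings.py (added note; the dotted
# path below is an assumption -- adjust it to wherever this module actually lives):
#
# REST_FRAMEWORK = {
#     'EXCEPTION_HANDLER': 'apps.core.handlers.custom_exception_handler',
# }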
|
py | 1a31e1e1a4d05a8b98d0ac3f3f9f31e1d06214ec | """Test nimare.meta.ale (ALE/SCALE meta-analytic algorithms)."""
import logging
import os
import pickle
import nibabel as nib
import numpy as np
import pytest
import nimare
from nimare.correct import FDRCorrector, FWECorrector
from nimare.meta import ale
from nimare.utils import vox2mm
def test_ALE_ma_map_reuse(testdata_cbma, tmp_path_factory, caplog):
"""Test that MA maps are re-used when appropriate."""
from nimare.meta import kernel
tmpdir = tmp_path_factory.mktemp("test_ALE_ma_map_reuse")
testdata_cbma.update_path(tmpdir)
# ALEKernel cannot extract sample_size from a Dataset,
# so we need to set it for this kernel and for the later meta-analyses.
kern = kernel.ALEKernel(sample_size=20)
dset = kern.transform(testdata_cbma, return_type="dataset")
# The associated column should be in the new Dataset's images DataFrame
cols = dset.images.columns.tolist()
assert any(["ALEKernel" in col for col in cols])
# The Dataset without the images will generate them from scratch.
# If drop_invalid is False, then there should be an Exception, since two studies in the test
# dataset are missing coordinates.
meta = ale.ALE(kernel__sample_size=20)
with pytest.raises(Exception):
meta.fit(testdata_cbma, drop_invalid=False)
with caplog.at_level(logging.DEBUG, logger="nimare.meta.cbma.base"):
meta.fit(testdata_cbma)
assert "Loading pre-generated MA maps" not in caplog.text
# The Dataset with the images will re-use them, as evidenced by the logger message.
with caplog.at_level(logging.DEBUG, logger="nimare.meta.cbma.base"):
meta.fit(dset)
assert "Loading pre-generated MA maps" in caplog.text
def test_ALESubtraction_ma_map_reuse(testdata_cbma, tmp_path_factory, caplog):
"""Test that MA maps are re-used when appropriate."""
from nimare.meta import kernel
tmpdir = tmp_path_factory.mktemp("test_ALESubtraction_ma_map_reuse")
testdata_cbma.update_path(tmpdir)
# ALEKernel cannot extract sample_size from a Dataset,
# so we need to set it for this kernel and for the later meta-analyses.
kern = kernel.ALEKernel(sample_size=20)
dset = kern.transform(testdata_cbma, return_type="dataset")
# The Dataset without the images will generate them from scratch.
sub_meta = ale.ALESubtraction(n_iters=10, kernel__sample_size=20)
with caplog.at_level(logging.DEBUG, logger="nimare.meta.cbma.base"):
sub_meta.fit(testdata_cbma, testdata_cbma)
assert "Loading pre-generated MA maps" not in caplog.text
# The Dataset with the images will re-use them,
# as evidenced by the logger message.
with caplog.at_level(logging.DEBUG, logger="nimare.meta.cbma.base"):
sub_meta.fit(dset, dset)
assert "Loading pre-generated MA maps" in caplog.text
def test_ALE_approximate_null_unit(testdata_cbma, tmp_path_factory):
"""Unit test for ALE with approximate null_method."""
tmpdir = tmp_path_factory.mktemp("test_ALE_approximate_null_unit")
out_file = os.path.join(tmpdir, "file.pkl.gz")
meta = ale.ALE(null_method="approximate")
res = meta.fit(testdata_cbma)
assert "stat" in res.maps.keys()
assert "p" in res.maps.keys()
assert "z" in res.maps.keys()
assert isinstance(res, nimare.results.MetaResult)
assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
res2 = res.copy()
assert res2 != res
assert isinstance(res, nimare.results.MetaResult)
# Test saving/loading
meta.save(out_file, compress=True)
assert os.path.isfile(out_file)
meta2 = ale.ALE.load(out_file, compressed=True)
assert isinstance(meta2, ale.ALE)
with pytest.raises(pickle.UnpicklingError):
ale.ALE.load(out_file, compressed=False)
meta.save(out_file, compress=False)
assert os.path.isfile(out_file)
meta2 = ale.ALE.load(out_file, compressed=False)
assert isinstance(meta2, ale.ALE)
with pytest.raises(OSError):
ale.ALE.load(out_file, compressed=True)
# Test MCC methods
# Monte Carlo FWE
corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=-1)
cres = corr.transform(meta.results)
assert isinstance(cres, nimare.results.MetaResult)
assert "z_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "z_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "z_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
assert isinstance(
cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="image"),
nib.Nifti1Image,
)
assert isinstance(
cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="array"),
np.ndarray,
)
assert isinstance(
cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="image"),
nib.Nifti1Image,
)
assert isinstance(
cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="array"),
np.ndarray,
)
# Bonferroni FWE
corr = FWECorrector(method="bonferroni")
cres = corr.transform(res)
assert isinstance(cres, nimare.results.MetaResult)
assert isinstance(
cres.get_map("z_corr-FWE_method-bonferroni", return_type="image"), nib.Nifti1Image
)
assert isinstance(
cres.get_map("z_corr-FWE_method-bonferroni", return_type="array"), np.ndarray
)
# FDR
corr = FDRCorrector(method="indep", alpha=0.05)
cres = corr.transform(meta.results)
assert isinstance(cres, nimare.results.MetaResult)
assert isinstance(
cres.get_map("z_corr-FDR_method-indep", return_type="image"), nib.Nifti1Image
)
assert isinstance(cres.get_map("z_corr-FDR_method-indep", return_type="array"), np.ndarray)
def test_ALE_montecarlo_null_unit(testdata_cbma, tmp_path_factory):
"""Unit test for ALE with an montecarlo null_method.
This test is run with low-memory kernel transformation as well.
"""
tmpdir = tmp_path_factory.mktemp("test_ALE_montecarlo_null_unit")
out_file = os.path.join(tmpdir, "file.pkl.gz")
meta = ale.ALE(null_method="montecarlo", n_iters=10, kernel__memory_limit="1gb")
res = meta.fit(testdata_cbma)
assert "stat" in res.maps.keys()
assert "p" in res.maps.keys()
assert "z" in res.maps.keys()
assert isinstance(res, nimare.results.MetaResult)
assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
res2 = res.copy()
assert res2 != res
assert isinstance(res, nimare.results.MetaResult)
# Test saving/loading
meta.save(out_file, compress=True)
assert os.path.isfile(out_file)
meta2 = ale.ALE.load(out_file, compressed=True)
assert isinstance(meta2, ale.ALE)
with pytest.raises(pickle.UnpicklingError):
ale.ALE.load(out_file, compressed=False)
meta.save(out_file, compress=False)
assert os.path.isfile(out_file)
meta2 = ale.ALE.load(out_file, compressed=False)
assert isinstance(meta2, ale.ALE)
with pytest.raises(OSError):
ale.ALE.load(out_file, compressed=True)
# Test MCC methods
# Monte Carlo FWE
corr = FWECorrector(method="montecarlo", voxel_thresh=0.001, n_iters=5, n_cores=-1)
cres = corr.transform(meta.results)
assert isinstance(cres, nimare.results.MetaResult)
assert "z_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "z_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "z_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_desc-size_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_desc-mass_level-cluster_corr-FWE_method-montecarlo" in cres.maps.keys()
assert "logp_level-voxel_corr-FWE_method-montecarlo" in cres.maps.keys()
assert isinstance(
cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="image"),
nib.Nifti1Image,
)
assert isinstance(
cres.get_map("z_desc-size_level-cluster_corr-FWE_method-montecarlo", return_type="array"),
np.ndarray,
)
assert isinstance(
cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="image"),
nib.Nifti1Image,
)
assert isinstance(
cres.get_map("z_desc-mass_level-cluster_corr-FWE_method-montecarlo", return_type="array"),
np.ndarray,
)
# Bonferroni FWE
corr = FWECorrector(method="bonferroni")
cres = corr.transform(res)
assert isinstance(cres, nimare.results.MetaResult)
assert isinstance(
cres.get_map("z_corr-FWE_method-bonferroni", return_type="image"), nib.Nifti1Image
)
assert isinstance(
cres.get_map("z_corr-FWE_method-bonferroni", return_type="array"), np.ndarray
)
# FDR
corr = FDRCorrector(method="indep", alpha=0.05)
cres = corr.transform(meta.results)
assert isinstance(cres, nimare.results.MetaResult)
assert isinstance(
cres.get_map("z_corr-FDR_method-indep", return_type="image"), nib.Nifti1Image
)
assert isinstance(cres.get_map("z_corr-FDR_method-indep", return_type="array"), np.ndarray)
def test_ALESubtraction_smoke(testdata_cbma, tmp_path_factory):
"""Smoke test for ALESubtraction."""
tmpdir = tmp_path_factory.mktemp("test_ALESubtraction_smoke")
out_file = os.path.join(tmpdir, "file.pkl.gz")
sub_meta = ale.ALESubtraction(n_iters=10, memory_limit=None)
sub_meta.fit(testdata_cbma, testdata_cbma)
assert isinstance(sub_meta.results, nimare.results.MetaResult)
assert "z_desc-group1MinusGroup2" in sub_meta.results.maps.keys()
assert isinstance(
sub_meta.results.get_map("z_desc-group1MinusGroup2", return_type="image"), nib.Nifti1Image
)
assert isinstance(
sub_meta.results.get_map("z_desc-group1MinusGroup2", return_type="array"), np.ndarray
)
sub_meta.save(out_file)
assert os.path.isfile(out_file)
def test_ALESubtraction_smoke_lowmem(testdata_cbma, tmp_path_factory):
"""Smoke test for ALESubtraction with low memory settings."""
tmpdir = tmp_path_factory.mktemp("test_ALESubtraction_smoke_lowmem")
out_file = os.path.join(tmpdir, "file.pkl.gz")
sub_meta = ale.ALESubtraction(n_iters=10, memory_limit="1gb")
sub_meta.fit(testdata_cbma, testdata_cbma)
assert isinstance(sub_meta.results, nimare.results.MetaResult)
assert "z_desc-group1MinusGroup2" in sub_meta.results.maps.keys()
assert isinstance(
sub_meta.results.get_map("z_desc-group1MinusGroup2", return_type="image"), nib.Nifti1Image
)
assert isinstance(
sub_meta.results.get_map("z_desc-group1MinusGroup2", return_type="array"), np.ndarray
)
sub_meta.save(out_file)
assert os.path.isfile(out_file)
def test_SCALE_smoke(testdata_cbma):
"""Smoke test for SCALE."""
dset = testdata_cbma.slice(testdata_cbma.ids[:3])
xyz = vox2mm(
np.vstack(np.where(testdata_cbma.masker.mask_img.get_fdata())).T,
testdata_cbma.masker.mask_img.affine,
)
xyz = xyz[:, :20]
meta = ale.SCALE(n_iters=5, n_cores=1, xyz=xyz)
res = meta.fit(dset)
assert isinstance(res, nimare.results.MetaResult)
assert "z" in res.maps.keys()
assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
def test_SCALE_smoke_lowmem(testdata_cbma):
"""Smoke test for SCALE with low memory settings."""
dset = testdata_cbma.slice(testdata_cbma.ids[:3])
xyz = vox2mm(
np.vstack(np.where(testdata_cbma.masker.mask_img.get_fdata())).T,
testdata_cbma.masker.mask_img.affine,
)
xyz = xyz[:, :20]
meta = ale.SCALE(n_iters=5, n_cores=1, xyz=xyz, memory_limit="1gb")
res = meta.fit(dset)
assert isinstance(res, nimare.results.MetaResult)
assert "z" in res.maps.keys()
assert isinstance(res.get_map("z", return_type="image"), nib.Nifti1Image)
assert isinstance(res.get_map("z", return_type="array"), np.ndarray)
|
py | 1a31e53973b050e927c95aa0583367bfe7b15ede | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : inn.
# @File : tensorflow_addons.layers.polynomial.PolynomialCrossing
# @Time : 2020/5/19 10:48 上午
# @Author : yuanjie
# @Email : [email protected]
# @Software : PyCharm
# @Description :
|
py | 1a31e566b97813c54f3052d71e6e4882b43497f6 | from collections import namedtuple
import itertools
import networkx as nx
import numpy as np
from pgmpy.factors.discrete import factor_product
from pgmpy.inference import Inference
from pgmpy.models import BayesianModel, MarkovChain, MarkovModel
from pgmpy.utils.mathext import sample_discrete
from pgmpy.extern.six.moves import map, range
from pgmpy.sampling import _return_samples
State = namedtuple('State', ['var', 'state'])
class BayesianModelSampling(Inference):
"""
Class for sampling methods specific to Bayesian Models
Parameters
----------
model: instance of BayesianModel
model on which inference queries will be computed
Public Methods
--------------
forward_sample(size)
"""
def __init__(self, model):
if not isinstance(model, BayesianModel):
raise TypeError("Model expected type: BayesianModel, got type: ", type(model))
self.topological_order = list(nx.topological_sort(model))
super(BayesianModelSampling, self).__init__(model)
def forward_sample(self, size=1, return_type='dataframe'):
"""
Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(size=2, return_type='recarray')
rec.array([(0, 0, 1), (1, 0, 2)],
dtype=[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
"""
types = [(var_name, 'int') for var_name in self.topological_order]
sampled = np.zeros(size, dtype=types).view(np.recarray)
for node in self.topological_order:
cpd = self.model.get_cpds(node)
states = range(self.cardinality[node])
evidence = cpd.variables[:0:-1]
if evidence:
cached_values = self.pre_compute_reduce(variable=node)
evidence = np.vstack([sampled[i] for i in evidence])
weights = list(map(lambda t: cached_values[tuple(t)], evidence.T))
else:
weights = cpd.values
sampled[node] = sample_discrete(states, weights, size)
return _return_samples(return_type, sampled)
def pre_compute_reduce(self, variable):
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
cached_values = {}
for state_combination in itertools.product(*[range(self.cardinality[var]) for var in variable_evid]):
states = list(zip(variable_evid, state_combination))
cached_values[state_combination] = variable_cpd.reduce(states, inplace=False).values
return cached_values
def rejection_sample(self, evidence=None, size=1, return_type="dataframe"):
"""
Generates sample(s) from joint distribution of the bayesian network,
given the evidence.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import State
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State(var='diff', state=0)]
>>> inference.rejection_sample(evidence=evidence, size=2, return_type='dataframe')
intel diff grade
0 0 0 1
1 0 0 1
"""
if evidence is None:
return self.forward_sample(size)
types = [(var_name, 'int') for var_name in self.topological_order]
sampled = np.zeros(0, dtype=types).view(np.recarray)
prob = 1
i = 0
while i < size:
_size = int(((size - i) / prob) * 1.5)
_sampled = self.forward_sample(_size, 'recarray')
for evid in evidence:
_sampled = _sampled[_sampled[evid[0]] == evid[1]]
prob = max(len(_sampled) / _size, 0.01)
sampled = np.append(sampled, _sampled)[:size]
i += len(_sampled)
return _return_samples(return_type, sampled)
def likelihood_weighted_sample(self, evidence=None, size=1, return_type="dataframe"):
"""
Generates weighted sample(s) from joint distribution of the bayesian
network, that comply with the given evidence.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Algorithm 12.2 pp 493.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples with corresponding weights
Examples
--------
>>> from pgmpy.factors.discrete import State
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State('diff', 0)]
>>> inference.likelihood_weighted_sample(evidence=evidence, size=2, return_type='recarray')
rec.array([(0, 0, 1, 0.6), (0, 0, 2, 0.6)],
dtype=[('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8'), ('_weight', '<f8')])
"""
types = [(var_name, 'int') for var_name in self.topological_order]
types.append(('_weight', 'float'))
sampled = np.zeros(size, dtype=types).view(np.recarray)
sampled['_weight'] = np.ones(size)
        evidence_dict = {var: st for var, st in evidence} if evidence is not None else {}
for node in self.topological_order:
cpd = self.model.get_cpds(node)
states = range(self.cardinality[node])
evidence = cpd.get_evidence()
if evidence:
evidence_values = np.vstack([sampled[i] for i in evidence])
cached_values = self.pre_compute_reduce(node)
weights = list(map(lambda t: cached_values[tuple(t)], evidence_values.T))
if node in evidence_dict:
sampled[node] = evidence_dict[node]
for i in range(size):
sampled['_weight'][i] *= weights[i][evidence_dict[node]]
else:
sampled[node] = sample_discrete(states, weights)
else:
if node in evidence_dict:
sampled[node] = evidence_dict[node]
for i in range(size):
sampled['_weight'][i] *= cpd.values[evidence_dict[node]]
else:
sampled[node] = sample_discrete(states, cpd.values, size)
return _return_samples(return_type, sampled)
class GibbsSampling(MarkovChain):
"""
Class for performing Gibbs sampling.
Parameters:
-----------
model: BayesianModel or MarkovModel
Model from which variables are inherited and transition probabilites computed.
Public Methods:
---------------
set_start_state(state)
sample(start_state, size)
generate_sample(start_state, size)
Examples:
---------
Initialization from a BayesianModel object:
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> sat_cpd = TabularCPD('sat', 2, [[0.95, 0.2], [0.05, 0.8]], evidence=['intel'], evidence_card=[2])
>>> student = BayesianModel()
>>> student.add_nodes_from(['intel', 'sat'])
>>> student.add_edge('intel', 'sat')
>>> student.add_cpds(intel_cpd, sat_cpd)
>>> from pgmpy.inference import GibbsSampling
>>> gibbs_chain = GibbsSampling(student)
Sample from it:
>>> gibbs_chain.sample(size=3)
intel sat
0 0 0
1 0 0
2 1 1
"""
def __init__(self, model=None):
super(GibbsSampling, self).__init__()
if isinstance(model, BayesianModel):
self._get_kernel_from_bayesian_model(model)
elif isinstance(model, MarkovModel):
self._get_kernel_from_markov_model(model)
def _get_kernel_from_bayesian_model(self, model):
"""
Computes the Gibbs transition models from a Bayesian Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
prod_cpd = factor_product(*cpds)
kernel = {}
scope = set(prod_cpd.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
self.transition_models[var] = kernel
def _get_kernel_from_markov_model(self, model):
"""
Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
factors_dict = {var: [] for var in self.variables}
for factor in model.get_factors():
for var in factor.scope():
factors_dict[var].append(factor)
# Take factor product
factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
for var, factors in factors_dict.items()}
self.cardinalities = {var: factors_dict[var].get_cardinality([var])[var] for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
kernel = {}
factor = factors_dict[var]
scope = set(factor.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(var, s) for var, s in zip(other_vars, tup) if var in scope]
reduced_factor = factor.reduce(states, inplace=False)
kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
self.transition_models[var] = kernel
def sample(self, start_state=None, size=1, return_type="dataframe"):
"""
Sample from the Markov Chain.
Parameters:
-----------
start_state: dict or array-like iterable
Representing the starting states of the variables. If None is passed, a random start_state is chosen.
size: int
Number of samples to be generated.
return_type: string (dataframe | recarray)
Return type for samples, either of 'dataframe' or 'recarray'.
Defaults to 'dataframe'
Returns
-------
sampled: A pandas.DataFrame or a numpy.recarray object depending upon return_type argument
the generated samples
Examples:
---------
>>> from pgmpy.factors import DiscreteFactor
>>> from pgmpy.inference import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = DiscreteFactor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = DiscreteFactor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
        >>> gibbs.sample(size=4, return_type='dataframe')
A B C
0 0 1 1
1 1 0 0
2 1 1 0
3 1 1 1
"""
if start_state is None and self.state is None:
self.state = self.random_state()
elif start_state is not None:
self.set_start_state(start_state)
types = [(var_name, 'int') for var_name in self.variables]
sampled = np.zeros(size, dtype=types).view(np.recarray)
sampled[0] = np.array([st for var, st in self.state])
for i in range(size - 1):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
sampled[i + 1] = np.array([st for var, st in self.state])
return _return_samples(return_type, sampled)
def generate_sample(self, start_state=None, size=1):
"""
Generator version of self.sample
Return Type:
------------
List of State namedtuples, representing the assignment to all variables of the model.
Examples:
---------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.sampling import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = DiscreteFactor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = DiscreteFactor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
>>> gen = gibbs.generate_sample(size=2)
>>> [sample for sample in gen]
[[State(var='C', state=1), State(var='B', state=1), State(var='A', state=0)],
[State(var='C', state=0), State(var='B', state=1), State(var='A', state=1)]]
"""
if start_state is None and self.state is None:
self.state = self.random_state()
elif start_state is not None:
self.set_start_state(start_state)
for i in range(size):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
yield self.state[:]
|
py | 1a31e5e63bdd4f3aaf00d01a009c485f40b1479c | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter for CleverHans v3.1.0 to be run in TensorFlow 2.x environment.
The multi-representation adversary experiments are run in TensorFlow 2, but
depend on a version of the CleverHans package which expects TensorFlow 1. This
adapter glues them together by importing needed parts from CleverHans and
assigning their TensorFlow references to `tensorflow.compat.v1`.
"""
# pylint: disable=g-bad-import-order
import tensorflow as tf
import tensorflow.compat.v1 as tfv1
# Expose the symbols used in function interfaces. This has to be done before
# actually importing CleverHans.
tf.GraphKeys = tfv1.GraphKeys
# pylint: disable=g-import-not-at-top
from cleverhans import compat
from cleverhans import utils_tf
from cleverhans.attacks import sparse_l1_descent
# pylint: enable=g-import-not-at-top
# pylint: enable=g-bad-import-order
# Bind the expected TensorFlow version.
compat.tf = tfv1
utils_tf.tf = tfv1
sparse_l1_descent.tf = tfv1
|
py | 1a31e65401ca1f5f8c1d47a11d303927f061d3e0 | """
=============================================
:mod:`archivers` -- Solution archival methods
=============================================
This module provides pre-defined archivers for evoluationary computations.
All archiver functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of individuals
- *archive* -- the current archive of individuals
- *args* -- a dictionary of keyword arguments
Each archiver function returns the updated archive.
.. note::
The *population* is really a shallow copy of the actual population of
the evolutionary computation. This means that any activities like
sorting will not affect the actual population.
.. Copyright 2012 Aaron Garrett
.. Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
.. The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
.. module:: archivers
.. moduleauthor:: Aaron Garrett <[email protected]>
"""
import math
def default_archiver(random, population, archive, args):
"""Do nothing.
This function just returns the existing archive (which is
probably empty) with no changes.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
return archive
def population_archiver(random, population, archive, args):
"""Archive the current population.
This function replaces the archive with the individuals
of the current population.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
new_archive = []
for ind in population:
new_archive.append(ind)
return new_archive
def best_archiver(random, population, archive, args):
"""Archive only the best individual(s).
This function archives the best solutions and removes inferior ones.
If the comparison operators have been overloaded to define Pareto
preference (as in the ``Pareto`` class), then this archiver will form
a Pareto archive.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
"""
new_archive = archive
for ind in population:
if len(new_archive) == 0:
new_archive.append(ind)
else:
should_remove = []
should_add = True
for a in new_archive:
if ind.candidate == a.candidate:
should_add = False
break
elif ind < a:
should_add = False
elif ind > a:
should_remove.append(a)
for r in should_remove:
new_archive.remove(r)
if should_add:
new_archive.append(ind)
return new_archive
def adaptive_grid_archiver(random, population, archive, args):
"""Archive only the best individual(s) using a fixed size grid.
This function archives the best solutions by using a fixed-size grid
to determine which existing solutions should be removed in order to
make room for new ones. This archiver is designed specifically for
use with the Pareto Archived Evolution Strategy (PAES).
.. Arguments:
random -- the random number generator object
population -- the population of individuals
archive -- the current archive of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *max_archive_size* -- the maximum number of individuals in the archive
(default len(population))
- *num_grid_divisions* -- the number of grid divisions (default 1)
"""
def get_grid_location(fitness, num_grid_divisions, global_smallest, global_largest):
loc = 0
n = 1
num_objectives = len(fitness)
inc = [0 for _ in range(num_objectives)]
width = [0 for _ in range(num_objectives)]
local_smallest = global_smallest[:]
for i, f in enumerate(fitness):
if f < local_smallest[i] or f > local_smallest[i] + global_largest[i] - global_smallest[i]:
return -1
for i in range(num_objectives):
inc[i] = n
n *= 2
width[i] = global_largest[i] - global_smallest[i]
for d in range(num_grid_divisions):
for i, f in enumerate(fitness):
if f < width[i] / 2.0 + local_smallest[i]:
loc += inc[i]
else:
local_smallest[i] += width[i] / 2.0
for i in range(num_objectives):
inc[i] *= num_objectives * 2
width[i] /= 2.0
return loc
def update_grid(individual, archive, num_grid_divisions, global_smallest, global_largest, grid_population):
if len(archive) == 0:
num_objectives = len(individual.fitness)
smallest = [individual.fitness[o] for o in range(num_objectives)]
largest = [individual.fitness[o] for o in range(num_objectives)]
else:
num_objectives = min(min([len(a.fitness) for a in archive]), len(individual.fitness))
smallest = [min(min([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
largest = [max(max([a.fitness[o] for a in archive]), individual.fitness[o]) for o in range(num_objectives)]
for i in range(num_objectives):
global_smallest[i] = smallest[i] - abs(0.2 * smallest[i])
global_largest[i] = largest[i] + abs(0.2 * largest[i])
for i in range(len(grid_population)):
grid_population[i] = 0
for a in archive:
loc = get_grid_location(a.fitness, num_grid_divisions, global_smallest, global_largest)
a.grid_location = loc
grid_population[loc] += 1
loc = get_grid_location(individual.fitness, num_grid_divisions, global_smallest, global_largest)
individual.grid_location = loc
grid_population[loc] += 1
max_archive_size = args.setdefault('max_archive_size', len(population))
num_grid_divisions = args.setdefault('num_grid_divisions', 1)
if not 'grid_population' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.grid_population = [0 for _ in range(2**(min([len(p.fitness) for p in population]) * num_grid_divisions))]
if not 'global_smallest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_smallest = [0 for _ in range(min([len(p.fitness) for p in population]))]
if not 'global_largest' in dir(adaptive_grid_archiver):
adaptive_grid_archiver.global_largest = [0 for _ in range(min([len(p.fitness) for p in population]))]
new_archive = archive
for ind in population:
update_grid(ind, new_archive, num_grid_divisions, adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest, adaptive_grid_archiver.grid_population)
should_be_added = True
for a in new_archive:
if ind == a or a > ind:
should_be_added = False
if should_be_added:
if len(new_archive) == 0:
new_archive.append(ind)
else:
join = False
nondominated = True
removal_set = []
for i, a in enumerate(new_archive):
if ind > a and not join:
new_archive[i] = ind
join = True
elif ind > a:
if not a in removal_set:
removal_set.append(a)
# Otherwise, the individual is nondominated against this archive member.
# We can't use set difference because Individual objects are not hashable.
# We'd like to say...
# new_archive = list(set(new_archive) - set(removal_set))
# So this code gets that same result without using sets.
temp_archive = []
                for archived in new_archive:
                    if archived not in removal_set:
                        temp_archive.append(archived)
new_archive = temp_archive
if not join and nondominated:
if len(new_archive) == max_archive_size:
replaced_index = 0
found_replacement = False
loc = get_grid_location(ind.fitness, num_grid_divisions,
adaptive_grid_archiver.global_smallest,
adaptive_grid_archiver.global_largest)
ind.grid_location = loc
if ind.grid_location >= 0:
most = adaptive_grid_archiver.grid_population[ind.grid_location]
else:
most = -1
for i, a in enumerate(new_archive):
pop_at_a = adaptive_grid_archiver.grid_population[a.grid_location]
if pop_at_a > most:
most = pop_at_a
replaced_index = i
found_replacement = True
if found_replacement:
new_archive[replaced_index] = ind
else:
new_archive.append(ind)
return new_archive
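# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the original module). It relies
# only on the archiver signature documented above; `_Individual` is a minimal
# stand-in exposing `candidate`, `fitness`, and comparison operators, not
# inspyred's real Individual class.
if __name__ == "__main__":
    import random
    class _Individual(object):
        def __init__(self, candidate, fitness):
            self.candidate = candidate
            self.fitness = fitness
        def __eq__(self, other):
            return self.candidate == other.candidate
        def __lt__(self, other):
            return self.fitness < other.fitness
        def __gt__(self, other):
            return self.fitness > other.fitness
    rng = random.Random(0)
    population = [_Individual([rng.random()], rng.random()) for _ in range(5)]
    # best_archiver keeps only the non-dominated (here: highest-fitness) individuals.
    archive = best_archiver(rng, population, [], {})
    print([round(ind.fitness, 3) for ind in archive])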
|
py | 1a31e7fc7725d3c3909f4800ceabd2a0298f3351 | import pandas as pd
import numpy as np
if __name__ == '__main__':
df = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
mc_df = pd.read_csv(snakemake.input[1], sep="\t", index_col=0)
# sum the rows of the mutation count matrix to get the number of mutations per sample
n_mutations = mc_df.sum(axis=1)
# df is samples-by-signatures
# n_mutations is vector of length samples
df = df.transpose().multiply(n_mutations).transpose()
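    # equivalent to df.multiply(n_mutations, axis=0), which scales each sample row by its mutation count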
#df.index = [i-1 for i in df.index]
df.to_csv(snakemake.output[0], sep="\t")
|
py | 1a31e922c8c01a89d3b332f4e233c694f44ee074 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A setuptools based setup module.
See:
https://packaging.python.org/tutorials/packaging-projects/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
__version__ = "1.0.6"
description = "Analysis Correlation Engine (ACE) API Python Bindings."
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ace_api',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
# The project's main homepage.
url='https://github.com/IntegralDefense/ACE/_api_package',
# Author details
author='John Davison',
author_email='[email protected]',
# Choose your license
license='Apache-2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
"Intended Audience :: Information Technology",
"Intended Audience :: Telecommunications Industry",
'Operating System :: OS Independent',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
#'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Cyber Security,Information Security,InfoSec,Detection,Response,SOAR',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=[],
include_package_data=True,
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
py_modules=["ace_api"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['tzlocal', 'requests', 'pytz'],
entry_points={
'console_scripts': ['ace_api=ace_api:main'],
}
)
|
py | 1a31e92ae87ea87b6d3099e9dd4b3fba1cbdd48d | """
The batch module implements the :class:`~fastNLP.core.batch.DataSetIter` class required by fastNLP.
"""
__all__ = [
"BatchIter",
"DataSetIter",
"TorchLoaderIter",
]
import atexit
from numbers import Number
import numpy as np
import torch
import torch.utils.data
from ._logger import logger
from .dataset import DataSet
from .sampler import SequentialSampler
_python_is_exit = False
def _set_python_is_exit():
global _python_is_exit
_python_is_exit = True
atexit.register(_set_python_is_exit)
class DataSetGetter:
def __init__(self, dataset: DataSet, as_numpy=False):
self.dataset = dataset
self.inputs = {n: f for n, f in dataset.get_all_fields().items() if f.is_input}
self.targets = {n: f for n, f in dataset.get_all_fields().items() if f.is_target}
self.as_numpy = as_numpy
self.idx_list = list(range(len(dataset)))
def __getitem__(self, idx: int):
# mapping idx to sampled idx
idx = self.idx_list[idx]
inputs = {n:f.get(idx) for n, f in self.inputs.items()}
targets = {n:f.get(idx) for n, f in self.targets.items()}
return idx, inputs, targets
def __len__(self):
return len(self.dataset)
def collate_fn(self, batch: list):
"""
:param batch: [[idx1, x_dict1, y_dict1], [idx2, x_dict2, y_dict2], [xx, xx, xx]]
:return:
"""
        # TODO: support defining collate_fn on the DataSet, since different fields sometimes need to be fused together (e.g. for BERT)
batch_x = {n:[] for n in self.inputs.keys()}
batch_y = {n:[] for n in self.targets.keys()}
indices = []
for idx, x, y in batch:
indices.append(idx)
for n, v in x.items():
batch_x[n].append(v)
for n, v in y.items():
batch_y[n].append(v)
def pad_batch(batch_dict, field_array):
for n, vlist in batch_dict.items():
f = field_array[n]
if f.padder is None:
batch_dict[n] = np.array(vlist)
else:
data = f.pad(vlist)
if not self.as_numpy:
try:
data, flag = _to_tensor(data, f.dtype)
except TypeError as e:
logger.error(f"Field {n} cannot be converted to torch.tensor.")
raise e
batch_dict[n] = data
return batch_dict
return (indices,
pad_batch(batch_x, self.inputs),
pad_batch(batch_y, self.targets))
def set_idx_list(self, idx_list):
if len(idx_list) != len(self.idx_list):
raise ValueError
self.idx_list = idx_list
def __getattr__(self, item):
if hasattr(self.dataset, item):
return getattr(self.dataset, item)
else:
raise AttributeError("'DataSetGetter' object has no attribute '{}'".format(item))
class SamplerAdapter(torch.utils.data.Sampler):
def __init__(self, sampler, dataset):
super().__init__(dataset)
self.sampler = sampler
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __iter__(self):
return iter(self.sampler(self.dataset))
class BatchIter:
def __init__(self):
self.dataiter = None
self.num_batches = None
self.cur_batch_indices = None
self.batch_size = None
def init_iter(self):
pass
@staticmethod
def get_num_batches(num_samples, batch_size, drop_last):
num_batches = num_samples // batch_size
if not drop_last and (num_samples % batch_size > 0):
num_batches += 1
return num_batches
def __iter__(self):
self.init_iter()
for indices, batch_x, batch_y in self.dataiter:
self.cur_batch_indices = indices
yield batch_x, batch_y
def get_batch_indices(self):
return self.cur_batch_indices
def __len__(self):
return self.num_batches
@property
def dataset(self):
return self.dataiter.dataset
class DataSetIter(BatchIter):
"""
DataSetIter 用于从 `DataSet` 中按一定的顺序, 依次按 ``batch_size`` 的大小将数据取出,
组成 `x` 和 `y`::
batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler())
num_batch = len(batch)
for batch_x, batch_y in batch:
# do stuff ...
"""
def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False,
num_workers=0, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None):
"""
        :param dataset: a :class:`~fastNLP.DataSet` object, the dataset to iterate over
        :param int batch_size: the size of each batch
        :param sampler: the :class:`~fastNLP.Sampler` to use. If ``None``, :class:`~fastNLP.SequentialSampler` is used.
            Default: ``None``
        :param bool as_numpy: if ``True``, output batches are numpy.array; otherwise they are :class:`torch.Tensor`.
            Default: ``False``
        :param int num_workers: how many worker processes to use for preprocessing the data
        :param bool pin_memory: whether to put the produced tensors in pinned memory, which may speed things up
        :param bool drop_last: if the last batch has fewer than batch_size samples, drop it
        :param timeout:
        :param worker_init_fn: called once per worker at startup, receiving the worker's index as its argument
"""
super().__init__()
assert isinstance(dataset, DataSet)
if not isinstance(sampler, torch.utils.data.Sampler):
self.sampler = SamplerAdapter(sampler=sampler or SequentialSampler(), dataset=dataset)
else:
self.sampler = sampler
dataset = DataSetGetter(dataset, as_numpy)
collate_fn = dataset.collate_fn if hasattr(dataset, 'collate_fn') else None
self.dataiter = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, sampler=self.sampler,
collate_fn=collate_fn, num_workers=num_workers,
pin_memory=pin_memory, drop_last=drop_last,
timeout=timeout, worker_init_fn=worker_init_fn)
        # Base the count on the sampler's length: with DistributedSampler each process only sees part of the data
self.num_batches = self.get_num_batches(len(self.dataiter.sampler), batch_size, drop_last)
self.batch_size = batch_size
class TorchLoaderIter(BatchIter):
def __init__(self, dataset):
super().__init__()
assert isinstance(dataset, torch.utils.data.DataLoader)
self.dataiter = dataset
self.num_batches = self.get_num_batches(len(dataset.sampler), dataset.batch_size, dataset.drop_last)
self.batch_size = dataset.batch_size
def _to_tensor(batch, field_dtype):
"""
:param batch: np.array()
    :param field_dtype: the data type of the field
    :return: batch, flag. If the input data can be converted to a tensor, the returned batch is that tensor and flag is True;
        otherwise the original data is returned unchanged and flag is False
"""
try:
if field_dtype is not None and isinstance(field_dtype, type)\
and issubclass(field_dtype, Number) \
and not isinstance(batch, torch.Tensor):
if issubclass(batch.dtype.type, np.floating):
                new_batch = torch.as_tensor(batch).float()  # float32 by default
elif issubclass(batch.dtype.type, np.integer):
                new_batch = torch.as_tensor(batch).long()  # reuse the underlying memory, avoiding a copy
else:
new_batch = torch.as_tensor(batch)
return new_batch, True
else:
return batch, False
except Exception as e:
raise e
|
py | 1a31e95cbc821649a8b608324df8035c74893c62 | import sys
from math import pi, sin
from scipy.optimize import bisect, brenth, fsolve
A, B, C = map(int, sys.stdin.readline().split())
def f(t):
return A * t + B * sin(C * t * pi) - 100
def main():
# ans = bisect(f, a, b)
ans = brenth(f, 0, 200)
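    # brenth needs f(0) and f(200) to have opposite signs; it returns a t in [0, 200]
    # where A*t + B*sin(C*pi*t) = 100.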
return ans
if __name__ == "__main__":
ans = main()
print(ans)
|
py | 1a31e963a246bb29a5a0efe961087dbe67770912 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot(np.random.rand(10))
ax.set_ylim(0, 1)
def update(data):
line.set_ydata(data)
return line,
def data_gen():
while True: yield np.random.rand(10)
ani = animation.FuncAnimation(fig, update, data_gen, interval=100)
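# Keep a reference to the FuncAnimation object; if it were garbage collected the animation would stop updating.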
plt.show()
|
py | 1a31e9a9534854070fea89c91e8fa646516d28e6 | from .views import *
from django.urls import path
app_name = 'product'
urlpatterns = [
# Category
path('category/', CategoryList.as_view(), name='categoryList'),
path('category/<int:pk>/', CategoryDetail.as_view(), name='categoryDetail'),
# Subject
path('subject/', SubjectList.as_view(), name='subjectList'),
path('subject/<int:pk>/', SubjectDetail.as_view(), name='subjectDetail'),
# Product
path('product/', ProductList.as_view(), name='productList'),
path('product/<int:pk>/', ProductDetail.as_view(), name='productDetail'),
# Nested (Category)
path(
'category/<int:cat>/product/',
CategoryProductList.as_view(),
name='categoryProductList'
),
# Nested (Subject)
path(
'subject/<int:sub>/product/',
SubjectProductList.as_view(),
name='subjectProductList'
),
# Nested (Category Subject)
path(
'category/<int:cat>/subject/<int:sub>/product/',
CategorySubjectProductList.as_view(),
name='categorySubjectProductList'
),
]
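# Example resolution (added note): with these patterns included at the project root,
# /category/3/subject/7/product/ maps to CategorySubjectProductList with
# kwargs {'cat': 3, 'sub': 7}, reversible as 'product:categorySubjectProductList'.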
|
py | 1a31e9c0c2772365d023afe00aaa105a9e6368ee | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
PROJECT_PATH = f"projects/{PROJECT}"
LOGGER_NAME = "LOGGER_NAME"
SINK_NAME = "SINK_NAME"
FILTER = "logName:syslog AND severity>=ERROR"
DESTINATION_URI = "faux.googleapis.com/destination"
METRIC_NAME = "metric_name"
FILTER = "logName:syslog AND severity>=ERROR"
DESCRIPTION = "DESCRIPTION"
TIME_FORMAT = '"%Y-%m-%dT%H:%M:%S.%f%z"'
@staticmethod
def _get_target_class():
from google.cloud.logging import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
from google.cloud._http import ClientInfo
from google.cloud.logging_v2._http import Connection
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
self.assertEqual(client.project, self.PROJECT)
self.assertIsInstance(client._connection, Connection)
self.assertIsInstance(client._connection._client_info, ClientInfo)
def test_ctor_explicit(self):
from google.cloud._http import ClientInfo
from google.cloud.logging_v2._http import Connection
creds = _make_credentials()
client_info = ClientInfo()
client = self._make_one(
project=self.PROJECT, credentials=creds, client_info=client_info
)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(client._client_info, client_info)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection._client_info, client_info)
def test_ctor_w_empty_client_options(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
client_options = ClientOptions()
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_client_options_object(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
client_options = ClientOptions(
api_endpoint="https://foo-logging.googleapis.com"
)
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, "https://foo-logging.googleapis.com"
)
def test_ctor_w_client_options_dict(self):
creds = _make_credentials()
client_options = {"api_endpoint": "https://foo-logging.googleapis.com"}
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, "https://foo-logging.googleapis.com"
)
def test_logging_api_wo_gapic(self):
from google.cloud.logging_v2._http import _LoggingAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.logging_api
self.assertIsInstance(api, _LoggingAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.logging_api
self.assertIs(again, api)
def test_logging_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_logging_api.side_effect = make_api
api = client.logging_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.logging_api
self.assertIs(again, api)
def test_no_gapic_ctor(self):
from google.cloud.logging_v2._http import _LoggingAPI
creds = _make_credentials()
patch = mock.patch("google.cloud.logging_v2.client._USE_GRPC", new=True)
with patch:
client = self._make_one(
project=self.PROJECT, credentials=creds, _use_grpc=False
)
api = client.logging_api
self.assertIsInstance(api, _LoggingAPI)
def test_sinks_api_wo_gapic(self):
from google.cloud.logging_v2._http import _SinksAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.sinks_api
self.assertIsInstance(api, _SinksAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.sinks_api
self.assertIs(again, api)
def test_sinks_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_sinks_api.side_effect = make_api
api = client.sinks_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.sinks_api
self.assertIs(again, api)
def test_metrics_api_wo_gapic(self):
from google.cloud.logging_v2._http import _MetricsAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.metrics_api
self.assertIsInstance(api, _MetricsAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.metrics_api
self.assertIs(again, api)
def test_metrics_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_metrics_api.side_effect = make_api
api = client.metrics_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.metrics_api
self.assertIs(again, api)
def test_logger(self):
from google.cloud.logging import Logger
from google.cloud.logging_v2.logger import _GLOBAL_RESOURCE
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
labels = {"test": "true"}
logger = client.logger(
self.LOGGER_NAME, resource=_GLOBAL_RESOURCE, labels=labels
)
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(logger.default_resource, _GLOBAL_RESOURCE)
self.assertEqual(logger.labels, labels)
def test_list_entries_defaults(self):
from google.cloud.logging import TextEntry
IID = "IID"
TEXT = "TEXT"
ENTRIES = [
{
"textPayload": TEXT,
"insertId": IID,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
}
]
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, _use_grpc=False
)
returned = {"entries": ENTRIES}
client._connection = _Connection(returned)
iterator = client.list_entries()
entries = list(iterator)
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertIsInstance(entry, TextEntry)
self.assertEqual(entry.insert_id, IID)
self.assertEqual(entry.payload, TEXT)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
# check call payload
call_payload_no_filter = deepcopy(client._connection._called_with)
call_payload_no_filter["data"]["filter"] = "removed"
self.assertEqual(
call_payload_no_filter,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": "removed",
"resourceNames": [f"projects/{self.PROJECT}"],
},
},
)
# verify that default filter is 24 hours
timestamp = datetime.strptime(
client._connection._called_with["data"]["filter"],
"timestamp>=" + self.TIME_FORMAT,
)
yesterday = datetime.now(timezone.utc) - timedelta(days=1)
self.assertLess(yesterday - timestamp, timedelta(minutes=1))
def test_list_entries_explicit(self):
from google.cloud.logging import DESCENDING
from google.cloud.logging import ProtobufEntry
from google.cloud.logging import StructEntry
from google.cloud.logging import Logger
PROJECT1 = "PROJECT1"
PROJECT2 = "PROJECT2"
INPUT_FILTER = "logName:LOGNAME"
IID1 = "IID1"
IID2 = "IID2"
PAYLOAD = {"message": "MESSAGE", "weather": "partly cloudy"}
PROTO_PAYLOAD = PAYLOAD.copy()
PROTO_PAYLOAD["@type"] = "type.googleapis.com/testing.example"
TOKEN = "TOKEN"
PAGE_SIZE = 42
ENTRIES = [
{
"jsonPayload": PAYLOAD,
"insertId": IID1,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
{
"protoPayload": PROTO_PAYLOAD,
"insertId": IID2,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
{
"protoPayload": "ignored",
"insertId": "ignored",
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"entries": ENTRIES}
client._connection = _Connection(returned)
iterator = client.list_entries(
resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
filter_=INPUT_FILTER,
order_by=DESCENDING,
page_size=PAGE_SIZE,
page_token=TOKEN,
max_results=2,
)
entries = list(iterator)
# Check the entries.
self.assertEqual(len(entries), 2)
entry = entries[0]
self.assertIsInstance(entry, StructEntry)
self.assertEqual(entry.insert_id, IID1)
self.assertEqual(entry.payload, PAYLOAD)
logger = entry.logger
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
entry = entries[1]
self.assertIsInstance(entry, ProtobufEntry)
self.assertEqual(entry.insert_id, IID2)
self.assertEqual(entry.payload, PROTO_PAYLOAD)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertIs(entries[0].logger, entries[1].logger)
# check call payload
call_payload_no_filter = deepcopy(client._connection._called_with)
call_payload_no_filter["data"]["filter"] = "removed"
self.assertEqual(
call_payload_no_filter,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": "removed",
"orderBy": DESCENDING,
"pageSize": PAGE_SIZE,
"pageToken": TOKEN,
"resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
},
},
)
# verify that default timestamp filter is added
timestamp = datetime.strptime(
client._connection._called_with["data"]["filter"],
INPUT_FILTER + " AND timestamp>=" + self.TIME_FORMAT,
)
yesterday = datetime.now(timezone.utc) - timedelta(days=1)
self.assertLess(yesterday - timestamp, timedelta(minutes=1))
def test_list_entries_explicit_timestamp(self):
from google.cloud.logging import DESCENDING
from google.cloud.logging import ProtobufEntry
from google.cloud.logging import StructEntry
from google.cloud.logging import Logger
PROJECT1 = "PROJECT1"
PROJECT2 = "PROJECT2"
INPUT_FILTER = 'logName:LOGNAME AND timestamp="2020-10-13T21"'
IID1 = "IID1"
IID2 = "IID2"
PAYLOAD = {"message": "MESSAGE", "weather": "partly cloudy"}
PROTO_PAYLOAD = PAYLOAD.copy()
PROTO_PAYLOAD["@type"] = "type.googleapis.com/testing.example"
PAGE_SIZE = 42
ENTRIES = [
{
"jsonPayload": PAYLOAD,
"insertId": IID1,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
{
"protoPayload": PROTO_PAYLOAD,
"insertId": IID2,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"entries": ENTRIES}
client._connection = _Connection(returned)
iterator = client.list_entries(
resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
filter_=INPUT_FILTER,
order_by=DESCENDING,
page_size=PAGE_SIZE,
)
entries = list(iterator)
# Check the entries.
self.assertEqual(len(entries), 2)
entry = entries[0]
self.assertIsInstance(entry, StructEntry)
self.assertEqual(entry.insert_id, IID1)
self.assertEqual(entry.payload, PAYLOAD)
logger = entry.logger
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
entry = entries[1]
self.assertIsInstance(entry, ProtobufEntry)
self.assertEqual(entry.insert_id, IID2)
self.assertEqual(entry.payload, PROTO_PAYLOAD)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertIs(entries[0].logger, entries[1].logger)
# check call payload
# filter should not be changed
self.assertEqual(
client._connection._called_with,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": INPUT_FILTER,
"orderBy": DESCENDING,
"pageSize": PAGE_SIZE,
"resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
},
},
)
def test_sink_defaults(self):
from google.cloud.logging import Sink
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
sink = client.sink(self.SINK_NAME)
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertIsNone(sink.filter_)
self.assertIsNone(sink.destination)
self.assertIs(sink.client, client)
self.assertEqual(sink.parent, self.PROJECT_PATH)
def test_sink_explicit(self):
from google.cloud.logging import Sink
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
sink = client.sink(
self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI
)
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertEqual(sink.filter_, self.FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
self.assertEqual(sink.parent, self.PROJECT_PATH)
def test_list_sinks_no_paging(self):
from google.cloud.logging import Sink
PROJECT = "PROJECT"
SINK_NAME = "sink_name"
FILTER = "logName:syslog AND severity>=ERROR"
SINKS = [
{"name": SINK_NAME, "filter": FILTER, "destination": self.DESTINATION_URI}
]
client = self._make_one(
project=PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"sinks": SINKS}
client._connection = _Connection(returned)
iterator = client.list_sinks()
sinks = list(iterator)
# Check the sinks returned.
self.assertEqual(len(sinks), 1)
sink = sinks[0]
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, SINK_NAME)
self.assertEqual(sink.filter_, FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
# Verify the mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/sinks" % (self.PROJECT,)
self.assertEqual(
called_with, {"method": "GET", "path": path, "query_params": {}}
)
def test_list_sinks_with_paging(self):
from google.cloud.logging import Sink
PROJECT = "PROJECT"
SINK_NAME = "sink_name"
FILTER = "logName:syslog AND severity>=ERROR"
TOKEN = "TOKEN"
PAGE_SIZE = 42
SINKS = [
{"name": SINK_NAME, "filter": FILTER, "destination": self.DESTINATION_URI},
{"name": "test", "filter": "test", "destination": "test"},
]
client = self._make_one(
project=PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"sinks": SINKS}
client._connection = _Connection(returned)
iterator = client.list_sinks(
page_size=PAGE_SIZE, page_token=TOKEN, max_results=1
)
sinks = list(iterator)
# Check the sinks returned.
self.assertEqual(len(sinks), 1)
sink = sinks[0]
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, SINK_NAME)
self.assertEqual(sink.filter_, FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
# Verify the mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/sinks" % (self.PROJECT,)
self.assertEqual(
called_with,
{
"method": "GET",
"path": path,
"query_params": {"pageSize": PAGE_SIZE, "pageToken": TOKEN},
},
)
def test_metric_defaults(self):
from google.cloud.logging import Metric
creds = _make_credentials()
client_obj = self._make_one(project=self.PROJECT, credentials=creds)
metric = client_obj.metric(self.METRIC_NAME)
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertIsNone(metric.filter_)
self.assertEqual(metric.description, "")
self.assertIs(metric.client, client_obj)
self.assertEqual(metric.project, self.PROJECT)
def test_metric_explicit(self):
from google.cloud.logging import Metric
creds = _make_credentials()
client_obj = self._make_one(project=self.PROJECT, credentials=creds)
metric = client_obj.metric(
self.METRIC_NAME, filter_=self.FILTER, description=self.DESCRIPTION
)
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client_obj)
self.assertEqual(metric.project, self.PROJECT)
def test_list_metrics_no_paging(self):
from google.cloud.logging import Metric
metrics = [
{
"name": self.METRIC_NAME,
"filter": self.FILTER,
"description": self.DESCRIPTION,
}
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"metrics": metrics}
client._connection = _Connection(returned)
# Execute request.
iterator = client.list_metrics()
metrics = list(iterator)
# Check the metrics returned.
self.assertEqual(len(metrics), 1)
metric = metrics[0]
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client)
# Verify mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/metrics" % (self.PROJECT,)
self.assertEqual(
called_with, {"method": "GET", "path": path, "query_params": {}}
)
def test_list_metrics_with_paging(self):
from google.cloud.logging import Metric
token = "TOKEN"
page_size = 42
metrics = [
{
"name": self.METRIC_NAME,
"filter": self.FILTER,
"description": self.DESCRIPTION,
},
{"name": "test", "filter": "test", "description": "test"},
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"metrics": metrics}
client._connection = _Connection(returned)
# Execute request.
iterator = client.list_metrics(
page_size=page_size, page_token=token, max_results=1
)
metrics = list(iterator)
# Check the metrics returned.
self.assertEqual(len(metrics), 1)
metric = metrics[0]
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client)
# Verify mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/metrics" % (self.PROJECT,)
self.assertEqual(
called_with,
{
"method": "GET",
"path": path,
"query_params": {"pageSize": page_size, "pageToken": token},
},
)
def test_get_default_handler_app_engine(self):
import os
from google.cloud._testing import _Monkey
from google.cloud.logging_v2.handlers._monitored_resources import _GAE_ENV_VARS
from google.cloud.logging.handlers import CloudLoggingHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
gae_env_vars = {var: "TRUE" for var in _GAE_ENV_VARS}
with _Monkey(os, environ=gae_env_vars):
handler = client.get_default_handler()
handler.transport.worker.stop()
self.assertIsInstance(handler, CloudLoggingHandler)
def test_get_default_handler_container_engine(self):
from google.cloud.logging.handlers import StructuredLogHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
patch = mock.patch(
"google.cloud.logging_v2.handlers._monitored_resources.retrieve_metadata_server",
return_value="test-gke-cluster",
)
with patch:
handler = client.get_default_handler()
self.assertIsInstance(handler, StructuredLogHandler)
def test_get_default_handler_general(self):
import io
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging import Resource
name = "test-logger"
resource = Resource("resource_type", {"resource_label": "value"})
labels = {"handler_label": "value"}
stream = io.BytesIO()
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
handler = client.get_default_handler(
name=name, resource=resource, labels=labels, stream=stream
)
handler.transport.worker.stop()
self.assertIsInstance(handler, CloudLoggingHandler)
self.assertEqual(handler.name, name)
self.assertEqual(handler.resource, resource)
self.assertEqual(handler.labels, labels)
def test_setup_logging(self):
from google.cloud.logging.handlers import CloudLoggingHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
with mock.patch("google.cloud.logging_v2.client.setup_logging") as mocked:
client.setup_logging()
self.assertEqual(len(mocked.mock_calls), 1)
_, args, kwargs = mocked.mock_calls[0]
(handler,) = args
self.assertIsInstance(handler, CloudLoggingHandler)
handler.transport.worker.stop()
expected_kwargs = {
"excluded_loggers": (
"google.cloud",
"google.auth",
"google_auth_httplib2",
"google.api_core.bidi",
"werkzeug",
),
"log_level": 20,
}
self.assertEqual(kwargs, expected_kwargs)
def test_setup_logging_w_extra_kwargs(self):
import io
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging import Resource
name = "test-logger"
resource = Resource("resource_type", {"resource_label": "value"})
labels = {"handler_label": "value"}
stream = io.BytesIO()
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
with mock.patch("google.cloud.logging_v2.client.setup_logging") as mocked:
client.setup_logging(
name=name, resource=resource, labels=labels, stream=stream
)
self.assertEqual(len(mocked.mock_calls), 1)
_, args, kwargs = mocked.mock_calls[0]
(handler,) = args
self.assertIsInstance(handler, CloudLoggingHandler)
self.assertEqual(handler.name, name)
self.assertEqual(handler.resource, resource)
self.assertEqual(handler.labels, labels)
handler.transport.worker.stop()
expected_kwargs = {
"excluded_loggers": (
"google.cloud",
"google.auth",
"google_auth_httplib2",
"google.api_core.bidi",
"werkzeug",
),
"log_level": 20,
}
self.assertEqual(kwargs, expected_kwargs)
class _Connection(object):
_called_with = None
def __init__(self, *responses):
self._responses = responses
def api_request(self, **kw):
self._called_with = kw
response, self._responses = self._responses[0], self._responses[1:]
return response
|
py | 1a31e9ea296018dc2147105beba6415f1b78e013 | import logging
NAME_LOGGER = "datacube_logger"
logger = logging.getLogger(NAME_LOGGER)
DEFAULT_LOGGER_LEVEL = 20
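# 20 corresponds to logging.INFO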
logger.setLevel(DEFAULT_LOGGER_LEVEL)
# create file handler which logs even debug messages
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)-15s - %(file_name)s - [%(levelname)s] - %(message)s",
"%m/%d/%Y %I:%M:%S %p",
)
ch.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.propagate = False
class Logger(logging.Logger):
    def __init__(self, name):
        # Initialize the base logging.Logger; the methods below delegate to the module-level logger.
        super().__init__(name)
        self.d = {"file_name": name}
def debug(self, message):
logger.debug(message, extra=self.d)
def info(self, message):
logger.info(message, extra=self.d)
def error(self, message):
logger.error(message, extra=self.d)
def warning(self, message):
logger.warning(message, extra=self.d)
def critical(self, message):
logger.critical(message, extra=self.d)
def non_verbose(self):
logger.setLevel(logging.INFO)
|
py | 1a31ea6a8e8692fc47a156a863adbb8bbfe40d5d | import os
from string import Template
import psycopg2
import argparse
parser = argparse.ArgumentParser()
args_general = parser.add_argument_group(title="General Options")
args_general.add_argument('-t', '--table_name', default='test', help='table to be copied; the target table will have _copy as a suffix')
args_general.add_argument('-c', '--columns', default='id', help='list the columns to be converted to bigint eg. col1,col2,col3')
args_general.add_argument('-p', '--primary_key', default='id', help='primary key of the table for which a new sequence is created in the copy table')
args = parser.parse_args()
table_name = args.table_name
if not os.path.exists('generated/{}'.format(table_name)):
os.makedirs('generated/{}'.format(table_name))
pk = args.primary_key
columns = args.columns
raw_convert = columns.split(',')
convert = []
for c in raw_convert:
convert.append('ALTER TABLE {} ALTER COLUMN {} TYPE bigint'.format(table_name+"_copy", c))
def create_clone(table_name, pk, convert):
template = open('templates/clone.sql')
src = Template(template.read())
d = {'source': table_name, 'target': table_name+"_copy", 'pk': pk, 'seq_name': table_name+"_copy"+"_id"+"_seq", 'convert': '\n'.join(convert)}
result = src.substitute(d)
f = open("generated/{0}/{0}_clone.sql".format(table_name), "a")
f.write(result)
    f.close()
def create_indexes(table_name):
try:
conn = psycopg2.connect("dbname=postgres")
except psycopg2.Error as e:
print("Failed to connect to the database: ", e.pgcode)
query = "select replace(indexdef,'INDEX', 'INDEX CONCURRENTLY') from pg_indexes where tablename = '{}'".format(table_name)
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
template = open('templates/indexes.sql')
src = Template(template.read())
    d = {'indexes': '\n'.join(row[0] for row in rows)}  # each row is a 1-tuple; take the SQL text itself
result = src.substitute(d)
f = open("generated/{0}/{0}_indexes.sql".format(table_name), "a")
f.write(result)
    f.close()
def create_trigger(table_name, pk):
template = open('templates/trigger.sql')
src = Template(template.read())
d = {'source': table_name, 'target': table_name+"_copy", 'pk': pk, 'tname': table_name+"_trig", 'b': "$BODY$"}
result = src.substitute(d)
f = open("generated/{0}/{0}_trig.sql".format(table_name), "a")
f.write(result)
    f.close()
def grant_acl(table_name):
try:
conn = psycopg2.connect("dbname=postgres")
except psycopg2.Error as e:
print("Failed to connect to the database: ", e.pgcode)
query = "select 'GRANT ' || privilege_type || ' ON ' || table_name || ' TO ' || grantee || ';' from information_schema.role_table_grants where table_name = '{}' and grantee <> grantor;".format(table_name, table_name)
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()
template = open('templates/acl.sql')
src = Template(template.read())
    d = {'acl': '\n'.join(row[0] for row in rows)}  # each row is a 1-tuple; take the GRANT statement itself
result = src.substitute(d)
f = open("generated/{0}/{0}_acl.sql".format(table_name), "a")
f.write(result)
    f.close()
def main():
create_trigger(table_name, pk)
create_clone(table_name, pk, convert)
grant_acl(table_name)
create_indexes(table_name)
if __name__ == "__main__":
main()
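# Example invocation (hypothetical script name, shown for illustration only):
#   python clone_table.py -t users -c id,account_id -p id
# which writes users_clone.sql, users_trig.sql, users_acl.sql and users_indexes.sql
# under generated/users/.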
|
py | 1a31eb4cf5078840f58a70751017c1f00388d556 | """
weasyprint.tests.test_presentational_hints
------------------------------------------
Test the HTML presentational hints.
"""
from weasyprint import CSS, HTML
from .testing_utils import BASE_URL, assert_no_logs
PH_TESTING_CSS = CSS(string='''
@page {margin: 0; size: 1000px 1000px}
body {margin: 0}
''')
@assert_no_logs
def test_no_ph():
# Test both CSS and non-CSS rules
document = HTML(string='''
<hr size=100 />
<table align=right width=100><td>0</td></table>
''').render(stylesheets=[PH_TESTING_CSS])
page, = document.pages
html, = page._page_box.children
body, = html.children
hr, table = body.children
assert hr.border_height() != 100
assert table.position_x == 0
@assert_no_logs
def test_ph_page():
document = HTML(string='''
<body marginheight=2 topmargin=3 leftmargin=5
bgcolor=red text=blue />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
assert body.margin_top == 2
assert body.margin_bottom == 2
assert body.margin_left == 5
assert body.margin_right == 0
assert body.style['background_color'] == (1, 0, 0, 1)
assert body.style['color'] == (0, 0, 1, 1)
@assert_no_logs
def test_ph_flow():
document = HTML(string='''
<pre wrap></pre>
<center></center>
<div align=center></div>
<div align=middle></div>
<div align=left></div>
<div align=right></div>
<div align=justify></div>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
pre, center, div1, div2, div3, div4, div5 = body.children
assert pre.style['white_space'] == 'pre-wrap'
assert center.style['text_align'] == 'center'
assert div1.style['text_align'] == 'center'
assert div2.style['text_align'] == 'center'
assert div3.style['text_align'] == 'left'
assert div4.style['text_align'] == 'right'
assert div5.style['text_align'] == 'justify'
@assert_no_logs
def test_ph_phrasing():
document = HTML(string='''
<style>@font-face { src: url(AHEM____.TTF); font-family: ahem }</style>
<br clear=left>
<br clear=right />
<br clear=both />
<br clear=all />
<font color=red face=ahem size=7></font>
<Font size=4></Font>
<font size=+5 ></font>
<font size=-5 ></font>
''', base_url=BASE_URL).render(
stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
line1, line2, line3, line4, line5 = body.children
br1, = line1.children
br2, = line2.children
br3, = line3.children
br4, = line4.children
font1, font2, font3, font4 = line5.children
assert br1.style['clear'] == 'left'
assert br2.style['clear'] == 'right'
assert br3.style['clear'] == 'both'
assert br4.style['clear'] == 'both'
assert font1.style['color'] == (1, 0, 0, 1)
assert font1.style['font_family'] == ('ahem',)
assert font1.style['font_size'] == 1.5 * 2 * 16
assert font2.style['font_size'] == 6 / 5 * 16
assert font3.style['font_size'] == 1.5 * 2 * 16
assert font4.style['font_size'] == 8 / 9 * 16
@assert_no_logs
def test_ph_lists():
document = HTML(string='''
<ol>
<li type=A></li>
<li type=1></li>
<li type=a></li>
<li type=i></li>
<li type=I></li>
</ol>
<ul>
<li type=circle></li>
<li type=disc></li>
<li type=square></li>
</ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
ol, ul = body.children
oli1, oli2, oli3, oli4, oli5 = ol.children
uli1, uli2, uli3 = ul.children
assert oli1.style['list_style_type'] == 'upper-alpha'
assert oli2.style['list_style_type'] == 'decimal'
assert oli3.style['list_style_type'] == 'lower-alpha'
assert oli4.style['list_style_type'] == 'lower-roman'
assert oli5.style['list_style_type'] == 'upper-roman'
assert uli1.style['list_style_type'] == 'circle'
assert uli2.style['list_style_type'] == 'disc'
assert uli3.style['list_style_type'] == 'square'
@assert_no_logs
def test_ph_lists_types():
document = HTML(string='''
<ol type=A></ol>
<ol type=1></ol>
<ol type=a></ol>
<ol type=i></ol>
<ol type=I></ol>
<ul type=circle></ul>
<ul type=disc></ul>
<ul type=square></ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
ol1, ol2, ol3, ol4, ol5, ul1, ul2, ul3 = body.children
assert ol1.style['list_style_type'] == 'upper-alpha'
assert ol2.style['list_style_type'] == 'decimal'
assert ol3.style['list_style_type'] == 'lower-alpha'
assert ol4.style['list_style_type'] == 'lower-roman'
assert ol5.style['list_style_type'] == 'upper-roman'
assert ul1.style['list_style_type'] == 'circle'
assert ul2.style['list_style_type'] == 'disc'
assert ul3.style['list_style_type'] == 'square'
@assert_no_logs
def test_ph_tables():
document = HTML(string='''
<table align=left rules=none></table>
<table align=right rules=groups></table>
<table align=center rules=rows></table>
<table border=10 cellspacing=3 bordercolor=green>
<thead>
<tr>
<th valign=top></th>
</tr>
</thead>
<tr>
<td nowrap><h1 align=right></h1><p align=center></p></td>
</tr>
<tr>
</tr>
<tfoot align=justify>
<tr>
<td></td>
</tr>
</tfoot>
</table>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
wrapper1, wrapper2, wrapper3, wrapper4, = body.children
assert wrapper1.style['float'] == 'left'
assert wrapper2.style['float'] == 'right'
assert wrapper3.style['margin_left'] == 'auto'
assert wrapper3.style['margin_right'] == 'auto'
assert wrapper1.children[0].style['border_left_style'] == 'hidden'
assert wrapper1.style['border_collapse'] == 'collapse'
assert wrapper2.children[0].style['border_left_style'] == 'hidden'
assert wrapper2.style['border_collapse'] == 'collapse'
assert wrapper3.children[0].style['border_left_style'] == 'hidden'
assert wrapper3.style['border_collapse'] == 'collapse'
table4, = wrapper4.children
assert table4.style['border_top_style'] == 'outset'
assert table4.style['border_top_width'] == 10
assert table4.style['border_spacing'] == (3, 3)
r, g, b, a = table4.style['border_left_color']
assert g > r and g > b
head_group, rows_group, foot_group = table4.children
head, = head_group.children
th, = head.children
assert th.style['vertical_align'] == 'top'
line1, line2 = rows_group.children
td, = line1.children
assert td.style['white_space'] == 'nowrap'
assert td.style['border_top_width'] == 1
assert td.style['border_top_style'] == 'inset'
h1, p = td.children
assert h1.style['text_align'] == 'right'
assert p.style['text_align'] == 'center'
foot, = foot_group.children
tr, = foot.children
assert tr.style['text_align'] == 'justify'
@assert_no_logs
def test_ph_hr():
document = HTML(string='''
<hr align=left>
<hr align=right />
<hr align=both color=red />
<hr align=center noshade size=10 />
<hr align=all size=8 width=100 />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
hr1, hr2, hr3, hr4, hr5 = body.children
assert hr1.margin_left == 0
assert hr1.style['margin_right'] == 'auto'
assert hr2.style['margin_left'] == 'auto'
assert hr2.margin_right == 0
assert hr3.style['margin_left'] == 'auto'
assert hr3.style['margin_right'] == 'auto'
assert hr3.style['color'] == (1, 0, 0, 1)
assert hr4.style['margin_left'] == 'auto'
assert hr4.style['margin_right'] == 'auto'
assert hr4.border_height() == 10
assert hr4.style['border_top_width'] == 5
assert hr5.border_height() == 8
assert hr5.height == 6
assert hr5.width == 100
assert hr5.style['border_top_width'] == 1
@assert_no_logs
def test_ph_embedded():
document = HTML(string='''
<object data="data:image/svg+xml,<svg></svg>"
align=top hspace=10 vspace=20></object>
<img src="data:image/svg+xml,<svg></svg>" alt=text
align=right width=10 height=20 />
<embed src="data:image/svg+xml,<svg></svg>" align=texttop />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
page, = document.pages
html, = page._page_box.children
body, = html.children
line, = body.children
object_, text1, img, embed, text2 = line.children
assert embed.style['vertical_align'] == 'text-top'
assert object_.style['vertical_align'] == 'top'
assert object_.margin_top == 20
assert object_.margin_left == 10
assert img.style['float'] == 'right'
assert img.width == 10
assert img.height == 20
|
py | 1a31eba21ac22c288209a6750bc0aee5b435a5ea |
import subprocess
import sys
import re
import os
import setup_util
from os.path import expanduser
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("sinatra/hello_world.rb", ":host => '.*'", ":host => '" + args.database_host + "'")
try:
subprocess.check_call("rvm ruby-2.0.0-p0 do bundle install --gemfile=Gemfile-ruby", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-ruby Gemfile", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-ruby.lock Gemfile.lock", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/sinatra/config/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen("rvm ruby-2.0.0-p0 do bundle exec unicorn_rails -E production -c config/unicorn.rb", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
try:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'unicorn' in line and 'master' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
# subprocess.check_call("rvm ruby-2.0.0-p0 do bundle exec passenger stop --pid-file=$HOME/FrameworkBenchmarks/rack/rack.pid", shell=True, cwd='rack')
subprocess.check_call("rm Gemfile", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
subprocess.check_call("rm Gemfile.lock", shell=True, cwd="sinatra", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
|
py | 1a31ebba0ef91e1963d395ddbc3c656387a12964 | # Copyright (c) 2020 Rocky Bernstein
from uncompyle6.parsers.treenode import SyntaxTree
def tryelsestmtl3(self, lhs, n, rule, ast, tokens, first, last):
    # Check the end of the except handler to make sure there isn't a jump
    # from inside the except handler to the end. If there is, then this is
    # a "try" with no "else".
except_handler = ast[3]
if except_handler == "except_handler_else":
except_handler = except_handler[0]
come_from = except_handler[-1]
# We only care about the *first* come_from because that is the
    # innermost one. So if the "tryelse" is invalid (should be a "try")
# it will be invalid here.
if come_from == "COME_FROM":
first_come_from = except_handler[-1]
elif come_from == "END_FINALLY":
return False
elif come_from == "except_return":
return False
else:
assert come_from in ("come_froms", "opt_come_from_except")
first_come_from = come_from[0]
if not hasattr(first_come_from, "attr"):
# optional come from
return False
leading_jump = except_handler[0]
if not hasattr(leading_jump, "offset"):
return False
# We really don't care that this is a jump per-se. But
# we could also check that this jumps to the end of the except if
# desired.
if isinstance(leading_jump, SyntaxTree):
except_handler_first_offset = leading_jump.first_child().off2int()
else:
except_handler_first_offset = leading_jump.off2int()
return first_come_from.attr > except_handler_first_offset
|
py | 1a31ebe0820d67df4b768e8102a134b34cfff0c5 | from __future__ import unicode_literals
import time
import hmac
import hashlib
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
urlencode_postdata,
xpath_text,
)
class AtresPlayerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html'
_NETRC_MACHINE = 'atresplayer'
_TESTS = [
{
'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html',
'md5': 'efd56753cda1bb64df52a3074f62e38a',
'info_dict': {
'id': 'capitulo-10-especial-solidario-nochebuena',
'ext': 'mp4',
'title': 'Especial Solidario de Nochebuena',
'description': 'md5:e2d52ff12214fa937107d21064075bf1',
'duration': 5527.6,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'This video is only available for registered users'
},
{
'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
'md5': '6e52cbb513c405e403dbacb7aacf8747',
'info_dict': {
'id': 'capitulo-112-david-bustamante',
'ext': 'flv',
'title': 'David Bustamante',
'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
'duration': 1439.0,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
'only_matching': True,
},
]
_USER_AGENT = 'Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9300 Build/JSS15J'
_MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)'
_TIMESTAMP_SHIFT = 30000
_TIME_API_URL = 'http://servicios.atresplayer.com/api/admin/time.json'
_URL_VIDEO_TEMPLATE = 'https://servicios.atresplayer.com/api/urlVideo/{1}/{0}/{1}|{2}|{3}.json'
_PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s'
_EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s'
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
_ERRORS = {
'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
'DELETED': 'This video has expired and is no longer available for online streaming.',
'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to right restrictions.',
# 'PREMIUM': 'PREMIUM',
}
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'j_username': username,
'j_password': password,
}
request = sanitized_Request(
self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
request, None, 'Logging in')
error = self._html_search_regex(
r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',
response, 'error', default=None)
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
episode_id = self._search_regex(
r'episode="([^"]+)"', webpage, 'episode id')
request = sanitized_Request(
self._PLAYER_URL_TEMPLATE % episode_id,
headers={'User-Agent': self._USER_AGENT})
player = self._download_json(request, episode_id, 'Downloading player JSON')
episode_type = player.get('typeOfEpisode')
error_message = self._ERRORS.get(episode_type)
if error_message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
formats = []
video_url = player.get('urlVideo')
if video_url:
format_info = {
'url': video_url,
'format_id': 'http',
}
mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
if mobj:
format_info.update({
'width': int_or_none(mobj.group('width')),
'height': int_or_none(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
formats.append(format_info)
timestamp = int_or_none(self._download_webpage(
self._TIME_API_URL,
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
token = hmac.new(
self._MAGIC.encode('ascii'),
(episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
).hexdigest()
request = sanitized_Request(
self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
headers={'User-Agent': self._USER_AGENT})
fmt_json = self._download_json(
request, video_id, 'Downloading windows video JSON')
result = fmt_json.get('resultDes')
if result.lower() != 'ok':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, result), expected=True)
for format_id, video_url in fmt_json['resultObject'].items():
if format_id == 'token' or not video_url.startswith('http'):
continue
if 'geodeswowsmpra3player' in video_url:
# f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
# f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
# this videos are protected by DRM, the f4m downloader doesn't support them
continue
video_url_hd = video_url.replace('free_es', 'es')
formats.extend(self._extract_f4m_formats(
video_url_hd[:-9] + '/manifest.f4m', video_id, f4m_id='hds',
fatal=False))
formats.extend(self._extract_mpd_formats(
video_url_hd[:-9] + '/manifest.mpd', video_id, mpd_id='dash',
fatal=False))
self._sort_formats(formats)
path_data = player.get('pathData')
episode = self._download_xml(
self._EPISODE_URL_TEMPLATE % path_data, video_id,
'Downloading episode XML')
duration = float_or_none(xpath_text(
episode, './media/asset/info/technical/contentDuration', 'duration'))
art = episode.find('./media/asset/info/art')
title = xpath_text(art, './name', 'title')
description = xpath_text(art, './description', 'description')
thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail')
subtitles = {}
subtitle_url = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
if subtitle_url:
subtitles['es'] = [{
'ext': 'srt',
'url': subtitle_url,
}]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
py | 1a31ec454171dc1640073cdcaf39a6f6dc71fb26 | from tempfile import NamedTemporaryFile
import pytest
from sanitized_dump.utils.db import db_setting_to_db_string
@pytest.mark.parametrize("engine, name, user, password, host, port, expected", [
("django.db.backends.mysql", "db", "user", "password", "host", "3306",
"mysql://user:password@host:3306/db"),
("django.db.backends.mysql", "db", "user", "password", "host", "3307",
"mysql://user:password@host:3307/db"),
("django.db.backends.mysql", "db", "user", "password", None, None,
"mysql://user:password@/db"),
("django.db.backends.postgresql", "db", "user", "password", None, None,
"postgres://user:password@/db"),
("django.db.backends.postgresql", "db", "user", "password", None, 1111,
"postgres://user:password@localhost:1111/db"),
("django.contrib.gis.db.backends.postgis", "johannes", "hernekeitto", "viina", "teline", 1111,
"postgis://hernekeitto:viina@teline:1111/johannes"),
("django.db.backends.postgresql", "baredb", None, None, None, None,
"postgres:///baredb"),
])
def test_db_url_generation(engine, name, user, password, host, port, expected):
databases = {
"default": {
"ENGINE": engine,
"NAME": name,
"USER": user,
"PASSWORD": password,
"HOST": host,
"PORT": port,
}
}
result = db_setting_to_db_string(databases)
assert result == expected
@pytest.mark.parametrize("database, user, password, host, port, expected", [
("db", "user", "password", "host", 3306, "mysql://user:password@host:3306/db"),
("db", "user", "password", "host", 3307, "mysql://user:password@host:3307/db"),
("db", "user", "password", None, None, "mysql://user:password@/db"),
("db", "user", "password", None, 5432, "mysql://user:password@localhost:5432/db"),
("db", "user", "password", None, 1111, "mysql://user:password@localhost:1111/db"),
("johannes", "hernekeitto", "viina", "teline", 1111, "mysql://hernekeitto:viina@teline:1111/johannes"),
])
def test_mysql_options_file_reading(database, user, password, host, port, expected):
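    # Build a minimal MySQL options file: a [client] section followed by key=value pairs.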
config_lines = ["[client]"]
params = dict(database=database, user=user, password=password, host=host, port=port)
for key, value in params.items():
if value:
config_lines.append("%s=%s" % (key, value))
with NamedTemporaryFile(delete=False) as f:
f.write(("\n".join(config_lines)).encode("utf-8"))
f.flush()
databases = {
"default": {
"ENGINE": "django.db.backends.mysql",
"OPTIONS": {
"read_default_file": f.name,
},
}
}
result = db_setting_to_db_string(databases)
assert result == expected
|
py | 1a31eca2921605ed743d3a08ed65469ed840b0f0 | from ... pyaz_utils import _call_az
from . import alert
def list(caller=None, correlation_id=None, end_time=None, filters=None, max_events=None, namespace=None, offset=None, resource_group=None, resource_id=None, select=None, start_time=None, status=None):
'''
List and query activity log events.
Optional Parameters:
- caller -- None
- correlation_id -- None
- end_time -- End time of the query. Defaults to the current time. Format: date (yyyy-mm-dd) time (hh:mm:ss.xxxxx) timezone (+/-hh:mm)
- filters -- OData filters. Will ignore other filter arguments.
- max_events -- None
- namespace -- None
- offset -- None
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- resource_id -- None
- select -- None
- start_time -- Start time of the query. Format: date (yyyy-mm-dd) time (hh:mm:ss.xxxxx) timezone (+/-hh:mm)
- status -- None
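    Example (illustrative):
        list(resource_group="myGroup", max_events=10)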
'''
return _call_az("az monitor activity-log list", locals())
def list_categories():
'''
List the event categories of activity logs.
'''
return _call_az("az monitor activity-log list-categories", locals())
|
py | 1a31ed310b99fb2ef9e55c1fe842e14976ab5f6e | #!/usr/bin/env python3
import sys
with open(sys.argv[1]) as input_file:
    lines = input_file.readlines()
numbers = [int(line.strip()) for line in lines]
# Part 1
print(sum(y > x for x, y in zip(numbers[:-1], numbers[1:])))
# Part 2
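# Comparing consecutive 3-measurement window sums reduces to comparing
# elements three positions apart, because the two shared terms cancel.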
print(sum(y > x for x, y in zip(numbers[:-3], numbers[3:])))
|
py | 1a31eefcbef8b29c2790d8902e2efec8f17bb8eb | def evalRPN(tokens):
    """Evaluate an expression given in Reverse Polish (postfix) notation.
    Division truncates toward zero (e.g. 6 / -132 evaluates to 0).
    """
    OPS = {
        "-": lambda x, y: x - y,
        "+": lambda x, y: x + y,
        "*": lambda x, y: x * y,
        "/": lambda x, y: int(x / y),  # truncate toward zero, not floor division
    }
    s = []  # operand stack
    for item in tokens:
        if len(s) >= 2 and item in OPS:
            # First pop is the right-hand operand, second pop is the left-hand one.
            y = s.pop()
            x = s.pop()
            s.append(OPS[item](x, y))
        else:
            s.append(int(item))
    return s.pop()
if __name__ == '__main__':
    tokens = ["2", "1", "+", "3", "*"]
    print(evalRPN(tokens))  # expected output: 9
    tokens = ["4", "13", "5", "/", "+"]
    print(evalRPN(tokens))  # expected output: 6
    tokens = ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
    # expected output: 22
print(evalRPN(tokens)) |
py | 1a31f1302aa154ee3eaa33c99e9f3024c88c1add | from django import forms
class CodeSubmissionForm(forms.Form):
code = forms.CharField(required=False, label="Submit code")
file = forms.FileField(required=False, label="Submit file")
|
py | 1a31f25f32b8b55c4ab971e2213beef365bf5b3d | # container-service-extension
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import os
import click
from vcd_cli.utils import stderr
from vcd_cli.utils import stdout
import yaml
from container_service_extension.client.cluster import Cluster
import container_service_extension.client.command_filter as cmd_filter
import container_service_extension.client.constants as cli_constants
from container_service_extension.client.de_cluster_native import DEClusterNative # noqa: E501
import container_service_extension.client.sample_generator as client_sample_generator # noqa: E501
import container_service_extension.client.utils as client_utils
from container_service_extension.common.constants.server_constants import LocalTemplateKey # noqa: E501
import container_service_extension.common.constants.shared_constants as shared_constants # noqa: E501
import container_service_extension.common.utils.core_utils as utils
from container_service_extension.exception.exceptions import CseResponseError
from container_service_extension.exception.exceptions import CseServerNotRunningError # noqa: E501
from container_service_extension.exception.minor_error_codes import MinorErrorCode # noqa: E501
from container_service_extension.logging.logger import CLIENT_LOGGER
import container_service_extension.rde.utils as def_utils
@click.group(name='cluster', cls=cmd_filter.GroupCommandFilter,
short_help='Manage Kubernetes clusters (native and vSphere with '
'Tanzu)')
@click.pass_context
def cluster_group(ctx):
"""Manage Kubernetes clusters (Native, vSphere with Tanzu and Ent-PKS).
\b
Cluster names should follow the syntax for valid hostnames and can have
    up to 25 characters. `system`, `template` and `swagger*` are reserved
words and cannot be used to name a cluster.
"""
pass
@cluster_group.command('list',
short_help='Display clusters in vCD that are visible '
'to the logged in user')
@click.pass_context
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Filter list to show clusters from a specific org VDC')
@click.option(
'-A',
'--all',
'should_print_all',
is_flag=True,
default=False,
required=False,
metavar='DISPLAY_ALL',
help='Display all the clusters non-interactively')
@click.option(
'-o',
'--org',
'org_name',
default=None,
required=False,
metavar='ORG_NAME',
help="Filter list to show clusters from a specific org")
def list_clusters(ctx, vdc, org_name, should_print_all):
"""Display clusters in vCD that are visible to the logged in user.
\b
Examples
vcd cse cluster list
Display clusters in vCD that are visible to the logged in user.
\b
vcd cse cluster list -vdc ovdc1
Display clusters in vdc 'ovdc1'.
"""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
cluster = Cluster(client)
if not client.is_sysadmin() and org_name is None:
org_name = ctx.obj['profiles'].get('org_in_use')
client_utils.print_paginated_result(cluster.list_clusters(vdc=vdc, org=org_name), # noqa: E501
should_print_all=should_print_all,
logger=CLIENT_LOGGER)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('delete',
short_help='Delete a cluster')
@click.pass_context
@click.argument('name', required=False, default=None)
@click.confirmation_option(prompt='Are you sure you want to delete the '
'cluster?')
@click.option(
'-f',
'--force',
'force',
is_flag=True,
required=False,
default=False,
help='Force delete the native entity type cluster regardless of the '
'cluster state. Removes Runtime Defined Entity, vApp and DNAT rule, '
'if any, that represents the cluster. Also, user needs special rights'
' and ACL on entity type. Please see the online documentation.')
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind; Supported only for'
' vcd api version >= 35')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster which needs to be deleted;"
"Supported only for CSE api version >= 35."
"ID gets precedence over cluster name.")
def cluster_delete(ctx, name, force, vdc, org, k8_runtime=None, cluster_id=None): # noqa: E501
"""Delete a Kubernetes cluster.
\b
Example
vcd cse cluster delete mycluster --yes
Delete cluster 'mycluster' without prompting.
'--vdc' option can be used for faster command execution.
\b
vcd cse cluster delete mycluster --force
Force delete the native entity type cluster regardless of the state of the cluster. Force delete removes
Runtime Defined Entity, vApp and DNAT rule, if any, that represents the cluster.
\b
vcd cse cluster delete --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
Delete cluster with cluster ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'.
(--id option is supported only applicable for api version >= 35)
""" # noqa: E501
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
if not (cluster_id or name):
# --id is not required when working with api version 33 and 34
raise Exception("Please specify cluster name (or) cluster Id. "
"Note that '--id' flag is applicable for API versions >= 35 only.") # noqa: E501
client = ctx.obj['client']
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
cluster = Cluster(client, k8_runtime=k8_runtime)
if not client.is_sysadmin() and org is None:
org = ctx.obj['profiles'].get('org_in_use')
if force:
result = cluster.force_delete_cluster(
name,
cluster_id=cluster_id,
org=org, vdc=vdc
)
else:
result = cluster.delete_cluster(
name, cluster_id=cluster_id, org=org, vdc=vdc)
if len(result) == 0:
# TODO(CLI): Update message to use vcd task wait instead
click.secho(f"Delete cluster operation has been initiated on "
f"{name}, please check the status using"
f" 'vcd cse cluster info {name}'.", fg='yellow')
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('create', short_help='Create a Kubernetes cluster')
@click.pass_context
@click.argument('name', required=True)
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Org VDC to use. Defaults to currently logged-in org VDC')
@click.option(
'-N',
'--nodes',
'node_count',
required=False,
default=None,
type=click.INT,
help='Number of worker nodes to create')
@click.option(
'-c',
'--cpu',
'cpu',
required=False,
default=None,
type=click.INT,
help='Number of virtual CPUs on each node')
@click.option(
'-m',
'--memory',
'memory',
required=False,
default=None,
type=click.INT,
help='Megabytes of memory on each node')
@click.option(
'-n',
'--network',
'network_name',
default=None,
required=False,
help='Org vDC network name (Required)')
@click.option(
'-s',
'--storage-profile',
'storage_profile',
required=False,
default=None,
help='Name of the storage profile for the nodes')
@click.option(
'-k',
'--ssh-key',
'ssh_key_file',
required=False,
default=None,
type=click.File('r'),
help='SSH public key filepath')
@click.option(
'-t',
'--template-name',
'template_name',
required=False,
default=None,
help='Name of the template to create new nodes from. '
'If not specified, server default will be used '
'(Must be used with --template-revision).')
@click.option(
'-r',
'--template-revision',
'template_revision',
required=False,
default=None,
help='Revision number of the template to create new nodes from. '
'If not specified, server default will be used '
         '(Must be used with --template-name).')
@click.option(
'--enable-nfs',
'enable_nfs',
is_flag=True,
help='Create 1 additional NFS node (if --nodes=2, then CSE will create '
'2 worker nodes and 1 NFS node)')
@click.option(
'--disable-rollback',
'disable_rollback',
is_flag=True,
help='Disable rollback on cluster creation failure')
@click.option(
'-o',
'--org',
'org_name',
default=None,
required=False,
metavar='ORG_NAME',
help='Org to use. Defaults to currently logged-in org')
def cluster_create(ctx, name, vdc, node_count, network_name,
storage_profile, ssh_key_file, template_name,
template_revision, enable_nfs, disable_rollback, org_name,
cpu=None, memory=None):
"""Create a Kubernetes cluster (max name length is 25 characters).
\b
Examples
vcd cse cluster create mycluster --network mynetwork
Create a Kubernetes cluster named 'mycluster'.
The cluster will have 2 worker nodes.
The cluster will be connected to org VDC network 'mynetwork'.
All VMs will use the default template.
On create failure, the invalid cluster is deleted.
\b
vcd cse cluster create mycluster --nodes 1 --enable-nfs \\
--network mynetwork --template-name photon-v2 --template-revision 1 \\
--cpu 3 --memory 1024 --storage-profile mystorageprofile \\
--ssh-key ~/.ssh/id_rsa.pub --disable-rollback --vdc othervdc
Create a Kubernetes cluster named 'mycluster' on org VDC 'othervdc'.
The cluster will have 1 worker node and 1 NFS node.
The cluster will be connected to org VDC network 'mynetwork'.
All VMs will use the template 'photon-v2'.
Each VM in the cluster will have 3 vCPUs and 1024mb of memory.
All VMs will use the storage profile 'mystorageprofile'.
The public ssh key at '~/.ssh/id_rsa.pub' will be placed into all
VMs for user accessibility.
        On create failure, the cluster will be left in an error state for
troubleshooting.
"""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
if (template_name and not template_revision) or \
(not template_name and template_revision):
raise Exception("Both flags --template-name(-t) and "
"--template-revision (-r) must be specified.")
client_utils.cse_restore_session(ctx)
if vdc is None:
vdc = ctx.obj['profiles'].get('vdc_in_use')
if not vdc:
raise Exception("Virtual datacenter context is not set. "
"Use either command 'vcd vdc use' or option "
"'--vdc' to set the vdc context.")
if org_name is None:
org_name = ctx.obj['profiles'].get('org_in_use')
ssh_key = None
if ssh_key_file is not None:
ssh_key = ssh_key_file.read()
client = ctx.obj['client']
cluster = Cluster(client)
result = cluster.create_cluster(
vdc,
network_name,
name,
node_count=node_count,
cpu=cpu,
memory=memory,
storage_profile=storage_profile,
ssh_key=ssh_key,
template_name=template_name,
template_revision=template_revision,
enable_nfs=enable_nfs,
rollback=not disable_rollback,
org=org_name)
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except CseResponseError as e:
minor_error_code_to_error_message = {
MinorErrorCode.REQUEST_KEY_NETWORK_NAME_MISSING: 'Missing option "-n" / "--network".', # noqa: E501
MinorErrorCode.REQUEST_KEY_NETWORK_NAME_INVALID: 'Invalid or missing value for option "-n" / "--network".' # noqa: E501
}
e.error_message = \
minor_error_code_to_error_message.get(
e.minor_error_code, e.error_message)
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('resize',
short_help='Resize the cluster to contain the '
'specified number of worker nodes')
@click.pass_context
@click.argument('cluster_name', required=True)
@click.option(
'-N',
'--nodes',
'node_count',
required=True,
default=None,
type=click.INT,
help='Desired number of worker nodes for the cluster')
@click.option(
'-n',
'--network',
'network_name',
default=None,
required=False,
help='Network name (Exclusive to native Kubernetes provider) (Required)')
@click.option(
'-v',
'--vdc',
'vdc_name',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org_name',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'--disable-rollback',
'disable_rollback',
is_flag=True,
help='Disable rollback on node creation failure '
'(Exclusive to native Kubernetes provider)')
@click.option(
'-t',
'--template-name',
'template_name',
required=False,
default=None,
help='Name of the template to create new nodes from. '
'If not specified, server default will be used '
'(Exclusive to native Kubernetes provider) '
'(Must be used with --template-revision).')
@click.option(
'-r',
'--template-revision',
'template_revision',
required=False,
default=None,
help='Revision number of the template to create new nodes from. '
'If not specified, server default will be used '
'(Exclusive to native Kubernetes provider) '
         '(Must be used with --template-name).')
@click.option(
'-c',
'--cpu',
'cpu',
required=False,
default=None,
type=click.INT,
help='Number of virtual CPUs on each node '
'(Exclusive to native Kubernetes provider)')
@click.option(
'-m',
'--memory',
'memory',
required=False,
default=None,
type=click.INT,
help='Megabytes of memory on each node '
'(Exclusive to native Kubernetes provider)')
@click.option(
'-k',
'--ssh-key',
'ssh_key_file',
required=False,
default=None,
type=click.File('r'),
help='SSH public key filepath (Exclusive to native Kubernetes provider)')
def cluster_resize(ctx, cluster_name, node_count, network_name, org_name,
vdc_name, disable_rollback, template_name,
template_revision, cpu, memory, ssh_key_file):
"""Resize the cluster to contain the specified number of worker nodes.
    Clusters that use the native Kubernetes provider cannot be sized down
(use 'vcd cse node delete' command to do so).
\b
Examples
vcd cse cluster resize mycluster --nodes 5 --network mynetwork
Resize the cluster to have 5 worker nodes. On resize failure,
returns cluster to original size.
Nodes will be created from server default template at default revision.
'--vdc' option can be used for faster command execution.
\b
vcd cse cluster resize mycluster -N 10 --template-name my_template \\
--template-revision 2 --disable-rollback
Resize the cluster size to 10 worker nodes. On resize failure,
        the cluster will be left in an error state for troubleshooting.
Nodes will be created from template 'my_template' revision 2.
"""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
if (template_name and not template_revision) or \
(not template_name and template_revision):
raise Exception("Both --template-name (-t) and "
"--template-revision (-r) must be specified.")
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
if not client.is_sysadmin() and org_name is None:
org_name = ctx.obj['profiles'].get('org_in_use')
ssh_key = None
if ssh_key_file:
ssh_key = ssh_key_file.read()
cluster = Cluster(client)
result = cluster.resize_cluster(
network_name,
cluster_name,
node_count=node_count,
org=org_name,
vdc=vdc_name,
rollback=not disable_rollback,
template_name=template_name,
template_revision=template_revision,
cpu=cpu,
memory=memory,
ssh_key=ssh_key)
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except CseResponseError as e:
minor_error_code_to_error_message = {
MinorErrorCode.REQUEST_KEY_NETWORK_NAME_MISSING: 'Missing option "-n" / "--network".', # noqa: E501
MinorErrorCode.REQUEST_KEY_NETWORK_NAME_INVALID: 'Invalid or missing value for option "-n" / "--network".' # noqa: E501
}
e.error_message = \
minor_error_code_to_error_message.get(
e.minor_error_code, e.error_message)
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('apply',
short_help='apply a configuration to a cluster resource'
' by filename. The resource will be created '
'if it does not exist. (The command can be '
'used to create the cluster, scale-up/down '
'worker count, scale-up NFS nodes, upgrade '
'the cluster to a new K8s version. Note '
'that for api_version <36.0, upgrades are '
'not supported with this command.)')
@click.pass_context
@click.argument(
'cluster_config_file_path',
required=False,
metavar='CLUSTER_CONFIG_FILE_PATH',
type=click.Path(exists=True))
@click.option(
'-s',
'--sample',
'generate_sample_config',
is_flag=True,
required=False,
default=False,
help="generate sample cluster configuration file; This flag can't be used together with CLUSTER_CONFIG_FILE_PATH") # noqa: E501
@click.option(
'-n',
'--native',
'k8_runtime',
is_flag=True,
flag_value=shared_constants.ClusterEntityKind.NATIVE.value,
help="should be used with --sample, this flag generates sample yaml for k8 runtime: native" # noqa: E501
)
@click.option(
'-k',
'--tkg-s',
'k8_runtime',
is_flag=True,
flag_value=shared_constants.ClusterEntityKind.TKG_S.value,
help="should be used with --sample, this flag generates sample yaml for k8 runtime: TKG" # noqa: E501
)
@click.option(
'-p',
'--tkg-plus',
'k8_runtime',
is_flag=True,
hidden=not utils.is_environment_variable_enabled(cli_constants.ENV_CSE_TKG_PLUS_ENABLED), # noqa: E501
flag_value=shared_constants.ClusterEntityKind.TKG_PLUS.value,
help="should be used with --sample, this flag generates sample yaml for k8 runtime: TKG+" # noqa: E501
)
@click.option(
'-t',
'--tkg',
'k8_runtime',
is_flag=True,
flag_value=shared_constants.ClusterEntityKind.TKG_M.value,
help="should be used with --sample, this flag generates sample yaml for k8 runtime: TKGm" # noqa: E501
)
@click.option(
'-o',
'--output',
'output',
required=False,
default=None,
metavar='OUTPUT_FILE_NAME',
help="Filepath to write sample configuration file to; This flag should be used with -s") # noqa: E501
@click.option(
'--org',
'org',
default=None,
required=False,
metavar='ORGANIZATION',
help="Organization on which the cluster configuration needs to be applied")
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster to which the configuration should be applied;"
"Supported only for CSE api version >=35."
"ID gets precedence over cluster name.")
def apply(ctx, cluster_config_file_path, generate_sample_config, k8_runtime, output, org, cluster_id): # noqa: E501
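    """Apply a configuration to a cluster resource specified in CLUSTER_CONFIG_FILE_PATH.
    The resource will be created if it does not exist. The command can be used
    to create the cluster, scale-up/down worker count, scale-up NFS nodes and
    upgrade the cluster to a new K8s version (note that for api_version < 36.0,
    upgrades are not supported with this command).
    """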
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
console_message_printer = utils.ConsoleMessagePrinter()
if cluster_config_file_path and (generate_sample_config or output or k8_runtime): # noqa: E501
console_message_printer.general_no_color(ctx.get_help())
msg = "-s/-o/-n/-t/-k flag can't be used together with CLUSTER_CONFIG_FILE_PATH" # noqa: E501
CLIENT_LOGGER.error(msg)
raise Exception(msg)
if not cluster_config_file_path and not generate_sample_config:
console_message_printer.general_no_color(ctx.get_help())
msg = "No option chosen/invalid option"
CLIENT_LOGGER.error(msg)
raise Exception(msg)
client = ctx.obj['client']
if generate_sample_config:
tkg_plus_env_enabled = utils.is_environment_variable_enabled(cli_constants.ENV_CSE_TKG_PLUS_ENABLED) # noqa: E501
if not k8_runtime:
console_message_printer.general_no_color(ctx.get_help())
msg = "with option --sample you must specify either of options: --native or --tkg-s or --tkg" # noqa: E501
if tkg_plus_env_enabled:
msg += " or --tkg-plus"
CLIENT_LOGGER.error(msg)
raise Exception(msg)
if k8_runtime == shared_constants.ClusterEntityKind.TKG_PLUS.value and not tkg_plus_env_enabled: # noqa: E501
raise Exception(f"{shared_constants.ClusterEntityKind.TKG_PLUS.value} not enabled") # noqa: E501
# since apply command is not exposed when CSE server is not
# running, it is safe to get the server_rde_version from
# VCD API version as VCD API version will be the supported by
# CSE server.
server_rde_version = \
def_utils.get_runtime_rde_version_by_vcd_api_version(
client.get_api_version())
sample_cluster_config = \
client_sample_generator.get_sample_cluster_configuration(
output=output,
k8_runtime=k8_runtime,
server_rde_in_use=server_rde_version)
console_message_printer.general_no_color(sample_cluster_config)
return
with open(cluster_config_file_path) as f:
cluster_config_map = yaml.safe_load(f) or {}
k8_runtime = cluster_config_map.get('kind')
if not k8_runtime:
raise Exception("Cluster kind missing from the spec.")
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
org_name = None
if k8_runtime == shared_constants.ClusterEntityKind.TKG_S.value:
org_name = org
if not org:
org_name = ctx.obj['profiles'].get('org_in_use')
cluster = Cluster(client, k8_runtime=cluster_config_map.get('kind'))
result = cluster.apply(cluster_config_map, cluster_id=cluster_id,
org=org_name)
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('delete-nfs',
help="Examples:\n\nvcd cse cluster delete-nfs mycluster nfs-uitj", # noqa: E501
short_help='Delete nfs node from Native Kubernetes cluster') # noqa: E501
@click.pass_context
@click.argument('cluster_name', required=True)
@click.argument('node_name', required=True)
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
def delete_nfs(ctx, cluster_name, node_name, vdc, org):
"""Remove nfs node in a cluster that uses native Kubernetes provider."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
# NOTE: command is exposed only if cli is enabled for native clusters
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
if not client.is_sysadmin() and org is None:
org = ctx.obj['profiles'].get('org_in_use')
cluster = DEClusterNative(client)
result = cluster.delete_nfs_node(cluster_name, node_name, org=org, vdc=vdc) # noqa: E501
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('upgrade-plan',
short_help='Display templates that the specified '
'native cluster can be upgraded to')
@click.pass_context
@click.argument('cluster_name', required=True)
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specific org VDC')
@click.option(
'-o',
'--org',
'org_name',
default=None,
required=False,
metavar='ORG_NAME',
help="Restrict cluster search to specific org")
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind; Supported only '
'for vcd api version >= 35.')
def cluster_upgrade_plan(ctx, cluster_name, vdc, org_name, k8_runtime=None):
"""Display templates that the specified cluster can upgrade to.
\b
Examples
vcd cse cluster upgrade-plan my-cluster
(Supported only for vcd api version < 35)
\b
vcd cse cluster upgrade-plan --k8-runtime native my-cluster
(Supported only for vcd api version >= 35)
"""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
# NOTE: Command is exposed only if CLI is enabled for native clusters
try:
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
cluster = Cluster(client, k8_runtime=k8_runtime)
if not client.is_sysadmin() and org_name is None:
org_name = ctx.obj['profiles'].get('org_in_use')
templates = cluster.get_upgrade_plan(cluster_name, vdc=vdc,
org=org_name)
result = []
for template in templates:
result.append({
'Template Name': template[LocalTemplateKey.NAME],
'Template Revision': template[LocalTemplateKey.REVISION],
'Kubernetes': template[LocalTemplateKey.KUBERNETES_VERSION],
'Docker-CE': template[LocalTemplateKey.DOCKER_VERSION],
'CNI': f"{template[LocalTemplateKey.CNI]} {template[LocalTemplateKey.CNI_VERSION]}" # noqa: E501
})
if not templates:
result = f"No valid upgrade targets for cluster '{cluster_name}'"
stdout(result, ctx, sort_headers=False)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('upgrade',
help="Examples:\n\nvcd cse cluster upgrade my-cluster ubuntu-16.04_k8-1.18_weave-2.6.4 1" # noqa: E501
"\n(Supported only for vcd api version < 35)"
"\n\nvcd cse cluster upgrade -k native mcluster photon-v2_k8-1.14_weave-2.5.2 2" # noqa: E501
"\n(Supported only for vcd api version >= 35)",
short_help="Upgrade native cluster software to "
"specified template's software versions")
@click.pass_context
@click.argument('cluster_name', required=True)
@click.argument('template_name', required=True)
@click.argument('template_revision', required=True)
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specific org VDC')
@click.option(
'-o',
'--org',
'org_name',
default=None,
required=False,
metavar='ORG_NAME',
help="Restrict cluster search to specific org")
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind; Supported '
'only for vcd api version >= 35.')
def cluster_upgrade(ctx, cluster_name, template_name, template_revision,
vdc, org_name, k8_runtime=None):
"""Upgrade cluster software to specified template's software versions.
\b
Example
vcd cse cluster upgrade my-cluster ubuntu-16.04_k8-1.18_weave-2.6.4 1
Upgrade cluster 'mycluster' Docker-CE, Kubernetes, and CNI to match
template 'ubuntu-16.04_k8-1.18_weave-2.6.4' at revision 1.
Affected software: Docker-CE, Kubernetes, CNI
"""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
# NOTE: Command is exposed only if CLI is enabled for native
try:
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
cluster = Cluster(client, k8_runtime=k8_runtime)
if not client.is_sysadmin() and org_name is None:
org_name = ctx.obj['profiles'].get('org_in_use')
result = cluster.upgrade_cluster(cluster_name, template_name,
template_revision, ovdc_name=vdc,
org_name=org_name)
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('config',
short_help='Retrieve cluster configuration details')
@click.pass_context
@click.argument('name', default=None, required=False)
@click.option(
'-o',
'--org',
'org',
required=False,
default=None,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind;'
'Supported only for vcd api version >= 35')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster whose cluster config has to be obtained;"
"supported only for CSE api version >= 35."
"ID gets precedence over cluster name.")
def cluster_config(ctx, name, vdc, org, k8_runtime=None, cluster_id=None):
"""Display cluster configuration.
\b
Examples:
vcd cse cluster config my-cluster
(Supported only for vcd api version < 35)
\b
vcd cse cluster config -k native my-cluster
(Supported only for vcd api version >= 35)
To write to a file: `vcd cse cluster config mycluster > ~/.kube/my_config`
\b
vcd cse cluster config --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
(--id option is supported only for vcd api version >= 35)
""" # noqa: E501
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
if not (cluster_id or name):
# --id is not required when working with api version 33 and 34
raise Exception("Please specify cluster name (or) cluster Id. "
"Note that '--id' flag is applicable for API versions >= 35 only.") # noqa: E501
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
cluster = Cluster(client, k8_runtime=k8_runtime)
if not client.is_sysadmin() and org is None:
org = ctx.obj['profiles'].get('org_in_use')
ret_val = cluster.get_cluster_config(
name,
cluster_id=cluster_id,
vdc=vdc,
org=org
).get(shared_constants.RESPONSE_MESSAGE_KEY)
if os.name == 'nt':
ret_val = str.replace(ret_val, '\n', '\r\n')
click.secho(ret_val)
CLIENT_LOGGER.debug(ret_val)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('info',
short_help='Display info about a cluster')
@click.pass_context
@click.argument('name', default=None, required=False)
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind;'
'Supported only for vcd api version >=35')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster whose info has to be obtained;"
"Supported only for CSE api version >=35. "
"ID gets precedence over cluster name.")
def cluster_info(ctx, name, org, vdc, k8_runtime=None, cluster_id=None):
"""Display info about a Kubernetes cluster.
\b
Example
vcd cse cluster info mycluster
Display detailed information about cluster 'mycluster'.
'--vdc' option can be used for faster command execution.
\b
vcd cse cluster info --id urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561
Display cluster information about cluster with
ID 'urn:vcloud:entity:cse:nativeCluster:1.0.0:0632c7c7-a613-427c-b4fc-9f1247da5561'
(--id option is supported only for api version >= 35)
""" # noqa: E501
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
if not (cluster_id or name):
# --id is not required when working with api version 33 and 34
raise Exception("Please specify cluster name (or) cluster Id. "
"Note that '--id' flag is applicable for API versions >= 35 only.") # noqa: E501
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in shared_constants.CSE_SERVER_RUNTIMES:
# Cannot run the command as cse cli is enabled only for native
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
cluster = Cluster(client, k8_runtime=k8_runtime)
# Users should be explicit in their intent about the org on which the
# command needs to be executed.
if not client.is_sysadmin() and org is None:
org = ctx.obj['profiles'].get('org_in_use')
result = cluster.get_cluster_info(name, cluster_id=cluster_id,
org=org, vdc=vdc)
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('share',
short_help='Share a cluster with at least one user')
@click.pass_context
@click.argument('users', nargs=-1, required=True)
@click.option(
'-n',
'--name',
'name',
required=False,
default=None,
metavar='CLUSTER_NAME',
help='Name of the cluster to share')
@click.option(
'--acl',
'acl',
required=True,
default=None,
metavar='ACL',
help=f'access control: {shared_constants.READ_ONLY}, '
f'{shared_constants.READ_WRITE}, or {shared_constants.FULL_CONTROL}')
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster to share; "
"ID gets precedence over cluster name.")
def cluster_share(ctx, name, acl, users, vdc, org, k8_runtime, cluster_id):
"""Share cluster with users.
Either the cluster name or cluster id is required.
By default, this command searches for the cluster in the currently logged in user's org.
Note: this command does not remove an ACL entry.
\b
Examples:
vcd cse cluster share --name mycluster --acl FullControl user1 user2
Share cluster 'mycluster' with FullControl access with 'user1' and 'user2'
\b
vcd cse cluster share --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 --acl ReadOnly user1
Share TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
with ReadOnly access with 'user1'
""" # noqa: E501
try:
# If cluster kind is not specified, let the server handle this check
if k8_runtime:
def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime) # noqa: E501
# Verify access level and cluster name/id arguments
access_level_id = shared_constants.ACCESS_LEVEL_TYPE_TO_ID.get(acl.lower()) # noqa: E501
if not access_level_id:
raise Exception(f'Please enter a valid access control type: '
f'{shared_constants.READ_ONLY}, '
f'{shared_constants.READ_WRITE}, or '
f'{shared_constants.FULL_CONTROL}')
if not (cluster_id or name):
raise Exception("Please specify cluster name or cluster id.")
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
shared_constants.ClusterEntityKind.TKG_PLUS.value]: # noqa: E501
# Cannot run the command as cse cli is enabled only for TKG-S
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
# Users should be explicit in their intent about the org on which the
# command needs to be executed.
is_system_user = client.is_sysadmin()
if not is_system_user and org is None:
org = ctx.obj['profiles'].get('org_in_use')
elif is_system_user and org is None:
raise Exception("Need to specify cluster org since logged in user is in system org") # noqa: E501
users_list = list(users)
cluster = Cluster(client, k8_runtime)
cluster.share_cluster(cluster_id, name, users_list, access_level_id,
org, vdc)
stdout(f'Cluster {cluster_id or name} successfully shared with: {users_list}') # noqa: E501
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('share-list',
short_help='List access information of shared cluster '
'users')
@click.pass_context
@click.option(
'-A',
'--all',
'should_print_all',
is_flag=True,
default=False,
required=False,
metavar='DISPLAY_ALL',
help="Display all cluster user access information non-interactively")
@click.option(
'-n',
'--name',
'name',
required=False,
default=None,
metavar='CLUSTER_NAME',
help='Name of the cluster to list shared users')
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster whose share lists we want to retrieve; "
"ID gets precedence over cluster name.")
def cluster_share_list(ctx, should_print_all, name, vdc, org, k8_runtime,
cluster_id):
"""List cluster shared user information.
Either the cluster name or cluster id is required.
\b
Examples:
vcd cse cluster share-list --name mycluster
List shared user information for cluster 'mycluster'
\b
        vcd cse cluster share-list --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057
List shared user information for cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057'
""" # noqa: E501
try:
# If cluster kind is not specified, let the server handle this check
if k8_runtime:
def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime) # noqa: E501
if not (cluster_id or name):
raise Exception("Please specify cluster name or cluster id.")
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
shared_constants.ClusterEntityKind.TKG_PLUS.value]: # noqa: E501
# Cannot run the command as cse cli is enabled only for TKG-S
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
# Determine cluster type and retrieve cluster id if needed
client = ctx.obj['client']
# Users should be explicit in their intent about the org on which the
# command needs to be executed.
is_system_user = client.is_sysadmin()
if not is_system_user and org is None:
org = ctx.obj['profiles'].get('org_in_use')
elif is_system_user and org is None:
raise Exception("Need to specify cluster org since logged in user is in system org") # noqa: E501
cluster = Cluster(client, k8_runtime)
share_entries = cluster.list_share_entries(cluster_id, name, org, vdc)
client_utils.print_paginated_result(share_entries, should_print_all)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@cluster_group.command('unshare',
short_help='Unshare a cluster with specified user(s)')
@click.pass_context
@click.argument('users', nargs=-1, required=True)
@click.option(
'-n',
'--name',
'name',
required=False,
default=None,
metavar='CLUSTER_NAME',
help='Name of the cluster to share')
@click.option(
'-v',
'--vdc',
'vdc',
required=False,
default=None,
metavar='VDC_NAME',
help='Restrict cluster search to specified org VDC')
@click.option(
'-o',
'--org',
'org',
default=None,
required=False,
metavar='ORG_NAME',
help='Restrict cluster search to specified org')
@click.option(
'-k',
'--k8-runtime',
'k8_runtime',
default=None,
required=False,
metavar='K8-RUNTIME',
help='Restrict cluster search to cluster kind')
@click.option(
'--id',
'cluster_id',
default=None,
required=False,
metavar='CLUSTER_ID',
help="ID of the cluster to unshare; "
"ID gets precedence over cluster name.")
def cluster_unshare(ctx, name, users, vdc, org, k8_runtime, cluster_id):
"""Remove access from current shared cluster users.
Either the cluster name or cluster id is required. By default, this command searches
for the cluster in the currently logged in user's org.
\b
Examples:
vcd cse cluster unshare --name mycluster user1 user2
            Unshare cluster 'mycluster' with 'user1' and 'user2'
\b
vcd cse cluster unshare --id urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057 user1
Unshare TKG-S cluster with cluster ID 'urn:vcloud:entity:vmware:tkgcluster:1.0.0:71fa7b01-84dc-4a58-ae54-a1098219b057' with 'user1'
""" # noqa: E501
try:
if not (cluster_id or name):
raise Exception("Please specify cluster name or cluster id.")
client_utils.cse_restore_session(ctx)
if client_utils.is_cli_for_tkg_s_only():
if k8_runtime in [shared_constants.ClusterEntityKind.NATIVE.value,
shared_constants.ClusterEntityKind.TKG_PLUS.value]: # noqa: E501
# Cannot run the command as cse cli is enabled only for tkg
raise CseServerNotRunningError()
k8_runtime = shared_constants.ClusterEntityKind.TKG_S.value
client = ctx.obj['client']
# Users should be explicit in their intent about the org on which the
# command needs to be executed.
is_system_user = client.is_sysadmin()
if not is_system_user and org is None:
org = ctx.obj['profiles'].get('org_in_use')
elif is_system_user and org is None:
raise Exception("Need to specify cluster org since logged in user is in system org") # noqa: E501
# If cluster kind is not specified, let the server handle this check
if k8_runtime:
def_utils.raise_error_if_tkgm_cluster_operation(cluster_kind=k8_runtime) # noqa: E501
users_list = list(users)
cluster = Cluster(client, k8_runtime)
cluster.unshare_cluster(cluster_id, name, users_list, org, vdc)
stdout(f'Cluster {cluster_id or name} successfully unshared with: {users_list}') # noqa: E501
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
|
py | 1a31f2b080d1c8e7e4b33083fd496de6477e6bc3 | try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
from . import util
from .pprint import PrettyPrinter
class AttrTree(object):
"""
An AttrTree offers convenient, multi-level attribute access for
collections of objects. AttrTree objects may also be combined
together using the update method or merge classmethod. Here is an
example of adding a ViewableElement to an AttrTree and accessing it:
>>> t = AttrTree()
>>> t.Example.Path = 1
>>> t.Example.Path #doctest: +ELLIPSIS
1
"""
    _disabled_prefixes = [] # Underscore attributes that should be disabled
_sanitizer = util.sanitize_identifier
@classmethod
def merge(cls, trees):
"""
Merge a collection of AttrTree objects.
"""
first = trees[0]
for tree in trees:
first.update(tree)
return first
def __dir__(self):
"""
The _dir_mode may be set to 'default' or 'user' in which case
only the child nodes added by the user are listed.
"""
dict_keys = self.__dict__.keys()
if self.__dict__['_dir_mode'] == 'user':
return self.__dict__['children']
else:
return dir(type(self)) + list(dict_keys)
def __init__(self, items=None, identifier=None, parent=None, dir_mode='default'):
"""
identifier: A string identifier for the current node (if any)
parent: The parent node (if any)
items: Items as (path, value) pairs to construct
(sub)tree down to given leaf values.
Note that the root node does not have a parent and does not
require an identifier.
"""
self.__dict__['parent'] = parent
self.__dict__['identifier'] = type(self)._sanitizer(identifier, escape=False)
self.__dict__['children'] = []
self.__dict__['_fixed'] = False
self.__dict__['_dir_mode'] = dir_mode # Either 'default' or 'user'
fixed_error = 'No attribute %r in this AttrTree, and none can be added because fixed=True'
self.__dict__['_fixed_error'] = fixed_error
self.__dict__['data'] = OrderedDict()
items = items.items() if isinstance(items, OrderedDict) else items
# Python 3
items = list(items) if items else items
items = [] if not items else items
for path, item in items:
self.set_path(path, item)
@property
def path(self):
"Returns the path up to the root for the current node."
if self.parent:
return '.'.join([self.parent.path, str(self.identifier)])
else:
return self.identifier if self.identifier else self.__class__.__name__
@property
def fixed(self):
"If fixed, no new paths can be created via attribute access"
return self.__dict__['_fixed']
@fixed.setter
def fixed(self, val):
self.__dict__['_fixed'] = val
def update(self, other):
"""
Updated the contents of the current AttrTree with the
contents of a second AttrTree.
"""
if not isinstance(other, AttrTree):
raise Exception('Can only update with another AttrTree type.')
fixed_status = (self.fixed, other.fixed)
(self.fixed, other.fixed) = (False, False)
for identifier, element in other.items():
if identifier not in self.data:
self[identifier] = element
else:
self[identifier].update(element)
(self.fixed, other.fixed) = fixed_status
def set_path(self, path, val):
"""
Set the given value at the supplied path where path is either
a tuple of strings or a string in A.B.C format.
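        For example, set_path('A.B', 1) and set_path(('A', 'B'), 1) are
        equivalent.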
"""
        path = tuple(path.split('.')) if isinstance(path, str) else tuple(path)
disallowed = [p for p in path if not type(self)._sanitizer.allowable(p)]
if any(disallowed):
raise Exception("Attribute strings in path elements cannot be "
"correctly escaped : %s" % ','.join(repr(el) for el in disallowed))
if len(path) > 1:
attrtree = self.__getattr__(path[0])
attrtree.set_path(path[1:], val)
else:
self.__setattr__(path[0], val)
def filter(self, path_filters):
"""
Filters the loaded AttrTree using the supplied path_filters.
"""
if not path_filters: return self
# Convert string path filters
path_filters = [tuple(pf.split('.')) if not isinstance(pf, tuple)
else pf for pf in path_filters]
# Search for substring matches between paths and path filters
new_attrtree = self.__class__()
for path, item in self.data.items():
if any([all([subpath in path for subpath in pf]) for pf in path_filters]):
new_attrtree.set_path(path, item)
return new_attrtree
def _propagate(self, path, val):
"""
Propagate the value up to the root node.
"""
self.data[path] = val
if self.parent is not None:
self.parent._propagate((self.identifier,)+path, val)
def __setitem__(self, identifier, val):
"""
Set a value at a child node with given identifier. If at a root
        node, multi-level path specifications are allowed (i.e. 'A.B.C'
format or tuple format) in which case the behaviour matches
that of set_path.
"""
if isinstance(identifier, str) and '.' not in identifier:
self.__setattr__(identifier, val)
elif isinstance(identifier, str) and self.parent is None:
self.set_path(tuple(identifier.split('.')), val)
elif isinstance(identifier, tuple) and self.parent is None:
self.set_path(identifier, val)
else:
raise Exception("Multi-level item setting only allowed from root node.")
def __getitem__(self, identifier):
"""
For a given non-root node, access a child element by identifier.
If the node is a root node, you may also access elements using
either tuple format or the 'A.B.C' string format.
"""
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
if identifier in self.children:
return self.__dict__[identifier]
else:
raise KeyError(identifier)
path_item = self
for identifier in split_label:
path_item = path_item[identifier]
return path_item
def __setattr__(self, identifier, val):
# Getattr is skipped for root and first set of children
shallow = (self.parent is None or self.parent.parent is None)
if identifier[0].isupper() and self.fixed and shallow:
raise AttributeError(self._fixed_error % identifier)
super(AttrTree, self).__setattr__(identifier, val)
if identifier[0].isupper():
if not identifier in self.children:
self.children.append(identifier)
self._propagate((identifier,), val)
def __getattr__(self, identifier):
"""
Access a identifier from the AttrTree or generate a new AttrTree
with the chosen attribute path.
"""
try:
return super(AttrTree, self).__getattr__(identifier)
except AttributeError: pass
# Attributes starting with __ get name mangled
if identifier.startswith('_' + type(self).__name__) or identifier.startswith('__'):
raise AttributeError('Attribute %s not found.' % identifier)
elif self.fixed==True:
raise AttributeError(self._fixed_error % identifier)
if not any(identifier.startswith(prefix)
for prefix in type(self)._disabled_prefixes):
identifier = type(self)._sanitizer(identifier, escape=False)
if identifier in self.children:
return self.__dict__[identifier]
if not identifier.startswith('_'):
self.children.append(identifier)
dir_mode = self.__dict__['_dir_mode']
child_tree = self.__class__(identifier=identifier,
parent=self, dir_mode=dir_mode)
self.__dict__[identifier] = child_tree
return child_tree
else:
raise AttributeError
def __iter__(self):
return iter(self.data.values())
def __contains__(self, name):
return name in self.children or name in self.data
def __len__(self):
return len(self.data)
def get(self, identifier, default=None):
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
return self.__dict__.get(identifier, default)
path_item = self
for identifier in split_label:
if path_item == default or path_item is None:
return default
path_item = path_item.get(identifier, default)
return path_item
def keys(self):
return list(self.data.keys())
def items(self):
return list(self.data.items())
def values(self):
return list(self.data.values())
def pop(self, identifier, default=None):
if identifier in self.children:
item = self[identifier]
self.__delitem__(identifier)
return item
else:
return default
def __repr__(self):
return PrettyPrinter.pprint(self)
__all__ = ['AttrTree']
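# A minimal usage sketch (illustrative only, kept as comments so nothing runs on
# import; it assumes the behaviour documented in the class docstring above):
#
#     >>> t = AttrTree([(('A', 'B'), 1), (('A', 'C'), 2)])
#     >>> t.A.B, t['A.C']
#     (1, 2)
#     >>> t.filter([('A', 'B')]).keys()
#     [('A', 'B')]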
|
py | 1a31f3ca0f9e50a075ac5d7641497f49ff34f21c | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc
class PredictionServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.automl.v1beta1 PredictionService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="automl.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(cls, address="automl.googleapis.com:443", credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def predict(self):
"""Return the gRPC stub for :meth:`PredictionServiceClient.predict`.
Perform an online prediction. The prediction result will be directly
returned in the response. Available for following ML problems, and their
expected request payloads:
- Image Classification - Image in .JPEG, .GIF or .PNG format,
image\_bytes up to 30MB.
- Image Object Detection - Image in .JPEG, .GIF or .PNG format,
image\_bytes up to 30MB.
- Text Classification - TextSnippet, content up to 10,000 characters,
UTF-8 encoded.
- Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8
NFC encoded. \* Translation - TextSnippet, content up to 25,000
characters, UTF-8 encoded.
- Tables - Row, with column values matching the columns of the model,
up to 5MB.
- Text Sentiment - TextSnippet, content up 500 characters, UTF-8
encoded.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["prediction_service_stub"].Predict
@property
def batch_predict(self):
"""Return the gRPC stub for :meth:`PredictionServiceClient.batch_predict`.
Perform a batch prediction. Unlike the online ``Predict``, batch
prediction result won't be immediately available in the response.
Instead, a long running operation object is returned. User can poll the
operation result via ``GetOperation`` method. Once the operation is
done, ``BatchPredictResult`` is returned in the ``response`` field.
Available for following ML problems:
- Video Classification
- Text Extraction
- Tables
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["prediction_service_stub"].BatchPredict
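# A hedged usage sketch (not part of the upstream module): this transport is normally
# constructed internally by PredictionServiceClient, but it can be built directly when
# a custom channel or credentials are needed. The request object below is hypothetical
# and would be a PredictRequest proto in real use.
#
#     transport = PredictionServiceGrpcTransport(address="automl.googleapis.com:443")
#     predict = transport.predict        # bound gRPC callable
#     # response = predict(request)      # request: prediction_service_pb2.PredictRequest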
|
py | 1a31f4fd7c5bd0620b2da2778354a47e7f77a5c8 | #!/usr/bin/env python
"""Execute the tests for sam2matrix.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print('Executing test for sam2matrix')
print('=============================')
print()
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/sam2matrix/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/sam2matrix', 'sam2matrix')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# First Section.
# ============================================================
# App TestConf objects to conf_list, just like this for each
# test you want to run.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('out.stdout'),
args=['-m', ph.inFile('ecoli.sam'), '-m', ph.inFile('ehec.sam'), '-r',
ph.inFile('ecoli_0.50_ehec_0.50.fq'), '-rf', 'ecoli.fa', '-rf',
'ehec.fa', '-o', ph.outFile('test_sam2matrix.tsv')],
to_diff=[(ph.inFile('out.stdout'),
ph.outFile('out.stdout')),
(ph.inFile('gold.tsv'),
ph.outFile('test_sam2matrix.tsv'))])
conf_list.append(conf)
# ============================================================
# Execute the tests.
# ============================================================
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print(' '.join(['sam2matrix'] + conf.args), end=' ')
if res:
print('OK')
else:
failures += 1
print('FAILED')
# Cleanup.
ph.deleteTempDir()
print('==============================')
print(' total tests: %d' % len(conf_list))
print(' failed tests: %d' % failures)
print('successful tests: %d' % (len(conf_list) - failures))
print('==============================')
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
py | 1a31f51be3afef99b39f5463a3d4d4e61775dc9d |
badList = [7, 8, 4, 6, 2, 3, 4, 1]
def bubble_sort(data):
n = len(data)
swapped_elements = True
while swapped_elements == True:
swapped_elements = False
for j in range(0, n-1):
if data[j] > data[j+1]:
swapped_elements = True
data[j],data[j+1] = data[j+1],data[j]
return data
goodList = bubble_sort(badList)
print(goodList)
|
py | 1a31f573342cacc24d9b35048e87718bbdae0464 | class Solution:
# @param num, a list of integer
# @return a list of lists of integers
def permuteUnique(self, num):
res = [num]
temp = self.nextPermu(num[:])
while temp != num:
res.append(temp)
temp = self.nextPermu(temp[:])
return res
def nextPermu(self, num):
s = 0
e = len(num) - 2
while e >= s:
if num[e+1] <= num[e]: e -= 1
else: break
if e == -1:
return num[::-1]
if e == len(num) - 2:
return num[:-2] + [num[-1],num[-2]]
t = e + 1
while t < len(num)-1:
if num[t]>num[e]>=num[t+1]:
break
t+=1
temp = num[t]
num[t] = num[e]
num[e] = temp
return num[:e+1]+num[e+1:][::-1]
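# Illustrative call (hedged note, not part of the original snippet): the method walks
# the next-permutation cycle until it returns to the starting arrangement, e.g.
#
#     # Solution().permuteUnique([1, 2, 3])
#     # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]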
|
py | 1a31f58dddc5609d7e6c2e37e2ac8c0a148d0c3f | """ Tests for AttMap. """
import itertools
import os
import pickle
import numpy as np
import pytest
import yaml
from attmap import AttMap, AttMapEcho
__author__ = "Vince Reuter"
__email__ = "[email protected]"
# Provide some basic atomic-type data for models tests.
_BASE_KEYS = ("epigenomics", "H3K", "ac", "EWS", "FLI1")
_BASE_VALUES = \
("topic", "residue", "acetylation", "RNA binding protein", "FLI1")
_ENTRIES_PROVISION_MODES = ["gen", "dict", "zip", "list", "items"]
_SEASON_HIERARCHY = {
"spring": {"February": 28, "March": 31, "April": 30, "May": 31},
"summer": {"June": 30, "July": 31, "August": 31},
"fall": {"September": 30, "October": 31, "November": 30},
"winter": {"December": 31, "January": 31}
}
ADDITIONAL_NON_NESTED = {"West Complex": {"CPHG": 6}, "BIG": {"MR-4": 6}}
ADDITIONAL_NESTED = {"JPA": {"West Complex": {"CPHG": 6}},
"Lane": {"BIG": {"MR-4": 6}}}
ADDITIONAL_VALUES_BY_NESTING = {
False: ADDITIONAL_NON_NESTED,
True: ADDITIONAL_NESTED
}
COMPARISON_FUNCTIONS = ["__eq__", "__ne__", "__len__",
"keys", "values", "items"]
def pytest_generate_tests(metafunc):
""" Centralize dynamic test case parameterization. """
if "empty_collection" in metafunc.fixturenames:
# Test case strives to validate expected behavior on empty container.
collection_types = [tuple, list, set, dict]
metafunc.parametrize(
"empty_collection",
argvalues=[ctype() for ctype in collection_types],
ids=[ctype.__name__ for ctype in collection_types])
def basic_entries():
""" AttMap data that lack nested structure. """
for k, v in zip(_BASE_KEYS, _BASE_VALUES):
yield k, v
def nested_entries():
""" AttributeDict data with some nesting going on. """
for k, v in _SEASON_HIERARCHY.items():
yield k, v
@pytest.mark.parametrize("base", ["random", "irrelevant", "arbitrary"])
@pytest.mark.parametrize("protect", [False, True])
def test_echo_is_conditional(base, protect):
""" Protected member isn't echoed. """
m = AttMapEcho({})
if protect:
with pytest.raises(AttributeError):
m.__getattr__("__{}__".format(base))
else:
assert base == m.__getattr__(base)
class AttributeConstructionDictTests:
"""Tests for the AttMap ADT.
Note that the implementation of the equality comparison operator
is tested indirectly via the mechanism of many of the assertion
statements used throughout these test cases. Some test cases are
parameterized by comparison function to test for equivalence, rather
    than via input data as is typically the case. This avoids some overhead
    and ensures that the implemented `collections.MutableMapping`
    or `collections.abc.MutableMapping` methods are valid.
"""
# Refer to tail of class definition for
# data and fixtures specific to this class.
def test_null_construction(self):
""" Null entries value creates empty AttMap. """
assert AttMap({}) == AttMap(None)
def test_empty_construction(self, empty_collection):
""" Empty entries container create empty AttMap. """
m = AttMap(empty_collection)
assert AttMap(None) == m
assert m != dict()
@pytest.mark.parametrize(
argnames="entries_gen,entries_provision_type",
argvalues=itertools.product([basic_entries, nested_entries],
_ENTRIES_PROVISION_MODES),
ids=["{entries}-{mode}".format(entries=gen.__name__, mode=mode)
for gen, mode in
itertools.product([basic_entries, nested_entries],
_ENTRIES_PROVISION_MODES)]
)
def test_construction_modes_supported(
self, entries_gen, entries_provision_type):
""" Construction wants key-value pairs; wrapping doesn't matter. """
entries_mapping = dict(entries_gen())
if entries_provision_type == "dict":
entries = entries_mapping
elif entries_provision_type == "zip":
keys, values = zip(*entries_gen())
entries = zip(keys, values)
elif entries_provision_type == "items":
entries = entries_mapping.items()
elif entries_provision_type == "list":
entries = list(entries_gen())
elif entries_provision_type == "gen":
entries = entries_gen
else:
raise ValueError("Unexpected entries type: {}".
format(entries_provision_type))
expected = AttMap(entries_mapping)
observed = AttMap(entries)
assert expected == observed
@staticmethod
def _validate_mapping_function_implementation(entries_gen, name_comp_func):
data = dict(entries_gen())
attrdict = AttMap(data)
        if name_comp_func in ["__eq__", "__ne__"]:
            are_equal = getattr(attrdict, name_comp_func).__call__(data)
            assert are_equal if name_comp_func == "__eq__" \
                else (not are_equal)
        else:
            raw_dict_comp_func = getattr(data, name_comp_func)
            attrdict_comp_func = getattr(attrdict, name_comp_func)
            expected = raw_dict_comp_func.__call__()
            observed = attrdict_comp_func.__call__()
            try:
                # Most comparison methods are returning iterables.
                assert set(expected) == set(observed)
            except TypeError:
                # Could be int or other non-iterable that we're comparing.
                assert expected == observed
class AttMapUpdateTests:
"""Validate behavior of post-construction addition of entries.
Though entries may and often will be provided at instantiation time,
AttMap is motivated to support inheritance by domain-specific
data types for which use cases are likely to be unable to provide
all relevant data at construction time. So let's verify that we get the
expected behavior when entries are added after initial construction.
"""
_TOTALLY_ARBITRARY_VALUES = [
"abc", 123,
(4, "text", ("nes", "ted")), list("-101")
]
_GETTERS = ["__getattr__", "__getitem__"]
_SETTERS = ["__setattr__", "__setitem__"]
@pytest.mark.parametrize(
argnames="setter_name,getter_name,is_novel",
argvalues=itertools.product(_SETTERS, _GETTERS, (False, True)))
def test_set_get_atomic(self, setter_name, getter_name, is_novel):
""" For new and existing items, validate set/get behavior. """
# Establish the AttMap for the test case.
data = dict(basic_entries())
ad = AttMap(basic_entries())
# Establish a ground truth and select name/value(s) based on
# whether or not the test case wants to test a new or existing item.
if is_novel:
item_name = "awesome_novel_attribute"
assert item_name not in ad
with pytest.raises(AttributeError):
getattr(ad, item_name)
item_values = self._TOTALLY_ARBITRARY_VALUES
else:
item_name = np.random.choice(a=list(data.keys()), size=1)[0]
item_value = data[item_name]
assert ad[item_name] == item_value
assert getattr(ad, item_name) == item_value
item_values = [item_value]
# Determine which functions to use to make the set/get calls.
setter = getattr(ad, setter_name)
getter = getattr(ad, getter_name)
# Validate set/get for each value.
for value in item_values:
setter(item_name, value)
assert getter(item_name) == value
class AttMapCollisionTests:
""" Tests for proper merging and type conversion of mappings.
AttMap converts a mapping being inserted as a value to an
AttMap. """
@pytest.mark.parametrize(
argnames="name_update_func",
argvalues=["add_entries", "__setattr__", "__setitem__"])
def test_squash_existing(self, name_update_func):
""" When a value that's a mapping is assigned to existing key with
non-mapping value, the new value overwrites the old. """
ad = AttMap({"MR": 4})
assert 4 == ad.MR
assert 4 == ad["MR"]
new_value = [4, 5, 6]
args = ("MR", new_value)
setter = getattr(ad, name_update_func)
if name_update_func == "add_entries":
setter([args])
else:
setter(*args)
assert new_value == ad.MR
assert new_value == ad["MR"]
@pytest.mark.parametrize(
argnames="name_update_func",
argvalues=["add_entries", "__setattr__", "__setitem__"])
@pytest.mark.parametrize(
argnames="name_fetch_func",
argvalues=["__getattr__", "__getitem__"])
class AttMapNullTests:
""" AttMap has configurable behavior regarding null values. """
def test_new_null(self, name_update_func, name_fetch_func):
""" When a key/item, isn't known, null is allowed. """
ad = AttMap()
setter = getattr(ad, name_update_func)
args = ("new_key", None)
self._do_update(name_update_func, setter, args)
getter = getattr(ad, name_fetch_func)
assert getter("new_key") is None
def test_replace_null(self, name_update_func, name_fetch_func):
""" Null can be replaced by non-null. """
ad = AttMap({"lone_attr": None})
assert getattr(ad, name_fetch_func)("lone_attr") is None
setter = getattr(ad, name_update_func)
non_null_value = AttMap({"was_null": "not_now"})
self._do_update(name_update_func, setter,
("lone_attr", non_null_value))
assert non_null_value == getattr(ad, name_fetch_func)("lone_attr")
@staticmethod
def _do_update(name_setter_func, setter_bound_method, args):
if name_setter_func == "add_entries":
setter_bound_method([args])
else:
setter_bound_method(*args)
class AttMapItemAccessTests:
""" Tests for access of items (key- or attribute- style). """
@pytest.mark.parametrize(argnames="missing", argvalues=["att", ""])
def test_missing_getattr(self, missing):
attrd = AttMap()
with pytest.raises(AttributeError):
getattr(attrd, missing)
@pytest.mark.parametrize(argnames="missing", argvalues=["", "b", "missing"])
def test_missing_getitem(self, missing):
attrd = AttMap()
with pytest.raises(KeyError):
attrd[missing]
def test_numeric_key(self):
""" Attribute request must be string. """
ad = AttMap({1: 'a'})
assert 'a' == ad[1]
with pytest.raises(TypeError):
getattr(ad, 1)
class AttMapSerializationTests:
""" Tests for AttMap serialization. """
DATA_PAIRS = [('a', 1), ('b', False), ('c', range(5)),
('d', {'A': None, 'T': []}),
('e', AttMap({'G': 1, 'C': [False, None]})),
('f', [AttMap({"DNA": "deoxyribose", "RNA": "ribose"}),
AttMap({"DNA": "thymine", "RNA": "uracil"})])]
@pytest.mark.parametrize(
argnames="data",
argvalues=itertools.combinations(DATA_PAIRS, 2),
ids=lambda data: " data = {}".format(str(data)))
@pytest.mark.parametrize(
argnames="data_type", argvalues=[list, dict],
ids=lambda data_type: " data_type = {}".format(data_type))
def test_pickle_restoration(self, tmpdir, data, data_type):
""" Pickled and restored AttMap objects are identical. """
# Type the AttMap input data argument according to parameter.
data = data_type(data)
original_attrdict = AttMap(data)
filename = "attrdict-test.pkl"
# Allow either Path or raw string.
try:
dirpath = tmpdir.strpath
except AttributeError:
dirpath = tmpdir
# Serialize AttMap and write to disk.
filepath = os.path.join(dirpath, filename)
with open(filepath, 'wb') as pkl:
pickle.dump(original_attrdict, pkl)
# Validate equivalence between original and restored versions.
with open(filepath, 'rb') as pkl:
restored_attrdict = AttMap(pickle.load(pkl))
assert restored_attrdict == original_attrdict
class AttMapObjectSyntaxAccessTests:
""" Test behavior of dot attribute access / identity setting. """
DEFAULT_VALUE = "totally-arbitrary"
NORMAL_ITEM_ARG_VALUES = \
["__getattr__", "__getitem__", "__dict__", "__repr__", "__str__"]
PICKLE_ITEM_ARG_VALUES = ["__getstate__", "__setstate__"]
ATTR_DICT_DATA = {"a": 0, "b": range(1, 3), "c": {"CO": 70, "WA": 5}}
UNMAPPED = ["arb-att-1", "random-attribute-2"]
@pytest.fixture(scope="function")
def attrdict(self, request):
""" Provide a test case with an AttMap. """
d = self.ATTR_DICT_DATA
return AttMapEcho(d) if request.getfixturevalue("return_identity") \
else AttMap(d)
@pytest.mark.parametrize(
argnames="return_identity", argvalues=[False, True],
ids=lambda ret_id: " identity setting={} ".format(ret_id))
@pytest.mark.parametrize(
argnames="attr_to_request",
argvalues=NORMAL_ITEM_ARG_VALUES + PICKLE_ITEM_ARG_VALUES +
UNMAPPED + list(ATTR_DICT_DATA.keys()),
ids=lambda attr: " requested={} ".format(attr))
def test_attribute_access(
self, return_identity, attr_to_request, attrdict):
""" Access behavior depends on request and behavior toggle. """
if attr_to_request == "__dict__":
# The underlying mapping is still accessible.
assert attrdict.__dict__ is getattr(attrdict, "__dict__")
elif attr_to_request in self.NORMAL_ITEM_ARG_VALUES:
# Request for common protected function returns the function.
assert callable(getattr(attrdict, attr_to_request))
elif attr_to_request in self.PICKLE_ITEM_ARG_VALUES:
# We don't tinker with the pickle-relevant attributes.
with pytest.raises(AttributeError):
print("Should have failed, but got result: {}".
format(getattr(attrdict, attr_to_request)))
elif attr_to_request in self.UNMAPPED:
# Unmapped request behavior depends on parameterization.
if return_identity:
assert attr_to_request == getattr(attrdict, attr_to_request)
else:
with pytest.raises(AttributeError):
getattr(attrdict, attr_to_request)
else:
# A mapped attribute returns its known value.
expected = self.ATTR_DICT_DATA[attr_to_request]
if isinstance(expected, dict):
expected = type(attrdict)(expected)
observed = getattr(attrdict, attr_to_request)
print("AD (below):\n{}".format(attrdict))
assert expected == observed
class NullityTests:
""" Tests of null/non-null values """
_KEYNAMES = ["sample_name", "protocol", "arbitrary_attribute"]
@pytest.mark.parametrize(argnames="item", argvalues=_KEYNAMES)
def test_missing_is_neither_null_nor_non_null(self, item):
""" Value of absent key is neither null nor non-null """
ad = AttMap()
assert not ad.is_null(item) and not ad.non_null(item)
@pytest.mark.parametrize(argnames="item", argvalues=_KEYNAMES)
def test_is_null(self, item):
""" Null-valued key/item evaluates as such. """
ad = AttMap()
ad[item] = None
assert ad.is_null(item) and not ad.non_null(item)
@pytest.mark.parametrize(
argnames=["k", "v"],
argvalues=list(zip(_KEYNAMES, ["sampleA", "WGBS", "random"])))
def test_non_null(self, k, v):
""" AD is sensitive to value updates """
ad = AttMap()
assert not ad.is_null(k) and not ad.non_null(k)
ad[k] = None
assert ad.is_null(k) and not ad.non_null(k)
ad[k] = v
assert not ad.is_null(k) and ad.non_null(k)
@pytest.mark.usefixtures("write_project_files")
class SampleYamlTests:
""" AttMap metadata only appear in YAML if non-default. """
@staticmethod
def _yaml_data(sample, filepath, section_to_change=None,
attr_to_change=None, newval=None):
"""
Serialize a Sample, possibly tweaking it first, write, and parse.
:param models.Sample sample: what to serialize and write
:param str filepath: where to write the data
:param str section_to_change: name of section
in which to change attribute
:param str attr_to_change: name of attribute to change
:param object newval: value to set for targeted attribute
:return (Iterable[str], dict): raw lines and parsed version (YAML)
"""
if section_to_change:
getattr(sample, section_to_change)[attr_to_change] = newval
sample.to_yaml(filepath)
with open(filepath, 'r') as f:
data = yaml.safe_load(f)
with open(filepath, 'r') as f:
lines = f.readlines()
return lines, data
@pytest.mark.parametrize(
["func", "exp"], [(repr, "AttMap: {}"), (str, "AttMap: {}")])
def test_text_repr_empty(func, exp):
""" Empty AttMap is correctly represented as text. """
assert exp == func(AttMap())
|
py | 1a31f67ee1b408a01f26481df6bd625a5e442c91 | # Copyright (c) 2020 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .core import (
require_version as _require_version,
int2bytes,
bytes2int,
Version,
Tlv,
AID,
CommandError,
NotSupportedError,
BadResponseError,
)
from .core.smartcard import (
SmartCardConnection,
SmartCardProtocol,
ApduError,
SW,
ApduFormat,
)
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.constant_time import bytes_eq
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.primitives.asymmetric.padding import AsymmetricPadding
from cryptography.hazmat.backends import default_backend
from dataclasses import dataclass
from enum import Enum, IntEnum, unique
from typing import Optional, Union, Type, cast
import logging
import os
import re
logger = logging.getLogger(__name__)
@unique
class ALGORITHM(str, Enum):
EC = "ec"
RSA = "rsa"
# Don't treat pre 1.0 versions as "developer builds".
def require_version(my_version: Version, *args, **kwargs):
if my_version <= (0, 1, 3): # Last pre 1.0 release of ykneo-piv
my_version = Version(1, 0, 0)
_require_version(my_version, *args, **kwargs)
@unique
class KEY_TYPE(IntEnum):
RSA1024 = 0x06
RSA2048 = 0x07
ECCP256 = 0x11
ECCP384 = 0x14
@property
def algorithm(self):
return ALGORITHM.EC if self.name.startswith("ECC") else ALGORITHM.RSA
@property
def bit_len(self):
match = re.search(r"\d+$", self.name)
if match:
return int(match.group())
raise ValueError("No bit_len")
@classmethod
def from_public_key(cls, key):
if isinstance(key, rsa.RSAPublicKey):
try:
return getattr(cls, "RSA%d" % key.key_size)
except AttributeError:
raise ValueError("Unsupported RSA key size: %d" % key.key_size)
elif isinstance(key, ec.EllipticCurvePublicKey):
curve_name = key.curve.name
if curve_name == "secp256r1":
return cls.ECCP256
elif curve_name == "secp384r1":
return cls.ECCP384
raise ValueError(f"Unsupported EC curve: {curve_name}")
raise ValueError(f"Unsupported key type: {type(key).__name__}")
@unique
class MANAGEMENT_KEY_TYPE(IntEnum):
TDES = 0x03
AES128 = 0x08
AES192 = 0x0A
AES256 = 0x0C
@property
def key_len(self):
if self.name == "TDES":
return 24
# AES
return int(self.name[3:]) // 8
@property
def challenge_len(self):
if self.name == "TDES":
return 8
return 16
def _parse_management_key(key_type, management_key):
if key_type == MANAGEMENT_KEY_TYPE.TDES:
return algorithms.TripleDES(management_key)
else:
return algorithms.AES(management_key)
# The card management slot is special, we don't include it in SLOT below
SLOT_CARD_MANAGEMENT = 0x9B
@unique
class SLOT(IntEnum):
AUTHENTICATION = 0x9A
SIGNATURE = 0x9C
KEY_MANAGEMENT = 0x9D
CARD_AUTH = 0x9E
RETIRED1 = 0x82
RETIRED2 = 0x83
RETIRED3 = 0x84
RETIRED4 = 0x85
RETIRED5 = 0x86
RETIRED6 = 0x87
RETIRED7 = 0x88
RETIRED8 = 0x89
RETIRED9 = 0x8A
RETIRED10 = 0x8B
RETIRED11 = 0x8C
RETIRED12 = 0x8D
RETIRED13 = 0x8E
RETIRED14 = 0x8F
RETIRED15 = 0x90
RETIRED16 = 0x91
RETIRED17 = 0x92
RETIRED18 = 0x93
RETIRED19 = 0x94
RETIRED20 = 0x95
ATTESTATION = 0xF9
@unique
class OBJECT_ID(IntEnum):
CAPABILITY = 0x5FC107
CHUID = 0x5FC102
AUTHENTICATION = 0x5FC105 # cert for 9a key
FINGERPRINTS = 0x5FC103
SECURITY = 0x5FC106
FACIAL = 0x5FC108
PRINTED = 0x5FC109
SIGNATURE = 0x5FC10A # cert for 9c key
KEY_MANAGEMENT = 0x5FC10B # cert for 9d key
CARD_AUTH = 0x5FC101 # cert for 9e key
DISCOVERY = 0x7E
KEY_HISTORY = 0x5FC10C
IRIS = 0x5FC121
RETIRED1 = 0x5FC10D
RETIRED2 = 0x5FC10E
RETIRED3 = 0x5FC10F
RETIRED4 = 0x5FC110
RETIRED5 = 0x5FC111
RETIRED6 = 0x5FC112
RETIRED7 = 0x5FC113
RETIRED8 = 0x5FC114
RETIRED9 = 0x5FC115
RETIRED10 = 0x5FC116
RETIRED11 = 0x5FC117
RETIRED12 = 0x5FC118
RETIRED13 = 0x5FC119
RETIRED14 = 0x5FC11A
RETIRED15 = 0x5FC11B
RETIRED16 = 0x5FC11C
RETIRED17 = 0x5FC11D
RETIRED18 = 0x5FC11E
RETIRED19 = 0x5FC11F
RETIRED20 = 0x5FC120
ATTESTATION = 0x5FFF01
@classmethod
def from_slot(cls, slot):
return getattr(cls, SLOT(slot).name)
@unique
class PIN_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ONCE = 0x2
ALWAYS = 0x3
@unique
class TOUCH_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ALWAYS = 0x2
CACHED = 0x3
# 010203040506070801020304050607080102030405060708
DEFAULT_MANAGEMENT_KEY = (
b"\x01\x02\x03\x04\x05\x06\x07\x08"
+ b"\x01\x02\x03\x04\x05\x06\x07\x08"
+ b"\x01\x02\x03\x04\x05\x06\x07\x08"
)
PIN_LEN = 8
# Instruction set
INS_VERIFY = 0x20
INS_CHANGE_REFERENCE = 0x24
INS_RESET_RETRY = 0x2C
INS_GENERATE_ASYMMETRIC = 0x47
INS_AUTHENTICATE = 0x87
INS_GET_DATA = 0xCB
INS_PUT_DATA = 0xDB
INS_GET_METADATA = 0xF7
INS_ATTEST = 0xF9
INS_SET_PIN_RETRIES = 0xFA
INS_RESET = 0xFB
INS_GET_VERSION = 0xFD
INS_IMPORT_KEY = 0xFE
INS_SET_MGMKEY = 0xFF
# Tags for parsing responses and preparing requests
TAG_AUTH_WITNESS = 0x80
TAG_AUTH_CHALLENGE = 0x81
TAG_AUTH_RESPONSE = 0x82
TAG_AUTH_EXPONENTIATION = 0x85
TAG_GEN_ALGORITHM = 0x80
TAG_OBJ_DATA = 0x53
TAG_OBJ_ID = 0x5C
TAG_CERTIFICATE = 0x70
TAG_CERT_INFO = 0x71
TAG_DYN_AUTH = 0x7C
TAG_LRC = 0xFE
TAG_PIN_POLICY = 0xAA
TAG_TOUCH_POLICY = 0xAB
# Metadata tags
TAG_METADATA_ALGO = 0x01
TAG_METADATA_POLICY = 0x02
TAG_METADATA_ORIGIN = 0x03
TAG_METADATA_PUBLIC_KEY = 0x04
TAG_METADATA_IS_DEFAULT = 0x05
TAG_METADATA_RETRIES = 0x06
ORIGIN_GENERATED = 1
ORIGIN_IMPORTED = 2
INDEX_PIN_POLICY = 0
INDEX_TOUCH_POLICY = 1
INDEX_RETRIES_TOTAL = 0
INDEX_RETRIES_REMAINING = 1
PIN_P2 = 0x80
PUK_P2 = 0x81
class InvalidPinError(CommandError):
def __init__(self, attempts_remaining):
super(InvalidPinError, self).__init__(
"Invalid PIN/PUK. Remaining attempts: %d" % attempts_remaining
)
self.attempts_remaining = attempts_remaining
def _pin_bytes(pin):
pin = pin.encode()
if len(pin) > PIN_LEN:
raise ValueError("PIN/PUK must be no longer than 8 bytes")
return pin.ljust(PIN_LEN, b"\xff")
def _retries_from_sw(version, sw):
if sw == SW.AUTH_METHOD_BLOCKED:
return 0
if version < (1, 0, 4):
if 0x6300 <= sw <= 0x63FF:
return sw & 0xFF
else:
if 0x63C0 <= sw <= 0x63CF:
return sw & 0x0F
return None
@dataclass
class PinMetadata:
default_value: bool
total_attempts: int
attempts_remaining: int
@dataclass
class ManagementKeyMetadata:
key_type: MANAGEMENT_KEY_TYPE
default_value: bool
touch_policy: TOUCH_POLICY
@dataclass
class SlotMetadata:
key_type: KEY_TYPE
pin_policy: PIN_POLICY
touch_policy: TOUCH_POLICY
generated: bool
public_key_encoded: bytes
@property
def public_key(self):
return _parse_device_public_key(self.key_type, self.public_key_encoded)
def _pad_message(key_type, message, hash_algorithm, padding):
if key_type.algorithm == ALGORITHM.EC:
h = hashes.Hash(hash_algorithm, default_backend())
h.update(message)
hashed = h.finalize()
byte_len = key_type.bit_len // 8
if len(hashed) < byte_len:
            return hashed.rjust(byte_len, b"\0")
return hashed[:byte_len]
elif key_type.algorithm == ALGORITHM.RSA:
# Sign with a dummy key, then encrypt the signature to get the padded message
e = 65537
dummy = rsa.generate_private_key(e, key_type.bit_len, default_backend())
signature = dummy.sign(message, padding, hash_algorithm)
# Raw (textbook) RSA encrypt
n = dummy.public_key().public_numbers().n
return int2bytes(pow(bytes2int(signature), e, n), key_type.bit_len // 8)
def _unpad_message(padded, padding):
e = 65537
dummy = rsa.generate_private_key(e, len(padded) * 8, default_backend())
# Raw (textbook) RSA encrypt
n = dummy.public_key().public_numbers().n
encrypted = int2bytes(pow(bytes2int(padded), e, n), len(padded))
return dummy.decrypt(encrypted, padding)
def check_key_support(
version: Version,
key_type: KEY_TYPE,
pin_policy: PIN_POLICY,
touch_policy: TOUCH_POLICY,
generate: bool = True,
) -> None:
"""Check if a key type is supported by a specific YubiKey firmware version.
This method will return None if the key (with PIN and touch policies) is supported,
or it will raise a NotSupportedError if it is not.
"""
if version[0] == 0 and version > (0, 1, 3):
return # Development build, skip version checks
if version < (4, 0, 0):
if key_type == KEY_TYPE.ECCP384:
raise NotSupportedError("ECCP384 requires YubiKey 4 or later")
if touch_policy != TOUCH_POLICY.DEFAULT or pin_policy != PIN_POLICY.DEFAULT:
raise NotSupportedError("PIN/Touch policy requires YubiKey 4 or later")
if version < (4, 3, 0) and touch_policy == TOUCH_POLICY.CACHED:
raise NotSupportedError("Cached touch policy requires YubiKey 4.3 or later")
# ROCA
if (4, 2, 0) <= version < (4, 3, 5):
if generate and key_type.algorithm == ALGORITHM.RSA:
raise NotSupportedError("RSA key generation not supported on this YubiKey")
# FIPS
if (4, 4, 0) <= version < (4, 5, 0):
if key_type == KEY_TYPE.RSA1024:
raise NotSupportedError("RSA 1024 not supported on YubiKey FIPS")
if pin_policy == PIN_POLICY.NEVER:
raise NotSupportedError("PIN_POLICY.NEVER not allowed on YubiKey FIPS")
def _parse_device_public_key(key_type, encoded):
data = Tlv.parse_dict(encoded)
if key_type.algorithm == ALGORITHM.RSA:
modulus = bytes2int(data[0x81])
exponent = bytes2int(data[0x82])
return rsa.RSAPublicNumbers(exponent, modulus).public_key(default_backend())
else:
if key_type == KEY_TYPE.ECCP256:
curve: Type[ec.EllipticCurve] = ec.SECP256R1
else:
curve = ec.SECP384R1
try:
# Added in cryptography 2.5
return ec.EllipticCurvePublicKey.from_encoded_point(curve(), data[0x86])
except AttributeError:
return ec.EllipticCurvePublicNumbers.from_encoded_point(
curve(), data[0x86]
).public_key(default_backend())
class PivSession:
def __init__(self, connection: SmartCardConnection):
self.protocol = SmartCardProtocol(connection)
self.protocol.select(AID.PIV)
self._version = Version.from_bytes(
self.protocol.send_apdu(0, INS_GET_VERSION, 0, 0)
)
self.protocol.enable_touch_workaround(self.version)
if self.version >= (4, 0, 0):
self.protocol.apdu_format = ApduFormat.EXTENDED
self._current_pin_retries = 3
self._max_pin_retries = 3
@property
def version(self) -> Version:
return self._version
def reset(self) -> None:
# Block PIN
counter = self.get_pin_attempts()
while counter > 0:
try:
self.verify_pin("")
except InvalidPinError as e:
counter = e.attempts_remaining
# Block PUK
counter = 1
while counter > 0:
try:
self._change_reference(INS_RESET_RETRY, PIN_P2, "", "")
except InvalidPinError as e:
counter = e.attempts_remaining
# Reset
self.protocol.send_apdu(0, INS_RESET, 0, 0)
self._current_pin_retries = 3
self._max_pin_retries = 3
def authenticate(
self, key_type: MANAGEMENT_KEY_TYPE, management_key: bytes
) -> None:
key_type = MANAGEMENT_KEY_TYPE(key_type)
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
SLOT_CARD_MANAGEMENT,
Tlv(TAG_DYN_AUTH, Tlv(TAG_AUTH_WITNESS)),
)
witness = Tlv.unpack(TAG_AUTH_WITNESS, Tlv.unpack(TAG_DYN_AUTH, response))
challenge = os.urandom(key_type.challenge_len)
backend = default_backend()
cipher_key = _parse_management_key(key_type, management_key)
cipher = Cipher(cipher_key, modes.ECB(), backend) # nosec
decryptor = cipher.decryptor()
decrypted = decryptor.update(witness) + decryptor.finalize()
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
SLOT_CARD_MANAGEMENT,
Tlv(
TAG_DYN_AUTH,
Tlv(TAG_AUTH_WITNESS, decrypted) + Tlv(TAG_AUTH_CHALLENGE, challenge),
),
)
encrypted = Tlv.unpack(TAG_AUTH_RESPONSE, Tlv.unpack(TAG_DYN_AUTH, response))
encryptor = cipher.encryptor()
expected = encryptor.update(challenge) + encryptor.finalize()
if not bytes_eq(expected, encrypted):
raise BadResponseError("Device response is incorrect")
def set_management_key(
self,
key_type: MANAGEMENT_KEY_TYPE,
management_key: bytes,
require_touch: bool = False,
) -> None:
key_type = MANAGEMENT_KEY_TYPE(key_type)
if key_type != MANAGEMENT_KEY_TYPE.TDES:
require_version(self.version, (5, 4, 0))
if len(management_key) != key_type.key_len:
raise ValueError("Management key must be %d bytes" % key_type.key_len)
self.protocol.send_apdu(
0,
INS_SET_MGMKEY,
0xFF,
0xFE if require_touch else 0xFF,
int2bytes(key_type) + Tlv(SLOT_CARD_MANAGEMENT, management_key),
)
def verify_pin(self, pin: str) -> None:
try:
self.protocol.send_apdu(0, INS_VERIFY, 0, PIN_P2, _pin_bytes(pin))
self._current_pin_retries = self._max_pin_retries
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
self._current_pin_retries = retries
raise InvalidPinError(retries)
def get_pin_attempts(self) -> int:
try:
return self.get_pin_metadata().attempts_remaining
except NotSupportedError:
try:
self.protocol.send_apdu(0, INS_VERIFY, 0, PIN_P2)
# Already verified, no way to know true count
return self._current_pin_retries
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
self._current_pin_retries = retries
return retries
def change_pin(self, old_pin: str, new_pin: str) -> None:
self._change_reference(INS_CHANGE_REFERENCE, PIN_P2, old_pin, new_pin)
def change_puk(self, old_puk: str, new_puk: str) -> None:
self._change_reference(INS_CHANGE_REFERENCE, PUK_P2, old_puk, new_puk)
def unblock_pin(self, puk: str, new_pin: str) -> None:
self._change_reference(INS_RESET_RETRY, PIN_P2, puk, new_pin)
def set_pin_attempts(self, pin_attempts: int, puk_attempts: int) -> None:
self.protocol.send_apdu(0, INS_SET_PIN_RETRIES, pin_attempts, puk_attempts)
self._max_pin_retries = pin_attempts
self._current_pin_retries = pin_attempts
def get_pin_metadata(self) -> PinMetadata:
return self._get_pin_puk_metadata(PIN_P2)
def get_puk_metadata(self) -> PinMetadata:
return self._get_pin_puk_metadata(PUK_P2)
def get_management_key_metadata(self) -> ManagementKeyMetadata:
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(
self.protocol.send_apdu(0, INS_GET_METADATA, 0, SLOT_CARD_MANAGEMENT)
)
policy = data[TAG_METADATA_POLICY]
return ManagementKeyMetadata(
MANAGEMENT_KEY_TYPE(data.get(TAG_METADATA_ALGO, b"\x03")[0]),
data[TAG_METADATA_IS_DEFAULT] != b"\0",
TOUCH_POLICY(policy[INDEX_TOUCH_POLICY]),
)
def get_slot_metadata(self, slot: SLOT) -> SlotMetadata:
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(self.protocol.send_apdu(0, INS_GET_METADATA, 0, slot))
policy = data[TAG_METADATA_POLICY]
return SlotMetadata(
KEY_TYPE(data[TAG_METADATA_ALGO][0]),
PIN_POLICY(policy[INDEX_PIN_POLICY]),
TOUCH_POLICY(policy[INDEX_TOUCH_POLICY]),
data[TAG_METADATA_ORIGIN][0] == ORIGIN_GENERATED,
data[TAG_METADATA_PUBLIC_KEY],
)
def sign(
self,
slot: SLOT,
key_type: KEY_TYPE,
message: bytes,
hash_algorithm: hashes.HashAlgorithm,
padding: Optional[AsymmetricPadding] = None,
) -> bytes:
key_type = KEY_TYPE(key_type)
padded = _pad_message(key_type, message, hash_algorithm, padding)
return self._use_private_key(slot, key_type, padded, False)
def decrypt(
self, slot: SLOT, cipher_text: bytes, padding: AsymmetricPadding
) -> bytes:
if len(cipher_text) == 1024 // 8:
key_type = KEY_TYPE.RSA1024
elif len(cipher_text) == 2048 // 8:
key_type = KEY_TYPE.RSA2048
else:
raise ValueError("Invalid length of ciphertext")
padded = self._use_private_key(slot, key_type, cipher_text, False)
return _unpad_message(padded, padding)
def calculate_secret(
self, slot: SLOT, peer_public_key: ec.EllipticCurvePublicKey
) -> bytes:
key_type = KEY_TYPE.from_public_key(peer_public_key)
if key_type.algorithm != ALGORITHM.EC:
raise ValueError("Unsupported key type")
data = peer_public_key.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
)
return self._use_private_key(slot, key_type, data, True)
def get_object(self, object_id: int) -> bytes:
if object_id == OBJECT_ID.DISCOVERY:
expected: int = OBJECT_ID.DISCOVERY
else:
expected = TAG_OBJ_DATA
try:
return Tlv.unpack(
expected,
self.protocol.send_apdu(
0,
INS_GET_DATA,
0x3F,
0xFF,
Tlv(TAG_OBJ_ID, int2bytes(object_id)),
),
)
except ValueError as e:
raise BadResponseError("Malformed object data", e)
def put_object(self, object_id: int, data: Optional[bytes] = None) -> None:
self.protocol.send_apdu(
0,
INS_PUT_DATA,
0x3F,
0xFF,
Tlv(TAG_OBJ_ID, int2bytes(object_id)) + Tlv(TAG_OBJ_DATA, data or b""),
)
def get_certificate(self, slot: SLOT) -> x509.Certificate:
try:
data = Tlv.parse_dict(self.get_object(OBJECT_ID.from_slot(slot)))
except ValueError:
raise BadResponseError("Malformed certificate data object")
cert_info = data.get(TAG_CERT_INFO)
if cert_info and cert_info[0] != 0:
raise NotSupportedError("Compressed certificates are not supported")
try:
return x509.load_der_x509_certificate(
data[TAG_CERTIFICATE], default_backend()
)
except Exception as e:
raise BadResponseError("Invalid certificate", e)
def put_certificate(self, slot: SLOT, certificate: x509.Certificate) -> None:
cert_data = certificate.public_bytes(Encoding.DER)
data = (
Tlv(TAG_CERTIFICATE, cert_data) + Tlv(TAG_CERT_INFO, b"\0") + Tlv(TAG_LRC)
)
self.put_object(OBJECT_ID.from_slot(slot), data)
def delete_certificate(self, slot: SLOT) -> None:
self.put_object(OBJECT_ID.from_slot(slot))
def put_key(
self,
slot: SLOT,
private_key: Union[
rsa.RSAPrivateKeyWithSerialization,
ec.EllipticCurvePrivateKeyWithSerialization,
],
pin_policy: PIN_POLICY = PIN_POLICY.DEFAULT,
touch_policy: TOUCH_POLICY = TOUCH_POLICY.DEFAULT,
    ) -> KEY_TYPE:
key_type = KEY_TYPE.from_public_key(private_key.public_key())
check_key_support(self.version, key_type, pin_policy, touch_policy, False)
ln = key_type.bit_len // 8
numbers = private_key.private_numbers()
if key_type.algorithm == ALGORITHM.RSA:
numbers = cast(rsa.RSAPrivateNumbers, numbers)
if numbers.public_numbers.e != 65537:
raise NotSupportedError("RSA exponent must be 65537")
ln //= 2
data = (
Tlv(0x01, int2bytes(numbers.p, ln))
+ Tlv(0x02, int2bytes(numbers.q, ln))
+ Tlv(0x03, int2bytes(numbers.dmp1, ln))
+ Tlv(0x04, int2bytes(numbers.dmq1, ln))
+ Tlv(0x05, int2bytes(numbers.iqmp, ln))
)
else:
numbers = cast(ec.EllipticCurvePrivateNumbers, numbers)
data = Tlv(0x06, int2bytes(numbers.private_value, ln))
if pin_policy:
data += Tlv(TAG_PIN_POLICY, int2bytes(pin_policy))
if touch_policy:
data += Tlv(TAG_TOUCH_POLICY, int2bytes(touch_policy))
self.protocol.send_apdu(0, INS_IMPORT_KEY, key_type, slot, data)
return key_type
def generate_key(
self,
slot: SLOT,
key_type: KEY_TYPE,
pin_policy: PIN_POLICY = PIN_POLICY.DEFAULT,
touch_policy: TOUCH_POLICY = TOUCH_POLICY.DEFAULT,
) -> Union[rsa.RSAPublicKey, ec.EllipticCurvePublicKey]:
key_type = KEY_TYPE(key_type)
check_key_support(self.version, key_type, pin_policy, touch_policy, True)
data: bytes = Tlv(TAG_GEN_ALGORITHM, int2bytes(key_type))
if pin_policy:
data += Tlv(TAG_PIN_POLICY, int2bytes(pin_policy))
if touch_policy:
data += Tlv(TAG_TOUCH_POLICY, int2bytes(touch_policy))
response = self.protocol.send_apdu(
0, INS_GENERATE_ASYMMETRIC, 0, slot, Tlv(0xAC, data)
)
return _parse_device_public_key(key_type, Tlv.unpack(0x7F49, response))
def attest_key(self, slot: SLOT) -> x509.Certificate:
require_version(self.version, (4, 3, 0))
response = self.protocol.send_apdu(0, INS_ATTEST, slot, 0)
return x509.load_der_x509_certificate(response, default_backend())
def _change_reference(self, ins, p2, value1, value2):
try:
self.protocol.send_apdu(
0, ins, 0, p2, _pin_bytes(value1) + _pin_bytes(value2)
)
except ApduError as e:
retries = _retries_from_sw(self.version, e.sw)
if retries is None:
raise
if p2 == PIN_P2:
self._current_pin_retries = retries
raise InvalidPinError(retries)
def _get_pin_puk_metadata(self, p2):
require_version(self.version, (5, 3, 0))
data = Tlv.parse_dict(self.protocol.send_apdu(0, INS_GET_METADATA, 0, p2))
attempts = data[TAG_METADATA_RETRIES]
return PinMetadata(
data[TAG_METADATA_IS_DEFAULT] != b"\0",
attempts[INDEX_RETRIES_TOTAL],
attempts[INDEX_RETRIES_REMAINING],
)
def _use_private_key(self, slot, key_type, message, exponentiation):
try:
response = self.protocol.send_apdu(
0,
INS_AUTHENTICATE,
key_type,
slot,
Tlv(
TAG_DYN_AUTH,
Tlv(TAG_AUTH_RESPONSE)
+ Tlv(
TAG_AUTH_EXPONENTIATION
if exponentiation
else TAG_AUTH_CHALLENGE,
message,
),
),
)
return Tlv.unpack(
TAG_AUTH_RESPONSE,
Tlv.unpack(
TAG_DYN_AUTH,
response,
),
)
except ApduError as e:
if e.sw == SW.INCORRECT_PARAMETERS:
raise e # TODO: Different error, No key?
raise
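# A minimal usage sketch (hedged, comments only): connection setup is assumed to come
# from the device layer, and "123456" is only the commonly shipped factory-default PIN.
#
#     # connection = ...  # a SmartCardConnection obtained from the YubiKey
#     # piv = PivSession(connection)
#     # piv.authenticate(MANAGEMENT_KEY_TYPE.TDES, DEFAULT_MANAGEMENT_KEY)
#     # piv.verify_pin("123456")
#     # pub_key = piv.generate_key(SLOT.AUTHENTICATION, KEY_TYPE.ECCP256)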
|
py | 1a31f759315de1ef2f30dd1619f96c3245741770 | def fib(n):
if n <= 2:
return 1
return fib(n-1) + fib(n-2)
print(fib(30))
|
py | 1a31f794950b5cf873938be74148abbf31363022 | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torchvision.models as models
def test():
net = models.resnet18().half().float()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 3, 224, 224)
a = net(x)
# export torchscript
mod = torch.jit.trace(net, x)
mod.save("test_resnet18.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_resnet18.pt inputshape=[1,3,224,224]")
# ncnn inference
import test_resnet18_ncnn
b = test_resnet18_ncnn.test_inference()
return torch.allclose(a, b, 1e-2, 1e-2)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
py | 1a31fa6db50a40751283a6143368157bf0587327 | from pydyn.operations.binary_tree import has_nested_add
from pydyn.operations.geometry import Dot, Cross, Vee, Hat
from pydyn.operations.addition import Add, VAdd, MAdd
from pydyn.operations.multiplication import Mul, SMMul, SVMul, MVMul, VVMul, MMMul
from pydyn.base.matrices import MatrixExpr
from pydyn.base.scalars import ScalarExpr
from pydyn.base.vectors import VectorExpr
from pydyn.utils.errors import UndefinedCaseError
def expand_scalar(expr):
if isinstance(expr, Add):
expanded_expr = Add()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, Mul):
if isinstance(expr.left, Add) and isinstance(expr.right, Add):
"""(a+b)(c+d) = ac + ad + bc + bd"""
expanded_expr = Add()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(nl * nr)
return expanded_expr
elif isinstance(expr.left, Add):
"""(a+b)c = ac + bc"""
expanded_expr = Add()
for n in expr.left.nodes:
expanded_expr += expand(n * expr.right)
return expanded_expr
elif isinstance(expr.right, Add):
"""a(b+c) = ab + ac"""
expanded_expr = Add()
for n in expr.right.nodes:
expanded_expr += expand(expr.left * n)
return expanded_expr
else:
if has_nested_add(expr):
return expand(expand(expr.left) * expand(expr.right))
else:
return expr
elif isinstance(expr, Dot):
if isinstance(expr.left, VAdd) and isinstance(expr.right, VAdd):
"""(x+y).(u+v) = x.u + x.v + y.u + y.v"""
expanded_expr = Add()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(Dot(nl, nr))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""x.(u+v) = x.u + x.v"""
expanded_expr = Add()
for n in expr.right.nodes:
expanded_expr += expand(Dot(expr.left, n))
return expanded_expr
elif isinstance(expr.left, VAdd):
"""(x+y).u = x.u + y.u"""
expanded_expr = Add()
for n in expr.left.nodes:
expanded_expr += expand(Dot(n, expr.right))
return expanded_expr
else:
if has_nested_add(expr):
return expand(Dot(expand(expr.left), expand(expr.right)))
else:
return Dot(expand(expr.left), expand(expr.right))
elif isinstance(expr, VVMul):
raise NotImplementedError
return expr
def expand_vector(expr):
if isinstance(expr, VAdd):
expanded_expr = VAdd()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, MVMul):
if isinstance(expr.left, MAdd):
"""(A+B)x = Ax+Bx"""
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(MVMul(n, expr.right))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""A(x+y) = Ax + Ay"""
expanded_expr = VAdd()
for n in expr.right.nodes:
expanded_expr += expand(MVMul(expr.left, n))
return expanded_expr
else:
if has_nested_add(expr):
return expand(MVMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, SVMul):
if isinstance(expr.left, VAdd):
"""(x+y)a=xa+ya"""
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(SVMul(n, expr.right))
return expanded_expr
else:
if has_nested_add(expr):
return expand(SVMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, Cross):
if isinstance(expr.left, VAdd) and isinstance(expr.right, VAdd):
expanded_expr = VAdd()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(Cross(nl, nr))
return expanded_expr
elif isinstance(expr.left, VAdd):
expanded_expr = VAdd()
for n in expr.left.nodes:
expanded_expr += expand(Cross(n, expr.right))
return expanded_expr
elif isinstance(expr.right, VAdd):
"""x.(u+v) = x.u + x.v"""
expanded_expr = VAdd()
for n in expr.right.nodes:
expanded_expr += expand(Cross(expr.left, n))
return expanded_expr
else:
if has_nested_add(expr):
return expand(Cross(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, Vee):
return Vee(expand(expr))
return expr
def expand_matrix(expr):
if isinstance(expr, MAdd):
expanded_expr = MAdd()
for n in expr.nodes:
expanded_expr += expand(n)
return expanded_expr
elif isinstance(expr, MMMul):
if isinstance(expr.left, MAdd) and isinstance(expr.right, MAdd):
expanded_expr = MAdd()
for nl in expr.left.nodes:
for nr in expr.right.nodes:
expanded_expr += expand(nl * nr)
return expanded_expr
elif isinstance(expr.left, MAdd):
expanded_expr = MAdd()
for nl in expr.left.nodes:
expanded_expr += expand(nl * expr.right)
return expanded_expr
elif isinstance(expr.right, MAdd):
expanded_expr = MAdd()
for nr in expr.right.nodes:
expanded_expr += expand(expr.left * nr)
return expanded_expr
else:
if has_nested_add(expr):
return expand(MMMul(expand(expr.left), expand(expr.right)))
else:
return expr
elif isinstance(expr, SMMul):
        raise Exception('SMMul in expand_matrix is not implemented')
elif isinstance(expr, VVMul):
raise Exception('VVMul in expand_matrix is not implemented')
elif isinstance(expr, Hat):
return Hat(expand(expr.expr))
return expr
def expand(expr):
# TODO add expand functionality to the Expr class directly
if isinstance(expr, ScalarExpr):
return expand_scalar(expr)
elif isinstance(expr, VectorExpr):
return expand_vector(expr)
elif isinstance(expr, MatrixExpr):
return expand_matrix(expr)
else:
raise UndefinedCaseError
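# Hedged illustration (comments only; exact constructor names vary across pydyn
# versions, so `x + y` just stands for a VAdd of vector expressions): expand
# distributes products and geometric operations over sums, e.g.
#
#     # expand( Dot(x + y, u + v) )  ->  Dot(x, u) + Dot(x, v) + Dot(y, u) + Dot(y, v)
#     # expand( MVMul(A, x + y) )    ->  MVMul(A, x) + MVMul(A, y)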
|
py | 1a31fa9d0fd3f105a74caea0918f9561b94d2300 | import matplotlib.pyplot as plt
import pprint
from string import ascii_lowercase as letters
def read_and_count_letters(to_read):
# Open the file that is passed in as an argument to the function
with open(to_read, encoding='utf-8') as f:
text = f.read().lower()
text_count = dict((l, text.count(l)) for l in letters)
# Sorting the returned data to be processed further
text_sort = sorted(text_count.items(), key=lambda x: x[1], reverse=True)
# Printing the returned data to the command line
pprint.pprint(text_sort)
# Initialising a new matplotlib bar graph illustrating the occurrences of letters detected in the text file
plt.bar(*zip(*text_count.items()))
plt.show()
# Enter the .txt document you wish to process between the two apostrophes below
to_read = ''
read_and_count_letters(to_read)
|
py | 1a31fab74897924e40da1c4549ca112c476bd25b | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import copy
import unicodedata
from typing import (
Any,
ClassVar,
Dict,
List,
NamedTuple,
Sequence,
Set,
Literal,
Optional,
TYPE_CHECKING,
Tuple,
Union,
overload,
)
from . import utils, abc
from .role import Role
from .member import Member, VoiceState
from .emoji import Emoji
from .errors import InvalidData
from .permissions import PermissionOverwrite
from .colour import Colour
from .errors import InvalidArgument, ClientException
from .channel import *
from .channel import _guild_channel_factory
from .channel import _threaded_guild_channel_factory
from .enums import (
AuditLogAction,
VideoQualityMode,
VoiceRegion,
ChannelType,
try_enum,
VerificationLevel,
ContentFilter,
NotificationLevel,
NSFWLevel,
)
from .mixins import Hashable
from .user import User
from .invite import Invite
from .iterators import AuditLogIterator, MemberIterator
from .widget import Widget
from .asset import Asset
from .flags import SystemChannelFlags
from .integrations import Integration, _integration_factory
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
from .file import File
from .welcome_screen import WelcomeScreen, WelcomeScreenChannel
__all__ = (
'Guild',
)
MISSING = utils.MISSING
if TYPE_CHECKING:
from .abc import Snowflake, SnowflakeTime
from .types.guild import Ban as BanPayload, Guild as GuildPayload, MFALevel, GuildFeature
from .types.threads import (
Thread as ThreadPayload,
)
from .types.voice import GuildVoiceState
from .permissions import Permissions
from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel
from .template import Template
from .webhook import Webhook
from .state import ConnectionState
from .voice_client import VoiceProtocol
import datetime
VocalGuildChannel = Union[VoiceChannel, StageChannel]
GuildChannel = Union[VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel]
ByCategoryItem = Tuple[Optional[CategoryChannel], List[GuildChannel]]
class BanEntry(NamedTuple):
reason: Optional[str]
user: User
class _GuildLimit(NamedTuple):
emoji: int
stickers: int
bitrate: float
filesize: int
class Guild(Hashable):
"""Represents a Discord guild.
This is referred to as a "server" in the official Discord UI.
.. container:: operations
.. describe:: x == y
Checks if two guilds are equal.
.. describe:: x != y
Checks if two guilds are not equal.
.. describe:: hash(x)
Returns the guild's hash.
.. describe:: str(x)
Returns the guild's name.
Attributes
----------
name: :class:`str`
The guild name.
emojis: Tuple[:class:`Emoji`, ...]
All emojis that the guild owns.
stickers: Tuple[:class:`GuildSticker`, ...]
All stickers that the guild owns.
.. versionadded:: 2.0
region: :class:`VoiceRegion`
The region the guild belongs on. There is a chance that the region
will be a :class:`str` if the value is not recognised by the enumerator.
afk_timeout: :class:`int`
The timeout to get sent to the AFK channel.
afk_channel: Optional[:class:`VoiceChannel`]
The channel that denotes the AFK channel. ``None`` if it doesn't exist.
id: :class:`int`
The guild's ID.
owner_id: :class:`int`
The guild owner's ID. Use :attr:`Guild.owner` instead.
unavailable: :class:`bool`
Indicates if the guild is unavailable. If this is ``True`` then the
reliability of other attributes outside of :attr:`Guild.id` is slim and they might
all be ``None``. It is best to not do anything with the guild if it is unavailable.
Check the :func:`on_guild_unavailable` and :func:`on_guild_available` events.
max_presences: Optional[:class:`int`]
The maximum amount of presences for the guild.
max_members: Optional[:class:`int`]
The maximum amount of members for the guild.
.. note::
This attribute is only available via :meth:`.Client.fetch_guild`.
max_video_channel_users: Optional[:class:`int`]
The maximum amount of users in a video channel.
.. versionadded:: 1.4
description: Optional[:class:`str`]
The guild's description.
mfa_level: :class:`int`
Indicates the guild's two factor authorisation level. If this value is 0 then
the guild does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The guild's verification level.
explicit_content_filter: :class:`ContentFilter`
The guild's explicit content filter.
default_notifications: :class:`NotificationLevel`
The guild's notification settings.
features: List[:class:`str`]
A list of features that the guild has. The features that a guild can have are
subject to arbitrary change by Discord.
They are currently as follows:
- ``ANIMATED_BANNER``: Guild can upload an animated banner.
- ``ANIMATED_ICON``: Guild can upload an animated icon.
- ``BANNER``: Guild can upload and use a banner. (i.e. :attr:`.banner`)
- ``CHANNEL_BANNER``: Guild can upload and use channel banners.
- ``COMMERCE``: Guild can sell things using store channels.
- ``COMMUNITY``: Guild is a community server.
- ``DISCOVERABLE``: Guild shows up in Server Discovery.
- ``FEATURABLE``: Guild is able to be featured in Server Discovery.
- ``HAS_DIRECTORY_ENTRY``: Unknown.
- ``HUB``: Hubs contain a directory channel that lets you find school-related, student-run servers for your school or university.
- ``INTERNAL_EMPLOYEE_ONLY``: Indicates that only users with the staff badge can join the guild.
- ``INVITE_SPLASH``: Guild's invite page can have a special splash.
- ``LINKED_TO_HUB``: Guild is linked to a hub.
- ``MEMBER_PROFILES``: Unknown.
- ``MEMBER_VERIFICATION_GATE_ENABLED``: Guild has Membership Screening enabled.
- ``MONETIZATION_ENABLED``: Guild has enabled monetization.
- ``MORE_EMOJI``: Guild has increased custom emoji slots.
- ``MORE_STICKERS``: Guild has increased custom sticker slots.
- ``NEWS``: Guild can create news channels.
- ``NEW_THREAD_PERMISSIONS``: Guild has new thread permissions.
- ``PARTNERED``: Guild is a partnered server.
- ``PREMIUM_TIER_3_OVERRIDE``: Forces the server to server boosting level 3 (specifically created by Discord Staff Member "Jethro" for their personal server).
- ``PREVIEW_ENABLED``: Guild can be viewed before being accepted via Membership Screening.
- ``PRIVATE_THREADS``: Guild has access to create private threads.
- ``ROLE_ICONS``: Guild can set an image or emoji as a role icon.
- ``ROLE_SUBSCRIPTIONS_AVAILABLE_FOR_PURCHASE``: Role subscriptions are available for purchasing.
- ``ROLE_SUBSCRIPTIONS_ENABLED``: Guild is able to view and manage role subscriptions.
- ``SEVEN_DAY_THREAD_ARCHIVE``: Guild has access to the seven day archive time for threads.
- ``TEXT_IN_VOICE_ENABLED``: Guild has a chat button inside voice channels that opens a dedicated text channel in a sidebar similar to thread view.
- ``THREAD_DEFAULT_AUTO_ARCHIVE_DURATION``: Unknown, presumably used for testing changes to the thread default auto archive duration.
- ``THREADS_ENABLED_TESTING``: Used by bot developers to test their bots with threads in guilds with 5 or fewer members and a bot. Also gives the premium thread features.
- ``THREE_DAY_THREAD_ARCHIVE``: Guild has access to the three day archive time for threads.
- ``TICKETED_EVENTS_ENABLED``: Guild has enabled ticketed events.
- ``VANITY_URL``: Guild can have a vanity invite URL (e.g. discord.gg/discord-api).
- ``VERIFIED``: Guild is a verified server.
- ``VIP_REGIONS``: Guild has VIP voice regions.
- ``WELCOME_SCREEN_ENABLED``: Guild has enabled the welcome screen.
premium_tier: :class:`int`
The premium tier for this guild. Corresponds to "Nitro Server" in the official UI.
The number goes from 0 to 3 inclusive.
premium_subscription_count: :class:`int`
The number of "boosts" this guild currently has.
premium_progress_bar_enabled: :class:`bool`
Indicates if the guild has premium progress bar enabled.
.. versionadded:: 2.0
preferred_locale: Optional[:class:`str`]
The preferred locale for the guild. Used when filtering Server Discovery
results to a specific language.
nsfw_level: :class:`NSFWLevel`
The guild's NSFW level.
.. versionadded:: 2.0
approximate_member_count: Optional[:class:`int`]
The approximate number of members in the guild. This is ``None`` unless the guild is obtained
using :meth:`Client.fetch_guild` with ``with_counts=True``.
.. versionadded:: 2.0
approximate_presence_count: Optional[:class:`int`]
The approximate number of members currently active in the guild.
This includes idle, dnd, online, and invisible members. Offline members are excluded.
This is ``None`` unless the guild is obtained using :meth:`Client.fetch_guild`
with ``with_counts=True``.
.. versionadded:: 2.0
"""
__slots__ = (
'afk_timeout',
'afk_channel',
'name',
'id',
'unavailable',
'region',
'owner_id',
'mfa_level',
'emojis',
'stickers',
'features',
'verification_level',
'explicit_content_filter',
'default_notifications',
'description',
'max_presences',
'max_members',
'max_video_channel_users',
'premium_tier',
'premium_subscription_count',
'premium_progress_bar_enabled',
'preferred_locale',
'nsfw_level',
'_members',
'_channels',
'_icon',
'_banner',
'_state',
'_roles',
'_member_count',
'_large',
'_splash',
'_voice_states',
'_system_channel_id',
'_system_channel_flags',
'_discovery_splash',
'_rules_channel_id',
'_public_updates_channel_id',
'_stage_instances',
'_threads',
"approximate_member_count",
"approximate_presence_count",
)
_PREMIUM_GUILD_LIMITS: ClassVar[Dict[Optional[int], _GuildLimit]] = {
None: _GuildLimit(emoji=50, stickers=0, bitrate=96e3, filesize=8388608),
0: _GuildLimit(emoji=50, stickers=0, bitrate=96e3, filesize=8388608),
1: _GuildLimit(emoji=100, stickers=15, bitrate=128e3, filesize=8388608),
2: _GuildLimit(emoji=150, stickers=30, bitrate=256e3, filesize=52428800),
3: _GuildLimit(emoji=250, stickers=60, bitrate=384e3, filesize=104857600),
}
def __init__(self, *, data: GuildPayload, state: ConnectionState):
self._channels: Dict[int, GuildChannel] = {}
self._members: Dict[int, Member] = {}
self._voice_states: Dict[int, VoiceState] = {}
self._threads: Dict[int, Thread] = {}
self._state: ConnectionState = state
self._from_data(data)
def _add_channel(self, channel: GuildChannel, /) -> None:
self._channels[channel.id] = channel
def _remove_channel(self, channel: Snowflake, /) -> None:
self._channels.pop(channel.id, None)
def _voice_state_for(self, user_id: int, /) -> Optional[VoiceState]:
return self._voice_states.get(user_id)
def _add_member(self, member: Member, /) -> None:
self._members[member.id] = member
def _store_thread(self, payload: ThreadPayload, /) -> Thread:
thread = Thread(guild=self, state=self._state, data=payload)
self._threads[thread.id] = thread
return thread
def _remove_member(self, member: Snowflake, /) -> None:
self._members.pop(member.id, None)
def _add_thread(self, thread: Thread, /) -> None:
self._threads[thread.id] = thread
def _remove_thread(self, thread: Snowflake, /) -> None:
self._threads.pop(thread.id, None)
def _clear_threads(self) -> None:
self._threads.clear()
def _remove_threads_by_channel(self, channel_id: int) -> None:
to_remove = [k for k, t in self._threads.items() if t.parent_id == channel_id]
for k in to_remove:
del self._threads[k]
def _filter_threads(self, channel_ids: Set[int]) -> Dict[int, Thread]:
to_remove: Dict[int, Thread] = {k: t for k, t in self._threads.items() if t.parent_id in channel_ids}
for k in to_remove:
del self._threads[k]
return to_remove
def __str__(self) -> str:
return self.name or ''
def __repr__(self) -> str:
attrs = (
('id', self.id),
('name', self.name),
('shard_id', self.shard_id),
('chunked', self.chunked),
('member_count', getattr(self, '_member_count', None)),
)
inner = ' '.join('%s=%r' % t for t in attrs)
return f'<Guild {inner}>'
def _update_voice_state(self, data: GuildVoiceState, channel_id: int) -> Tuple[Optional[Member], VoiceState, VoiceState]:
user_id = int(data['user_id'])
channel = self.get_channel(channel_id)
try:
# check if we should remove the voice state from cache
if channel is None:
after = self._voice_states.pop(user_id)
else:
after = self._voice_states[user_id]
before = copy.copy(after)
after._update(data, channel)
except KeyError:
# if we're here then we're getting added into the cache
after = VoiceState(data=data, channel=channel)
before = VoiceState(data=data, channel=None)
self._voice_states[user_id] = after
member = self.get_member(user_id)
if member is None:
try:
member = Member(data=data['member'], state=self._state, guild=self)
except KeyError:
member = None
return member, before, after
def _add_role(self, role: Role, /) -> None:
# roles get added to the bottom (position 1, pos 0 is @everyone)
# so since self.roles has the @everyone role, we can't increment
# its position because it's stuck at position 0. Luckily x += False
# is equivalent to adding 0. So we cast the position to a bool and
# increment it.
for r in self._roles.values():
r.position += not r.is_default()
self._roles[role.id] = role
def _remove_role(self, role_id: int, /) -> Role:
# this raises KeyError if it fails..
role = self._roles.pop(role_id)
# since it didn't, we can change the positions now
# basically the same as above except we only decrement
# the position if we're above the role we deleted.
for r in self._roles.values():
r.position -= r.position > role.position
return role
def _from_data(self, guild: GuildPayload) -> None:
# according to Stan, this is always available even if the guild is unavailable
# I don't have this guarantee when someone updates the guild.
member_count = guild.get('member_count', None)
if member_count is not None:
self._member_count: int = member_count
self.name: str = guild.get('name')
self.region: VoiceRegion = try_enum(VoiceRegion, guild.get('region'))
self.verification_level: VerificationLevel = try_enum(VerificationLevel, guild.get('verification_level'))
self.default_notifications: NotificationLevel = try_enum(
NotificationLevel, guild.get('default_message_notifications')
)
self.explicit_content_filter: ContentFilter = try_enum(ContentFilter, guild.get('explicit_content_filter', 0))
self.afk_timeout: int = guild.get('afk_timeout')
self._icon: Optional[str] = guild.get('icon')
self._banner: Optional[str] = guild.get('banner')
self.unavailable: bool = guild.get('unavailable', False)
self.id: int = int(guild['id'])
self._roles: Dict[int, Role] = {}
state = self._state # speed up attribute access
for r in guild.get('roles', []):
role = Role(guild=self, data=r, state=state)
self._roles[role.id] = role
self.mfa_level: MFALevel = guild.get('mfa_level')
self.emojis: Tuple[Emoji, ...] = tuple(map(lambda d: state.store_emoji(self, d), guild.get('emojis', [])))
self.stickers: Tuple[GuildSticker, ...] = tuple(
map(lambda d: state.store_sticker(self, d), guild.get('stickers', []))
)
self.features: List[GuildFeature] = guild.get('features', [])
self._splash: Optional[str] = guild.get('splash')
self._system_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'system_channel_id')
self.description: Optional[str] = guild.get('description')
self.max_presences: Optional[int] = guild.get('max_presences')
self.max_members: Optional[int] = guild.get('max_members')
self.max_video_channel_users: Optional[int] = guild.get('max_video_channel_users')
self.premium_tier: int = guild.get('premium_tier', 0)
self.premium_subscription_count: int = guild.get('premium_subscription_count') or 0
self.premium_progress_bar_enabled: bool = guild.get('premium_progress_bar_enabled') or False
self._system_channel_flags: int = guild.get('system_channel_flags', 0)
self.preferred_locale: Optional[str] = guild.get('preferred_locale')
self._discovery_splash: Optional[str] = guild.get('discovery_splash')
self._rules_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'rules_channel_id')
self._public_updates_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'public_updates_channel_id')
self.nsfw_level: NSFWLevel = try_enum(NSFWLevel, guild.get('nsfw_level', 0))
self.approximate_presence_count = guild.get('approximate_presence_count')
self.approximate_member_count = guild.get('approximate_member_count')
self._stage_instances: Dict[int, StageInstance] = {}
for s in guild.get('stage_instances', []):
stage_instance = StageInstance(guild=self, data=s, state=state)
self._stage_instances[stage_instance.id] = stage_instance
cache_joined = self._state.member_cache_flags.joined
self_id = self._state.self_id
for mdata in guild.get('members', []):
member = Member(data=mdata, guild=self, state=state)
if cache_joined or member.id == self_id:
self._add_member(member)
self._sync(guild)
self._large: Optional[bool] = None if member_count is None else self._member_count >= 250
self.owner_id: Optional[int] = utils._get_as_snowflake(guild, 'owner_id')
self.afk_channel: Optional[VocalGuildChannel] = self.get_channel(utils._get_as_snowflake(guild, 'afk_channel_id')) # type: ignore
for obj in guild.get('voice_states', []):
self._update_voice_state(obj, int(obj['channel_id']))
# TODO: refactor/remove?
def _sync(self, data: GuildPayload) -> None:
try:
self._large = data['large']
except KeyError:
pass
empty_tuple = tuple()
for presence in data.get('presences', []):
user_id = int(presence['user']['id'])
member = self.get_member(user_id)
if member is not None:
member._presence_update(presence, empty_tuple) # type: ignore
if 'channels' in data:
channels = data['channels']
for c in channels:
factory, ch_type = _guild_channel_factory(c['type'])
if factory:
self._add_channel(factory(guild=self, data=c, state=self._state)) # type: ignore
if 'threads' in data:
threads = data['threads']
for thread in threads:
self._add_thread(Thread(guild=self, state=self._state, data=thread))
@property
def channels(self) -> List[GuildChannel]:
"""List[:class:`abc.GuildChannel`]: A list of channels that belongs to this guild."""
return list(self._channels.values())
@property
def threads(self) -> List[Thread]:
"""List[:class:`Thread`]: A list of threads that you have permission to view.
.. versionadded:: 2.0
"""
return list(self._threads.values())
@property
def large(self) -> bool:
""":class:`bool`: Indicates if the guild is a 'large' guild.
A large guild is defined as having more than ``large_threshold`` count
members, which for this library is set to the maximum of 250.
"""
if self._large is None:
try:
return self._member_count >= 250
except AttributeError:
return len(self._members) >= 250
return self._large
@property
def voice_channels(self) -> List[VoiceChannel]:
"""List[:class:`VoiceChannel`]: A list of voice channels that belongs to this guild.
These are sorted by position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, VoiceChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def stage_channels(self) -> List[StageChannel]:
"""List[:class:`StageChannel`]: A list of stage channels that belongs to this guild.
.. versionadded:: 1.7
These are sorted by position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, StageChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def me(self) -> Member:
""":class:`Member`: Similar to :attr:`Client.user` except an instance of :class:`Member`.
This is essentially used to get the member version of yourself.
"""
self_id = self._state.user.id
# The self member is *always* cached
return self.get_member(self_id) # type: ignore
@property
def voice_client(self) -> Optional[VoiceProtocol]:
"""Optional[:class:`VoiceProtocol`]: Returns the :class:`VoiceProtocol` associated with this guild, if any."""
return self._state._get_voice_client(self.id)
@property
def text_channels(self) -> List[TextChannel]:
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
These are sorted by position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
@property
def categories(self) -> List[CategoryChannel]:
"""List[:class:`CategoryChannel`]: A list of categories that belongs to this guild.
These are sorted by position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, CategoryChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r
def by_category(self) -> List[ByCategoryItem]:
"""Returns every :class:`CategoryChannel` and their associated channels.
These channels and categories are sorted in the official Discord UI order.
If the channels do not have a category, then the first element of the tuple is
``None``.
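Example
--------
A minimal usage sketch:
.. code-block:: python3
    for category, channels in guild.by_category():
        label = category.name if category else 'No Category'
        print(label, [c.name for c in channels])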
Returns
--------
List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
The categories and their associated channels.
"""
grouped: Dict[Optional[int], List[GuildChannel]] = {}
for channel in self._channels.values():
if isinstance(channel, CategoryChannel):
grouped.setdefault(channel.id, [])
continue
try:
grouped[channel.category_id].append(channel)
except KeyError:
grouped[channel.category_id] = [channel]
def key(t: ByCategoryItem) -> Tuple[Tuple[int, int], List[GuildChannel]]:
k, v = t
return ((k.position, k.id) if k else (-1, -1), v)
_get = self._channels.get
as_list: List[ByCategoryItem] = [(_get(k), v) for k, v in grouped.items()] # type: ignore
as_list.sort(key=key)
for _, channels in as_list:
channels.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
return as_list
def _resolve_channel(self, id: Optional[int], /) -> Optional[Union[GuildChannel, Thread]]:
if id is None:
return
return self._channels.get(id) or self._threads.get(id)
def get_channel_or_thread(self, channel_id: int, /) -> Optional[Union[Thread, GuildChannel]]:
"""Returns a channel or thread with the given ID.
.. versionadded:: 2.0
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[Union[:class:`Thread`, :class:`.abc.GuildChannel`]]
The returned channel or thread or ``None`` if not found.
"""
return self._channels.get(channel_id) or self._threads.get(channel_id)
def get_channel(self, channel_id: int, /) -> Optional[GuildChannel]:
"""Returns a channel with the given ID.
.. note::
This does *not* search for threads.
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.abc.GuildChannel`]
The returned channel or ``None`` if not found.
"""
return self._channels.get(channel_id)
def get_thread(self, thread_id: int, /) -> Optional[Thread]:
"""Returns a thread with the given ID.
.. versionadded:: 2.0
Parameters
-----------
thread_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Thread`]
The returned thread or ``None`` if not found.
"""
return self._threads.get(thread_id)
@property
def system_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages.
If no channel is set, then this returns ``None``.
"""
channel_id = self._system_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def system_channel_flags(self) -> SystemChannelFlags:
""":class:`SystemChannelFlags`: Returns the guild's system channel settings."""
return SystemChannelFlags._from_value(self._system_channel_flags)
@property
def rules_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel used for the rules.
The guild must be a Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.3
"""
channel_id = self._rules_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def public_updates_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel where admins and
moderators of the guilds receive notices from Discord. The guild must be a
Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.4
"""
channel_id = self._public_updates_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def emoji_limit(self) -> int:
""":class:`int`: The maximum number of emoji slots this guild has."""
more_emoji = 200 if 'MORE_EMOJI' in self.features else 50
return max(more_emoji, self._PREMIUM_GUILD_LIMITS[self.premium_tier].emoji)
@property
def sticker_limit(self) -> int:
""":class:`int`: The maximum number of sticker slots this guild has.
.. versionadded:: 2.0
"""
more_stickers = 60 if 'MORE_STICKERS' in self.features else 0
return max(more_stickers, self._PREMIUM_GUILD_LIMITS[self.premium_tier].stickers)
@property
def bitrate_limit(self) -> float:
""":class:`float`: The maximum bitrate for voice channels this guild can have."""
vip_guild = self._PREMIUM_GUILD_LIMITS[1].bitrate if 'VIP_REGIONS' in self.features else 96e3
return max(vip_guild, self._PREMIUM_GUILD_LIMITS[self.premium_tier].bitrate)
@property
def filesize_limit(self) -> int:
""":class:`int`: The maximum number of bytes files can have when uploaded to this guild."""
return self._PREMIUM_GUILD_LIMITS[self.premium_tier].filesize
@property
def members(self) -> List[Member]:
"""List[:class:`Member`]: A list of members that belong to this guild."""
return list(self._members.values())
def get_member(self, user_id: int, /) -> Optional[Member]:
"""Returns a member with the given ID.
Parameters
-----------
user_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Member`]
The member or ``None`` if not found.
"""
return self._members.get(user_id)
@property
def premium_subscribers(self) -> List[Member]:
"""List[:class:`Member`]: A list of members who have "boosted" this guild."""
return [member for member in self.members if member.premium_since is not None]
@property
def roles(self) -> List[Role]:
"""List[:class:`Role`]: Returns a :class:`list` of the guild's roles in hierarchy order.
The first element of this list will be the lowest role in the
hierarchy.
"""
return sorted(self._roles.values())
def get_role(self, role_id: int, /) -> Optional[Role]:
"""Returns a role with the given ID.
Parameters
-----------
role_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Role`]
The role or ``None`` if not found.
"""
return self._roles.get(role_id)
@property
def default_role(self) -> Role:
""":class:`Role`: Gets the @everyone role that all members have by default."""
# The @everyone role is *always* given
return self.get_role(self.id) # type: ignore
@property
def premium_subscriber_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the premium subscriber role, AKA "boost" role, in this guild.
.. versionadded:: 1.6
"""
for role in self._roles.values():
if role.is_premium_subscriber():
return role
return None
@property
def self_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the role associated with this client's user, if any.
.. versionadded:: 1.6
"""
self_id = self._state.self_id
for role in self._roles.values():
tags = role.tags
if tags and tags.bot_id == self_id:
return role
return None
@property
def stage_instances(self) -> List[StageInstance]:
"""List[:class:`StageInstance`]: Returns a :class:`list` of the guild's stage instances that
are currently running.
.. versionadded:: 2.0
"""
return list(self._stage_instances.values())
def get_stage_instance(self, stage_instance_id: int, /) -> Optional[StageInstance]:
"""Returns a stage instance with the given ID.
.. versionadded:: 2.0
Parameters
-----------
stage_instance_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`StageInstance`]
The stage instance or ``None`` if not found.
"""
return self._stage_instances.get(stage_instance_id)
@property
def owner(self) -> Optional[Member]:
"""Optional[:class:`Member`]: The member that owns the guild."""
return self.get_member(self.owner_id) # type: ignore
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(self._state, self.id, self._banner, path='banners')
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._splash, path='splashes')
@property
def discovery_splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's discovery splash asset, if available."""
if self._discovery_splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._discovery_splash, path='discovery-splashes')
@property
def member_count(self) -> int:
""":class:`int`: Returns the true member count regardless of it being loaded fully or not.
.. warning::
Due to a Discord limitation, in order for this attribute to remain up-to-date and
accurate, it requires :attr:`Intents.members` to be specified.
"""
return self._member_count
@property
def chunked(self) -> bool:
""":class:`bool`: Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members)
@property
def shard_id(self) -> int:
""":class:`int`: Returns the shard ID for this guild if applicable."""
count = self._state.shard_count
if count is None:
return 0
return (self.id >> 22) % count
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return utils.snowflake_time(self.id)
def get_member_named(self, name: str, /) -> Optional[Member]:
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
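Example
--------
A minimal lookup sketch (the names are illustrative):
.. code-block:: python3
    member = guild.get_member_named('Jake#0001')
    if member is None:
        member = guild.get_member_named('Jake')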
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
Optional[:class:`Member`]
The member in this guild with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m: Member) -> bool:
return m.nick == name or m.name == name
return utils.find(pred, members)
def _create_channel(
self,
name: str,
channel_type: ChannelType,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[Snowflake] = None,
**options: Any,
):
if overwrites is MISSING:
overwrites = {}
elif not isinstance(overwrites, dict):
raise InvalidArgument('overwrites parameter expects a dict.')
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {'allow': allow.value, 'deny': deny.value, 'id': target.id}
if isinstance(target, Role):
payload['type'] = abc._Overwrites.ROLE
else:
payload['type'] = abc._Overwrites.MEMBER
perms.append(payload)
parent_id = category.id if category else None
return self._state.http.create_channel(
self.id, channel_type.value, name=name, parent_id=parent_id, permission_overwrites=perms, **options
)
async def create_text_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
topic: str = MISSING,
slowmode_delay: int = MISSING,
nsfw: bool = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> TextChannel:
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: :class:`str`
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for users in this channel, in seconds.
The maximum value possible is `21600`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if topic is not MISSING:
options['topic'] = topic
if slowmode_delay is not MISSING:
options['rate_limit_per_user'] = slowmode_delay
if nsfw is not MISSING:
options['nsfw'] = nsfw
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.text, category=category, reason=reason, **options
)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_voice_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
bitrate: int = MISSING,
user_limit: int = MISSING,
rtc_region: Optional[VoiceRegion] = MISSING,
video_quality_mode: VideoQualityMode = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> VoiceChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except it creates a :class:`VoiceChannel` instead.
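Example
--------
A minimal usage sketch (the name and settings are illustrative):
.. code-block:: python3
    channel = await guild.create_voice_channel('music', bitrate=96000, user_limit=10)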
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
video_quality_mode: :class:`VideoQualityMode`
The camera video quality for the voice channel's participants.
.. versionadded:: 2.0
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if bitrate is not MISSING:
options['bitrate'] = bitrate
if user_limit is not MISSING:
options['user_limit'] = user_limit
if rtc_region is not MISSING:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
if video_quality_mode is not MISSING:
options['video_quality_mode'] = video_quality_mode.value
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.voice, category=category, reason=reason, **options
)
channel = VoiceChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_stage_channel(
self,
name: str,
*,
topic: str,
position: int = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[CategoryChannel] = None,
reason: Optional[str] = None,
) -> StageChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except it creates a :class:`StageChannel` instead.
.. versionadded:: 1.7
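Example
--------
A minimal usage sketch (the name and topic are illustrative):
.. code-block:: python3
    channel = await guild.create_stage_channel('stage', topic='Community Q&A')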
Parameters
-----------
name: :class:`str`
The channel's name.
topic: :class:`str`
The new channel's topic.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`StageChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {
'topic': topic,
}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.stage_voice, category=category, reason=reason, **options
)
channel = StageChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_category(
self,
name: str,
*,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
reason: Optional[str] = None,
position: int = MISSING,
) -> CategoryChannel:
"""|coro|
Same as :meth:`create_text_channel` except it creates a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
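Example
--------
A minimal usage sketch (the category name is illustrative):
.. code-block:: python3
    category = await guild.create_category('Text Channels')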
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`CategoryChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.category, reason=reason, **options
)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
create_category_channel = create_category
async def leave(self) -> None:
"""|coro|
Leaves the guild.
.. note::
You cannot leave the guild that you own, you must delete it instead
via :meth:`delete`.
Raises
--------
HTTPException
Leaving the guild failed.
"""
await self._state.http.leave_guild(self.id)
async def delete(self) -> None:
"""|coro|
Deletes the guild. You must be the guild owner to delete the
guild.
Raises
--------
HTTPException
Deleting the guild failed.
Forbidden
You do not have permissions to delete the guild.
"""
await self._state.http.delete_guild(self.id)
async def edit(
self,
*,
reason: Optional[str] = MISSING,
name: str = MISSING,
description: Optional[str] = MISSING,
icon: Optional[bytes] = MISSING,
banner: Optional[bytes] = MISSING,
splash: Optional[bytes] = MISSING,
discovery_splash: Optional[bytes] = MISSING,
community: bool = MISSING,
region: Optional[Union[str, VoiceRegion]] = MISSING,
afk_channel: Optional[VoiceChannel] = MISSING,
owner: Snowflake = MISSING,
afk_timeout: int = MISSING,
default_notifications: NotificationLevel = MISSING,
verification_level: VerificationLevel = MISSING,
explicit_content_filter: ContentFilter = MISSING,
vanity_code: str = MISSING,
system_channel: Optional[TextChannel] = MISSING,
system_channel_flags: SystemChannelFlags = MISSING,
preferred_locale: str = MISSING,
rules_channel: Optional[TextChannel] = MISSING,
public_updates_channel: Optional[TextChannel] = MISSING,
premium_progress_bar_enabled: bool = MISSING,
) -> Guild:
r"""|coro|
Edits the guild.
You must have the :attr:`~Permissions.manage_guild` permission
to edit the guild.
.. versionchanged:: 1.4
The `rules_channel` and `public_updates_channel` keyword-only parameters were added.
.. versionchanged:: 2.0
The `discovery_splash` and `community` keyword-only parameters were added.
.. versionchanged:: 2.0
The newly updated guild is returned.
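Example
--------
A minimal usage sketch (the values are illustrative):
.. code-block:: python3
    await guild.edit(name='A New Name', reason='Renaming the guild')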
Parameters
----------
name: :class:`str`
The new name of the guild.
description: Optional[:class:`str`]
The new description of the guild. Could be ``None`` for no description.
This is only available to guilds that contain ``PUBLIC`` in :attr:`Guild.features`.
icon: :class:`bytes`
A :term:`py:bytes-like object` representing the icon. Only PNG/JPEG is supported.
GIF is only available to guilds that contain ``ANIMATED_ICON`` in :attr:`Guild.features`.
Could be ``None`` to denote removal of the icon.
banner: :class:`bytes`
A :term:`py:bytes-like object` representing the banner.
Could be ``None`` to denote removal of the banner. This is only available to guilds that contain
``BANNER`` in :attr:`Guild.features`.
splash: :class:`bytes`
A :term:`py:bytes-like object` representing the invite splash.
Only PNG/JPEG supported. Could be ``None`` to denote removing the
splash. This is only available to guilds that contain ``INVITE_SPLASH``
in :attr:`Guild.features`.
discovery_splash: :class:`bytes`
A :term:`py:bytes-like object` representing the discovery splash.
Only PNG/JPEG supported. Could be ``None`` to denote removing the
splash. This is only available to guilds that contain ``DISCOVERABLE``
in :attr:`Guild.features`.
community: :class:`bool`
Whether the guild should be a Community guild. If set to ``True``\, both ``rules_channel``
and ``public_updates_channel`` parameters are required.
region: Union[:class:`str`, :class:`VoiceRegion`]
The new region for the guild's voice communication.
afk_channel: Optional[:class:`VoiceChannel`]
The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
afk_timeout: :class:`int`
The number of seconds until someone is moved to the AFK channel.
owner: :class:`Member`
The new owner of the guild to transfer ownership to. Note that you must
be owner of the guild to do this.
verification_level: :class:`VerificationLevel`
The new verification level for the guild.
default_notifications: :class:`NotificationLevel`
The new default notification level for the guild.
explicit_content_filter: :class:`ContentFilter`
The new explicit content filter for the guild.
vanity_code: :class:`str`
The new vanity code for the guild.
system_channel: Optional[:class:`TextChannel`]
The new channel that is used for the system channel. Could be ``None`` for no system channel.
system_channel_flags: :class:`SystemChannelFlags`
The new system channel settings to use with the new system channel.
preferred_locale: :class:`str`
The new preferred locale for the guild. Used as the primary language in the guild.
If set, this must be an ISO 639 code, e.g. ``en-US`` or ``ja`` or ``zh-CN``.
rules_channel: Optional[:class:`TextChannel`]
The new channel that is used for rules. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no rules
channel.
public_updates_channel: Optional[:class:`TextChannel`]
The new channel that is used for public updates from Discord. This is only available to
guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no
public updates channel.
premium_progress_bar_enabled: :class:`bool`
Whether the guild should have premium progress bar enabled.
reason: Optional[:class:`str`]
The reason for editing this guild. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to edit the guild.
HTTPException
Editing the guild failed.
InvalidArgument
The image format passed in to ``icon`` is invalid. It must be
PNG or JPG. This is also raised if you are not the owner of the
guild and request an ownership transfer.
Returns
--------
:class:`Guild`
The newly updated guild. Note that this has the same limitations as
mentioned in :meth:`Client.fetch_guild` and may not have full data.
"""
http = self._state.http
if vanity_code is not MISSING:
await http.change_vanity_code(self.id, vanity_code, reason=reason)
fields: Dict[str, Any] = {}
if name is not MISSING:
fields['name'] = name
if description is not MISSING:
fields['description'] = description
if preferred_locale is not MISSING:
fields['preferred_locale'] = preferred_locale
if afk_timeout is not MISSING:
fields['afk_timeout'] = afk_timeout
if icon is not MISSING:
if icon is None:
fields['icon'] = icon
else:
fields['icon'] = utils._bytes_to_base64_data(icon)
if banner is not MISSING:
if banner is None:
fields['banner'] = banner
else:
fields['banner'] = utils._bytes_to_base64_data(banner)
if splash is not MISSING:
if splash is None:
fields['splash'] = splash
else:
fields['splash'] = utils._bytes_to_base64_data(splash)
if discovery_splash is not MISSING:
if discovery_splash is None:
fields['discovery_splash'] = discovery_splash
else:
fields['discovery_splash'] = utils._bytes_to_base64_data(discovery_splash)
if default_notifications is not MISSING:
if not isinstance(default_notifications, NotificationLevel):
raise InvalidArgument('default_notifications field must be of type NotificationLevel')
fields['default_message_notifications'] = default_notifications.value
if afk_channel is not MISSING:
if afk_channel is None:
fields['afk_channel_id'] = afk_channel
else:
fields['afk_channel_id'] = afk_channel.id
if system_channel is not MISSING:
if system_channel is None:
fields['system_channel_id'] = system_channel
else:
fields['system_channel_id'] = system_channel.id
if rules_channel is not MISSING:
if rules_channel is None:
fields['rules_channel_id'] = rules_channel
else:
fields['rules_channel_id'] = rules_channel.id
if public_updates_channel is not MISSING:
if public_updates_channel is None:
fields['public_updates_channel_id'] = public_updates_channel
else:
fields['public_updates_channel_id'] = public_updates_channel.id
if owner is not MISSING:
if self.owner_id != self._state.self_id:
raise InvalidArgument('To transfer ownership you must be the owner of the guild.')
fields['owner_id'] = owner.id
if region is not MISSING:
fields['region'] = str(region)
if verification_level is not MISSING:
if not isinstance(verification_level, VerificationLevel):
raise InvalidArgument('verification_level field must be of type VerificationLevel')
fields['verification_level'] = verification_level.value
if explicit_content_filter is not MISSING:
if not isinstance(explicit_content_filter, ContentFilter):
raise InvalidArgument('explicit_content_filter field must be of type ContentFilter')
fields['explicit_content_filter'] = explicit_content_filter.value
if system_channel_flags is not MISSING:
if not isinstance(system_channel_flags, SystemChannelFlags):
raise InvalidArgument('system_channel_flags field must be of type SystemChannelFlags')
fields['system_channel_flags'] = system_channel_flags.value
if community is not MISSING:
features = []
if community:
if 'rules_channel_id' in fields and 'public_updates_channel_id' in fields:
features.append('COMMUNITY')
else:
raise InvalidArgument(
'community field requires both rules_channel and public_updates_channel fields to be provided'
)
fields['features'] = features
if premium_progress_bar_enabled is not MISSING:
fields['premium_progress_bar_enabled'] = premium_progress_bar_enabled
data = await http.edit_guild(self.id, reason=reason, **fields)
return Guild(data=data, state=self._state)
async def fetch_channels(self) -> Sequence[GuildChannel]:
"""|coro|
Retrieves all :class:`abc.GuildChannel` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`channels` instead.
.. versionadded:: 1.2
Raises
-------
InvalidData
An unknown channel type was received from Discord.
HTTPException
Retrieving the channels failed.
Returns
-------
Sequence[:class:`abc.GuildChannel`]
All channels in the guild.
"""
data = await self._state.http.get_all_guild_channels(self.id)
def convert(d):
factory, ch_type = _guild_channel_factory(d['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(d))
channel = factory(guild=self, state=self._state, data=d)
return channel
return [convert(d) for d in data]
async def active_threads(self) -> List[Thread]:
"""|coro|
Returns a list of active :class:`Thread` that the client can access.
This includes both private and public threads.
.. versionadded:: 2.0
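Example
--------
A minimal usage sketch:
.. code-block:: python3
    threads = await guild.active_threads()
    for thread in threads:
        print(thread.name)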
Raises
------
HTTPException
The request to get the active threads failed.
Returns
--------
List[:class:`Thread`]
The active threads
"""
data = await self._state.http.get_active_threads(self.id)
threads = [Thread(guild=self, state=self._state, data=d) for d in data.get('threads', [])]
thread_lookup: Dict[int, Thread] = {thread.id: thread for thread in threads}
for member in data.get('members', []):
thread = thread_lookup.get(int(member['id']))
if thread is not None:
thread._add_member(ThreadMember(parent=thread, data=member))
return threads
# TODO: Remove Optional typing here when async iterators are refactored
def fetch_members(self, *, limit: int = 1000, after: Optional[SnowflakeTime] = None) -> MemberIterator:
"""Retrieves an :class:`.AsyncIterator` that enables receiving the guild's members. In order to use this,
:meth:`Intents.members` must be enabled.
.. note::
This method is an API call. For general usage, consider :attr:`members` instead.
.. versionadded:: 1.3
All parameters are optional.
Parameters
----------
limit: Optional[:class:`int`]
The number of members to retrieve. Defaults to 1000.
Pass ``None`` to fetch all members. Note that this is potentially slow.
after: Optional[Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve members after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
ClientException
The members intent is not enabled.
HTTPException
Getting the members failed.
Yields
------
:class:`.Member`
The member with the member data parsed.
Examples
--------
Usage ::
async for member in guild.fetch_members(limit=150):
print(member.name)
Flattening into a list ::
members = await guild.fetch_members(limit=150).flatten()
# members is now a list of Member...
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
return MemberIterator(self, limit=limit, after=after)
async def fetch_member(self, member_id: int, /) -> Member:
"""|coro|
Retrieves a :class:`Member` from a guild ID, and a member ID.
.. note::
This method is an API call. If you have :attr:`Intents.members` and member cache enabled, consider :meth:`get_member` instead.
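Example
--------
A minimal usage sketch (the member ID is illustrative):
.. code-block:: python3
    member = await guild.fetch_member(123456789012345678)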
Parameters
-----------
member_id: :class:`int`
The member's ID to fetch from.
Raises
-------
Forbidden
You do not have access to the guild.
HTTPException
Fetching the member failed.
Returns
--------
:class:`Member`
The member from the member ID.
"""
data = await self._state.http.get_member(self.id, member_id)
return Member(data=data, state=self._state, guild=self)
async def fetch_ban(self, user: Snowflake) -> BanEntry:
"""|coro|
Retrieves the :class:`BanEntry` for a user.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
:class:`BanEntry`
The :class:`BanEntry` object for the specified user.
"""
data: BanPayload = await self._state.http.get_ban(user.id, self.id)
return BanEntry(user=User(state=self._state, data=data['user']), reason=data['reason'])
async def fetch_channel(self, channel_id: int, /) -> Union[GuildChannel, Thread]:
"""|coro|
Retrieves a :class:`.abc.GuildChannel` or :class:`.Thread` with the specified ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_channel_or_thread` instead.
.. versionadded:: 2.0
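Example
--------
A minimal usage sketch (the channel ID is illustrative):
.. code-block:: python3
    channel = await guild.fetch_channel(123456789012345678)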
Raises
-------
:exc:`.InvalidData`
An unknown channel type was received from Discord
or the guild the channel belongs to is not the same
as the one this object points to.
:exc:`.HTTPException`
Retrieving the channel failed.
:exc:`.NotFound`
Invalid Channel ID.
:exc:`.Forbidden`
You do not have permission to fetch this channel.
Returns
--------
Union[:class:`.abc.GuildChannel`, :class:`.Thread`]
The channel from the ID.
"""
data = await self._state.http.get_channel(channel_id)
factory, ch_type = _threaded_guild_channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
raise InvalidData('Channel ID resolved to a private channel')
guild_id = int(data['guild_id'])
if self.id != guild_id:
raise InvalidData('Guild ID resolved to a different guild')
channel: GuildChannel = factory(guild=self, state=self._state, data=data) # type: ignore
return channel
async def bans(self) -> List[BanEntry]:
"""|coro|
Retrieves all the users that are banned from the guild as a :class:`list` of :class:`BanEntry`.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
--------
List[:class:`BanEntry`]
A list of :class:`BanEntry` objects.
"""
data: List[BanPayload] = await self._state.http.get_bans(self.id)
return [BanEntry(user=User(state=self._state, data=e['user']), reason=e['reason']) for e in data]
async def prune_members(
self,
*,
days: int,
compute_prune_count: bool = True,
roles: List[Snowflake] = MISSING,
reason: Optional[str] = None,
) -> Optional[int]:
r"""|coro|
Prunes the guild from its inactive members.
The inactive members are denoted if they have not logged on in
``days`` number of days and they have no roles.
You must have the :attr:`~Permissions.kick_members` permission
to use this.
To check how many members you would prune without actually pruning,
see the :meth:`estimate_pruned_members` function.
To prune members that have specific roles see the ``roles`` parameter.
.. versionchanged:: 1.4
The ``roles`` keyword-only parameter was added.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
compute_prune_count: :class:`bool`
Whether to compute the prune count. This defaults to ``True``
which makes it prone to timeouts in very large guilds. In order
to prevent timeouts, you must set this to ``False``. If this is
set to ``False``\, then this function will always return ``None``.
roles: List[:class:`abc.Snowflake`]
A list of :class:`abc.Snowflake` that represent roles to include in the pruning process. If a member
has a role that is not specified, they'll be excluded.
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while pruning members.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
Optional[:class:`int`]
The number of members pruned. If ``compute_prune_count`` is ``False``
then this returns ``None``.
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
role_ids = [str(role.id) for role in roles]
else:
role_ids = []
data = await self._state.http.prune_members(
self.id, days, compute_prune_count=compute_prune_count, roles=role_ids, reason=reason
)
return data['pruned']
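    # Usage sketch (hedged, not from the library docs): preview the prune count
    # first, then run the prune, using only coroutines documented in this class.
    # The day count and reason are hypothetical.
    #
    #     estimate = await guild.estimate_pruned_members(days=30)
    #     if estimate:
    #         pruned = await guild.prune_members(days=30, reason='inactivity sweep')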
async def templates(self) -> List[Template]:
"""|coro|
Gets the list of templates from this guild.
Requires :attr:`~.Permissions.manage_guild` permissions.
.. versionadded:: 1.7
Raises
-------
Forbidden
You don't have permissions to get the templates.
Returns
--------
List[:class:`Template`]
The templates for this guild.
"""
from .template import Template
data = await self._state.http.guild_templates(self.id)
return [Template(data=d, state=self._state) for d in data]
async def webhooks(self) -> List[Webhook]:
"""|coro|
Gets the list of webhooks from this guild.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this guild.
"""
from .webhook import Webhook
data = await self._state.http.guild_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def estimate_pruned_members(self, *, days: int, roles: List[Snowflake] = MISSING) -> int:
"""|coro|
Similar to :meth:`prune_members` except instead of actually
pruning members, it returns how many members it would prune
from the guild had it been called.
Parameters
-----------
days: :class:`int`
The number of days before counting as inactive.
roles: List[:class:`abc.Snowflake`]
A list of :class:`abc.Snowflake` that represent roles to include in the estimate. If a member
has a role that is not specified, they'll be excluded.
.. versionadded:: 1.7
Raises
-------
Forbidden
You do not have permissions to prune members.
HTTPException
An error occurred while fetching the prune members estimate.
InvalidArgument
An integer was not passed for ``days``.
Returns
---------
:class:`int`
The number of members estimated to be pruned.
"""
if not isinstance(days, int):
raise InvalidArgument(f'Expected int for ``days``, received {days.__class__.__name__} instead.')
if roles:
role_ids = [str(role.id) for role in roles]
else:
role_ids = []
data = await self._state.http.estimate_pruned_members(self.id, days, role_ids)
return data['pruned']
async def invites(self) -> List[Invite]:
"""|coro|
Returns a list of all active instant invites from the guild.
You must have the :attr:`~Permissions.manage_guild` permission to get
this information.
Raises
-------
Forbidden
You do not have proper permissions to get the information.
HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`Invite`]
The list of invites that are currently active.
"""
data = await self._state.http.invites_from(self.id)
result = []
for invite in data:
channel = self.get_channel(int(invite['channel']['id']))
result.append(Invite(state=self._state, data=invite, guild=self, channel=channel))
return result
async def create_template(self, *, name: str, description: str = MISSING) -> Template:
"""|coro|
Creates a template for the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.7
Parameters
-----------
name: :class:`str`
The name of the template.
description: :class:`str`
The description of the template.
"""
from .template import Template
payload = {'name': name}
if description:
payload['description'] = description
data = await self._state.http.create_template(self.id, payload)
return Template(state=self._state, data=data)
async def create_integration(self, *, type: str, id: int) -> None:
"""|coro|
Attaches an integration to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Parameters
-----------
type: :class:`str`
The integration type (e.g. Twitch).
id: :class:`int`
The integration ID.
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
The account could not be found.
"""
await self._state.http.create_integration(self.id, type, id)
async def integrations(self) -> List[Integration]:
"""|coro|
Returns a list of all integrations attached to the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
do this.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permission to create the integration.
HTTPException
Fetching the integrations failed.
Returns
--------
List[:class:`Integration`]
The list of integrations that are attached to the guild.
"""
data = await self._state.http.get_all_integrations(self.id)
def convert(d):
factory, _ = _integration_factory(d['type'])
if factory is None:
raise InvalidData('Unknown integration type {type!r} for integration ID {id}'.format_map(d))
return factory(guild=self, data=d)
return [convert(d) for d in data]
async def fetch_stickers(self) -> List[GuildSticker]:
r"""|coro|
Retrieves a list of all :class:`Sticker`\s for the guild.
.. versionadded:: 2.0
.. note::
This method is an API call. For general usage, consider :attr:`stickers` instead.
Raises
---------
HTTPException
An error occurred fetching the stickers.
Returns
--------
List[:class:`GuildSticker`]
The retrieved stickers.
"""
data = await self._state.http.get_all_guild_stickers(self.id)
return [GuildSticker(state=self._state, data=d) for d in data]
async def fetch_sticker(self, sticker_id: int, /) -> GuildSticker:
"""|coro|
Retrieves a custom :class:`Sticker` from the guild.
.. versionadded:: 2.0
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`stickers` instead.
Parameters
-------------
sticker_id: :class:`int`
The sticker's ID.
Raises
---------
NotFound
The sticker requested could not be found.
HTTPException
An error occurred fetching the sticker.
Returns
--------
:class:`GuildSticker`
The retrieved sticker.
"""
data = await self._state.http.get_guild_sticker(self.id, sticker_id)
return GuildSticker(state=self._state, data=data)
async def create_sticker(
self,
*,
name: str,
description: Optional[str] = None,
emoji: str,
file: File,
reason: Optional[str] = None,
) -> GuildSticker:
"""|coro|
Creates a :class:`Sticker` for the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
.. versionadded:: 2.0
Parameters
-----------
name: :class:`str`
The sticker name. Must be at least 2 characters.
description: Optional[:class:`str`]
The sticker's description. Can be ``None``.
emoji: :class:`str`
The name of a unicode emoji that represents the sticker's expression.
file: :class:`File`
The file of the sticker to upload.
reason: :class:`str`
The reason for creating this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to create stickers.
HTTPException
An error occurred creating a sticker.
Returns
--------
:class:`GuildSticker`
The created sticker.
"""
payload = {
'name': name,
}
if description:
payload['description'] = description
try:
emoji = unicodedata.name(emoji)
except TypeError:
pass
else:
emoji = emoji.replace(' ', '_')
payload['tags'] = emoji
data = await self._state.http.create_guild_sticker(self.id, payload, file, reason)
return self._state.store_sticker(self, data)
async def delete_sticker(self, sticker: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Sticker` from the guild.
You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
do this.
.. versionadded:: 2.0
Parameters
-----------
sticker: :class:`abc.Snowflake`
The sticker you are deleting.
reason: Optional[:class:`str`]
The reason for deleting this sticker. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete stickers.
HTTPException
An error occurred deleting the sticker.
"""
await self._state.http.delete_guild_sticker(self.id, sticker.id, reason)
async def fetch_emojis(self) -> List[Emoji]:
r"""|coro|
Retrieves all custom :class:`Emoji`\s from the guild.
.. note::
This method is an API call. For general usage, consider :attr:`emojis` instead.
Raises
---------
HTTPException
An error occurred fetching the emojis.
Returns
--------
List[:class:`Emoji`]
The retrieved emojis.
"""
data = await self._state.http.get_all_custom_emojis(self.id)
return [Emoji(guild=self, state=self._state, data=d) for d in data]
async def fetch_emoji(self, emoji_id: int, /) -> Emoji:
"""|coro|
Retrieves a custom :class:`Emoji` from the guild.
.. note::
This method is an API call.
For general usage, consider iterating over :attr:`emojis` instead.
Parameters
-------------
emoji_id: :class:`int`
The emoji's ID.
Raises
---------
NotFound
The emoji requested could not be found.
HTTPException
An error occurred fetching the emoji.
Returns
--------
:class:`Emoji`
The retrieved emoji.
"""
data = await self._state.http.get_custom_emoji(self.id, emoji_id)
return Emoji(guild=self, state=self._state, data=data)
async def create_custom_emoji(
self,
*,
name: str,
image: bytes,
roles: List[Role] = MISSING,
reason: Optional[str] = None,
) -> Emoji:
r"""|coro|
Creates a custom :class:`Emoji` for the guild.
There is currently a limit of 50 static and animated emojis respectively per guild,
unless the guild has the ``MORE_EMOJI`` feature which extends the limit to 200.
You must have the :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
name: :class:`str`
The emoji name. Must be at least 2 characters.
image: :class:`bytes`
The :term:`py:bytes-like object` representing the image data to use.
Only JPG, PNG and GIF images are supported.
roles: List[:class:`Role`]
A :class:`list` of :class:`Role`\s that can use this emoji. Leave empty to make it available to everyone.
reason: Optional[:class:`str`]
The reason for creating this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to create emojis.
HTTPException
An error occurred creating an emoji.
Returns
--------
:class:`Emoji`
The created emoji.
"""
img = utils._bytes_to_base64_data(image)
if roles:
role_ids = [role.id for role in roles]
else:
role_ids = []
data = await self._state.http.create_custom_emoji(self.id, name, img, roles=role_ids, reason=reason)
return self._state.store_emoji(self, data)
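    # Usage sketch (hedged; the file name and emoji name are hypothetical):
    #
    #     with open('thonk.png', 'rb') as fp:
    #         image = fp.read()
    #     emoji = await guild.create_custom_emoji(name='thonk', image=image)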
async def delete_emoji(self, emoji: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom :class:`Emoji` from the guild.
You must have :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
emoji: :class:`abc.Snowflake`
The emoji you are deleting.
reason: Optional[:class:`str`]
The reason for deleting this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete emojis.
HTTPException
An error occurred deleting the emoji.
"""
await self._state.http.delete_custom_emoji(self.id, emoji.id, reason=reason)
async def fetch_roles(self) -> List[Role]:
"""|coro|
Retrieves all :class:`Role` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`roles` instead.
.. versionadded:: 1.3
Raises
-------
HTTPException
Retrieving the roles failed.
Returns
-------
List[:class:`Role`]
All roles in the guild.
"""
data = await self._state.http.get_roles(self.id)
return [Role(guild=self, state=self._state, data=d) for d in data]
@overload
async def create_role(
self,
*,
reason: Optional[str] = ...,
name: str = ...,
permissions: Permissions = ...,
colour: Union[Colour, int] = ...,
hoist: bool = ...,
mentionable: bool = ...,
) -> Role:
...
@overload
async def create_role(
self,
*,
reason: Optional[str] = ...,
name: str = ...,
permissions: Permissions = ...,
color: Union[Colour, int] = ...,
hoist: bool = ...,
mentionable: bool = ...,
) -> Role:
...
async def create_role(
self,
*,
name: str = MISSING,
permissions: Permissions = MISSING,
color: Union[Colour, int] = MISSING,
colour: Union[Colour, int] = MISSING,
hoist: bool = MISSING,
mentionable: bool = MISSING,
reason: Optional[str] = None,
) -> Role:
"""|coro|
Creates a :class:`Role` for the guild.
All fields are optional.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionchanged:: 1.6
Can now pass ``int`` to ``colour`` keyword-only parameter.
Parameters
-----------
name: :class:`str`
The role name. Defaults to 'new role'.
permissions: :class:`Permissions`
The permissions to have. Defaults to no permissions.
colour: Union[:class:`Colour`, :class:`int`]
The colour for the role. Defaults to :meth:`Colour.default`.
This is aliased to ``color`` as well.
hoist: :class:`bool`
Indicates if the role should be shown separately in the member list.
Defaults to ``False``.
mentionable: :class:`bool`
Indicates if the role should be mentionable by others.
Defaults to ``False``.
reason: Optional[:class:`str`]
The reason for creating this role. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to create the role.
HTTPException
Creating the role failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
:class:`Role`
The newly created role.
"""
fields: Dict[str, Any] = {}
if permissions is not MISSING:
fields['permissions'] = str(permissions.value)
else:
fields['permissions'] = '0'
actual_colour = colour or color or Colour.default()
if isinstance(actual_colour, int):
fields['color'] = actual_colour
else:
fields['color'] = actual_colour.value
if hoist is not MISSING:
fields['hoist'] = hoist
if mentionable is not MISSING:
fields['mentionable'] = mentionable
if name is not MISSING:
fields['name'] = name
data = await self._state.http.create_role(self.id, reason=reason, **fields)
role = Role(guild=self, data=data, state=self._state)
# TODO: add to cache
return role
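    # Usage sketch (hedged; the role name and colour are hypothetical, built
    # from the public Permissions/Colour helpers):
    #
    #     perms = discord.Permissions(read_messages=True, send_messages=True)
    #     role = await guild.create_role(name='Moderator', permissions=perms,
    #                                    colour=discord.Colour.blue(), hoist=True)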
async def edit_role_positions(self, positions: Dict[Snowflake, int], *, reason: Optional[str] = None) -> List[Role]:
"""|coro|
Bulk edits a list of :class:`Role` in the guild.
You must have the :attr:`~Permissions.manage_roles` permission to
do this.
.. versionadded:: 1.4
Example:
.. code-block:: python3
positions = {
bots_role: 1, # penultimate role
tester_role: 2,
admin_role: 6
}
await guild.edit_role_positions(positions=positions)
Parameters
-----------
positions
A :class:`dict` of :class:`Role` to :class:`int` to change the positions
of each given role.
reason: Optional[:class:`str`]
The reason for editing the role positions. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to move the roles.
HTTPException
Moving the roles failed.
InvalidArgument
An invalid keyword argument was given.
Returns
--------
List[:class:`Role`]
A list of all the roles in the guild.
"""
if not isinstance(positions, dict):
raise InvalidArgument('positions parameter expects a dict.')
role_positions: List[Dict[str, Any]] = []
for role, position in positions.items():
payload = {'id': role.id, 'position': position}
role_positions.append(payload)
data = await self._state.http.move_role_position(self.id, role_positions, reason=reason)
roles: List[Role] = []
for d in data:
role = Role(guild=self, data=d, state=self._state)
roles.append(role)
self._roles[role.id] = role
return roles
async def kick(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Kicks a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.kick_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to kick from their guild.
reason: Optional[:class:`str`]
The reason the user got kicked.
Raises
-------
Forbidden
You do not have the proper permissions to kick.
HTTPException
Kicking failed.
"""
await self._state.http.kick(user.id, self.id, reason=reason)
async def ban(
self,
user: Snowflake,
*,
reason: Optional[str] = None,
delete_message_days: Literal[0, 1, 2, 3, 4, 5, 6, 7] = 1,
) -> None:
"""|coro|
Bans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to ban from their guild.
delete_message_days: :class:`int`
The number of days worth of messages to delete from the user
in the guild. The minimum is 0 and the maximum is 7.
reason: Optional[:class:`str`]
The reason the user got banned.
Raises
-------
Forbidden
You do not have the proper permissions to ban.
HTTPException
Banning failed.
"""
await self._state.http.ban(user.id, self.id, delete_message_days, reason=reason)
async def unban(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
"""|coro|
Unbans a user from the guild.
The user must meet the :class:`abc.Snowflake` abc.
You must have the :attr:`~Permissions.ban_members` permission to
do this.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to unban.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to unban.
HTTPException
Unbanning failed.
"""
await self._state.http.unban(user.id, self.id, reason=reason)
async def vanity_invite(self) -> Optional[Invite]:
"""|coro|
Returns the guild's special vanity invite.
The guild must have ``VANITY_URL`` in :attr:`~Guild.features`.
You must have the :attr:`~Permissions.manage_guild` permission to use
this as well.
Raises
-------
Forbidden
You do not have the proper permissions to get this.
HTTPException
Retrieving the vanity invite failed.
Returns
--------
Optional[:class:`Invite`]
The special vanity invite. If ``None`` then the guild does not
have a vanity invite set.
"""
# we start with { code: abc }
payload = await self._state.http.get_vanity_code(self.id)
if not payload['code']:
return None
# get the vanity URL channel since default channels aren't
# reliable or a thing anymore
data = await self._state.http.get_invite(payload['code'])
channel = self.get_channel(int(data['channel']['id']))
payload['revoked'] = False
payload['temporary'] = False
payload['max_uses'] = 0
payload['max_age'] = 0
payload['uses'] = payload.get('uses', 0)
return Invite(state=self._state, data=payload, guild=self, channel=channel)
# TODO: use MISSING when async iterators get refactored
def audit_logs(
self,
*,
limit: Optional[int] = 100,
before: Optional[SnowflakeTime] = None,
after: Optional[SnowflakeTime] = None,
oldest_first: Optional[bool] = None,
user: Snowflake = None,
action: AuditLogAction = None,
) -> AuditLogIterator:
"""Returns an :class:`AsyncIterator` that enables receiving the guild's audit logs.
You must have the :attr:`~Permissions.view_audit_log` permission to use this.
Examples
----------
Getting the first 100 entries: ::
async for entry in guild.audit_logs(limit=100):
print(f'{entry.user} did {entry.action} to {entry.target}')
Getting entries for a specific action: ::
async for entry in guild.audit_logs(action=discord.AuditLogAction.ban):
print(f'{entry.user} banned {entry.target}')
Getting entries made by a specific user: ::
entries = await guild.audit_logs(limit=None, user=guild.me).flatten()
await channel.send(f'I made {len(entries)} moderation actions.')
Parameters
-----------
limit: Optional[:class:`int`]
The number of entries to retrieve. If ``None`` retrieve all entries.
before: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries before this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
Retrieve entries after this date or entry.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
oldest_first: :class:`bool`
If set to ``True``, return entries in oldest->newest order. Defaults to ``True`` if
``after`` is specified, otherwise ``False``.
user: :class:`abc.Snowflake`
The moderator to filter entries from.
action: :class:`AuditLogAction`
The action to filter with.
Raises
-------
Forbidden
You are not allowed to fetch audit logs
HTTPException
An error occurred while fetching the audit logs.
Yields
--------
:class:`AuditLogEntry`
The audit log entry.
"""
if user is not None:
user_id = user.id
else:
user_id = None
if action:
action = action.value
return AuditLogIterator(
self, before=before, after=after, limit=limit, oldest_first=oldest_first, user_id=user_id, action_type=action
)
async def widget(self) -> Widget:
"""|coro|
Returns the widget of the guild.
.. note::
The guild must have the widget enabled to get this information.
Raises
-------
Forbidden
The widget for this guild is disabled.
HTTPException
Retrieving the widget failed.
Returns
--------
:class:`Widget`
The guild's widget.
"""
data = await self._state.http.get_widget(self.id)
return Widget(state=self._state, data=data)
async def edit_widget(self, *, enabled: bool = MISSING, channel: Optional[Snowflake] = MISSING) -> None:
"""|coro|
Edits the widget of the guild.
You must have the :attr:`~Permissions.manage_guild` permission to
        use this.
.. versionadded:: 2.0
Parameters
-----------
enabled: :class:`bool`
Whether to enable the widget for the guild.
channel: Optional[:class:`~discord.abc.Snowflake`]
The new widget channel. ``None`` removes the widget channel.
Raises
-------
Forbidden
You do not have permission to edit the widget.
HTTPException
Editing the widget failed.
"""
payload = {}
if channel is not MISSING:
payload['channel_id'] = None if channel is None else channel.id
if enabled is not MISSING:
payload['enabled'] = enabled
await self._state.http.edit_widget(self.id, payload=payload)
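    # Usage sketch (hedged; assumes a cached guild with a system channel set):
    #
    #     await guild.edit_widget(enabled=True, channel=guild.system_channel)
    #     await guild.edit_widget(channel=None)  # keep the widget, drop the channel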
async def chunk(self, *, cache: bool = True) -> None:
"""|coro|
Requests all members that belong to this guild. In order to use this,
:meth:`Intents.members` must be enabled.
This is a websocket operation and can be slow.
.. versionadded:: 1.5
Parameters
-----------
cache: :class:`bool`
Whether to cache the members as well.
Raises
-------
ClientException
The members intent is not enabled.
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
if not self._state.is_guild_evicted(self):
return await self._state.chunk_guild(self, cache=cache)
async def query_members(
self,
query: Optional[str] = None,
*,
limit: int = 5,
user_ids: Optional[List[int]] = None,
presences: bool = False,
cache: bool = True,
) -> List[Member]:
"""|coro|
Request members that belong to this guild whose username starts with
the query given.
This is a websocket operation and can be slow.
.. versionadded:: 1.3
Parameters
-----------
query: Optional[:class:`str`]
            The string that the usernames start with.
limit: :class:`int`
The maximum number of members to send back. This must be
a number between 5 and 100.
presences: :class:`bool`
Whether to request for presences to be provided. This defaults
to ``False``.
.. versionadded:: 1.6
cache: :class:`bool`
Whether to cache the members internally. This makes operations
such as :meth:`get_member` work for those that matched.
user_ids: Optional[List[:class:`int`]]
List of user IDs to search for. If the user ID is not in the guild then it won't be returned.
.. versionadded:: 1.4
Raises
-------
asyncio.TimeoutError
The query timed out waiting for the members.
ValueError
Invalid parameters were passed to the function
ClientException
The presences intent is not enabled.
Returns
--------
List[:class:`Member`]
The list of members that have matched the query.
"""
if presences and not self._state._intents.presences:
raise ClientException('Intents.presences must be enabled to use this.')
        if query == '':
            raise ValueError('Cannot pass empty query string.')
        if query is None and user_ids is None:
            raise ValueError('Must pass either query or user_ids')
if user_ids is not None and query is not None:
raise ValueError('Cannot pass both query and user_ids')
if user_ids is not None and not user_ids:
raise ValueError('user_ids must contain at least 1 value')
limit = min(100, limit or 5)
return await self._state.query_members(
self, query=query, limit=limit, user_ids=user_ids, presences=presences, cache=cache
)
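    # Usage sketch (hedged; the query string and user ID are hypothetical):
    #
    #     matched = await guild.query_members(query='Dan', limit=10)
    #     by_id = await guild.query_members(user_ids=[123456789012345678], cache=False)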
async def change_voice_state(
self, *, channel: Optional[VocalGuildChannel], self_mute: bool = False, self_deaf: bool = False
):
"""|coro|
Changes client's voice state in the guild.
.. versionadded:: 1.4
Parameters
-----------
channel: Optional[:class:`VoiceChannel`]
Channel the client wants to join. Use ``None`` to disconnect.
self_mute: :class:`bool`
Indicates if the client should be self-muted.
self_deaf: :class:`bool`
Indicates if the client should be self-deafened.
"""
ws = self._state._get_websocket(self.id)
channel_id = channel.id if channel else None
await ws.voice_state(self.id, channel_id, self_mute, self_deaf)
async def welcome_screen(self):
"""|coro|
Returns the :class:`WelcomeScreen` of the guild.
The guild must have ``COMMUNITY`` in :attr:`~Guild.features`.
You must have the :attr:`~Permissions.manage_guild` permission in order to get this.
.. versionadded:: 2.0
Raises
-------
Forbidden
You do not have the proper permissions to get this.
HTTPException
Retrieving the welcome screen failed somehow.
NotFound
            The guild doesn't have a welcome screen, or the community feature is disabled.
Returns
--------
:class:`WelcomeScreen`
            The welcome screen of the guild.
"""
data = await self._state.http.get_welcome_screen(self.id)
return WelcomeScreen(data=data, guild=self)
@overload
async def edit_welcome_screen(
self,
*,
description: Optional[str] = ...,
welcome_channels: Optional[List[WelcomeScreenChannel]] = ...,
enabled: Optional[bool] = ...,
) -> WelcomeScreen:
...
@overload
async def edit_welcome_screen(self) -> None:
...
async def edit_welcome_screen(self, **options):
"""|coro|
A shorthand for :attr:`WelcomeScreen.edit` without fetching the welcome screen.
You must have the :attr:`~Permissions.manage_guild` permission in the
guild to do this.
The guild must have ``COMMUNITY`` in :attr:`Guild.features`
Parameters
------------
description: Optional[:class:`str`]
            The new description of the welcome screen.
        welcome_channels: Optional[List[:class:`WelcomeChannel`]]
            The welcome channels. The order of the channels will be the same as the passed list order.
enabled: Optional[:class:`bool`]
Whether the welcome screen should be displayed.
reason: Optional[:class:`str`]
The reason that shows up on audit log.
Raises
-------
HTTPException
Editing the welcome screen failed somehow.
Forbidden
You don't have permissions to edit the welcome screen.
NotFound
This welcome screen does not exist.
Returns
--------
:class:`WelcomeScreen`
The edited welcome screen.
"""
        welcome_channels = options.get('welcome_channels') or []
        welcome_channels_data = []
        for channel in welcome_channels:
            if not isinstance(channel, WelcomeScreenChannel):
                raise TypeError('welcome_channels parameter must be a list of WelcomeScreenChannel.')
            welcome_channels_data.append(channel.to_dict())
        if 'welcome_channels' in options:
            # only replace the channels if the caller actually passed them
            options['welcome_channels'] = welcome_channels_data
        reason = options.pop('reason', None)
        if options:
            new = await self._state.http.edit_welcome_screen(self.id, options, reason=reason)
            return WelcomeScreen(data=new, guild=self)
|
py | 1a31fb1b8e8ef75ddda8ecd49909a8e1665f231b | import sys
import xlrd
import csv
from main.model.model import db_save
template = {'fecha_hora': '',
'vereda': '',
'PM2_5_CC_ICA': -9999.0,
'altitud': -9999.0,
'estado': '',
'online': '',
'longitude': -9999.0,
'barrio': '',
'ciudad': '',
'temperatura': -9999.0,
'humedad_relativa': -9999.0,
'latitude': -9999.0,
'nombre': '',
'PM2_5_last': -9999.0,
'PM2_5_mean': -9999.0,
'codigo': -9999.0}
def load_xlsx(datafile):
workbook = xlrd.open_workbook(datafile)
worksheet = workbook.sheet_by_index(0)
print(">Msg: Reading '"+datafile+"'")
print("- Filas: "+str(worksheet.nrows))
print("- Columnas: "+str(worksheet.ncols))
for fila in range(worksheet.nrows):
#Almacenará unicamente una medicion a la vez
medicion = []
for columna in range(worksheet.ncols):
medicion.append(worksheet.cell(fila,columna).value)
print(medicion)
#print("aquí se guardaría el dato")
def load_csv(datafile):
censors = []
with open(datafile,'r') as csvfile:
reader = csv.reader(csvfile)
#cont = 0
row1 = True
for row in reader:
if row1 == True:
censors = row
row1 = False
continue
#cont += 1
index = 0
date = ''
for field in row:
if index == 0:
date = field
index += 1
else:
                    # censors[index] = sensor identification number
                    # date = measurement timestamp
                    # field = measurement value
#print(censors[index],date,field)
if field != '':
                        medicion = dict(template)  # copy so saved records do not share one dict
medicion['nombre'] = str(censors[index])
medicion['codigo'] = int(censors[index])
medicion['fecha_hora'] = str(date[:10]) + "T" + str(date[11:])
medicion['PM2_5_last'] = float(field)
save_response = db_save('mediciones', medicion)
if save_response == False:
print("- Hubo un problema almacenando el dato: ")
print(sensor,"\n")
#print(medicion['fecha_hora'])
index += 1
#if cont == 2:
# break
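# Expected CSV layout, inferred from the parsing above (header names and values
# are illustrative only): row 1 carries the sensor codes, column 1 the
# measurement timestamp, and empty cells are skipped.
#
#     fecha,1001,1002,1003
#     2019-01-01 00:00:00,12.5,,8.1
#     2019-01-01 01:00:00,13.0,9.2,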
def main():
for datafile in sys.argv[1:]:
ext=""
i=datafile.rfind(".")
if i == -1:
print(">Error: Los ficheros no tienen extención '"+datafile+"'\n")
else:
if datafile[i:] == ".csv":
load_csv(datafile)
else:
if datafile[i:] == ".xlsx" or datafile[i:] == ".xls" or datafile[i:] == ".xlsm":
#load_xlsx(datafile)
pass
else:
print(">Warning: Extención de fichero no soportado '"+datafile+"'\n")
main() |
py | 1a31fb7eba9425f51bc5ded28fa29c957c41e7f5 | '''
the following import is only necessary because eip.py is not in this directory
'''
import sys
sys.path.append('..')
'''
We're going to log a tag value 10
times to a text file
'''
import csv
from pylogix import PLC
import time
with PLC() as comm:
comm.IPAddress = '192.168.1.9'
with open('31_log.csv', 'w') as csv_file:
csv_file = csv.writer(csv_file, delimiter=',', quotechar='/', quoting=csv.QUOTE_MINIMAL)
for i in range(10):
ret = comm.Read('LargeArray[5]')
csv_file.writerow([ret.Value])
time.sleep(1)
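# A hedged variation (not part of the original example): also log a timestamp
# column so each sample can be correlated later. It reuses only the pylogix and
# csv calls shown above plus time.strftime from the standard library.
#
#     with PLC() as comm:
#         comm.IPAddress = '192.168.1.9'
#         with open('31_log.csv', 'w') as f:
#             writer = csv.writer(f)
#             for _ in range(10):
#                 ret = comm.Read('LargeArray[5]')
#                 writer.writerow([time.strftime('%Y-%m-%d %H:%M:%S'), ret.Value])
#                 time.sleep(1)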
|
py | 1a31fd07f594a7ae95d079903f8aea1ac56c6780 | import os
import numpy as np
from simplegrid.abstractcreature import MAX_ENERGY, Action, AbstractCreature
from simplegrid.dqn_agent import DQNAgent
from simplegrid.map_feature import MapFeature
HISTORY_FILE = 'deep_cow_history.jsonl'
WEIGHTS_FILE = 'deep_cow_model_weights.h5'
MODEL_FILE = 'deep_cow_model.json'
class DeepCow(AbstractCreature):
agent = None
COLOR = (240, 240, 20)
IS_PREDATOR = False
def __init__(self, x, y, settings, energy=None):
super().__init__(x, y, settings, energy)
self.prev_state = None
self.prev_reward = None
self.prev_action_idx = None
self.state = None
self.reward = None
self.done = None
self.action_idx = 0
@staticmethod
def to_internal_state(observation):
"""Convert state to an internal representation.
        The input state is a (2 x d + 1, 2 x d + 1) matrix holding, for each
        cell, a 1 for food, a 0 for nothing, or -x for another animal.
The center cell is always "us". Only the largest diamond fitting
the matrix is actually visible.
"""
size = observation.shape[0]
view_distance = size // 2
if view_distance == 1:
diamond = observation.flatten()
diamond = [diamond[3], diamond[7], diamond[5], diamond[1]]
else:
diamond = []
for x in range(size):
for y in range(size):
if 0 < abs(x - size // 2) + abs(y - size // 2) <= view_distance:
diamond.append(observation[x][y])
diamond = np.asarray(diamond)
grass = MapFeature.GRASS.to_feature_vector(diamond)
rock = MapFeature.ROCK.to_feature_vector(diamond)
        water = MapFeature.WATER.to_feature_vector(diamond)
wolves = MapFeature.WOLF.to_feature_vector(diamond)
return np.concatenate((grass, rock, water, wolves))
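    # Shape sketch (an assumption, not from the original code: it presumes each
    # MapFeature.to_feature_vector yields one 0/1 entry per visible cell):
    #
    #     obs = np.zeros((3, 3))                  # view_distance 1, all empty
    #     state = DeepCow.to_internal_state(obs)
    #     # state -> [grass x4, rock x4, water x4, wolves x4], i.e. length 16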
def step(self, observation):
if self.energy > MAX_ENERGY:
return Action.SPLIT
self.prev_state = self.state
self.prev_reward = self.reward
self.prev_action_idx = self.action_idx
self.state = self.to_internal_state(observation)
if not DeepCow.agent:
DeepCow.agent = DQNAgent.from_dimensions(len(self.state), layers=self.settings.layers, action_size=4)
self.action_idx = DeepCow.agent.act(self.state)
return Action(self.action_idx + 1)
def learn(self, reward, done):
self.reward = reward
if self.prev_state is not None and self.state is not None:
DeepCow.agent.remember(self.prev_state, self.prev_action_idx, self.prev_reward, self.state)
DeepCow.agent.replay()
if done:
DeepCow.agent.remember(self.state, self.action_idx, self.reward, None)
@classmethod
def restore_state(cls, settings):
model_file = settings.get_path(MODEL_FILE)
if model_file and os.path.isfile(model_file):
DeepCow.agent = DQNAgent.from_stored_model(model_file)
weights_file = settings.get_path(WEIGHTS_FILE)
if weights_file and os.path.isfile(weights_file):
DeepCow.agent.load_weights(weights_file)
@classmethod
def save_state(cls, settings):
weights_file = settings.get_path(WEIGHTS_FILE)
if weights_file:
cls.agent.save_weights(weights_file)
cls.agent.save_history(settings.get_path(HISTORY_FILE))
cls.agent.save_model(settings.get_path(MODEL_FILE))
|
py | 1a31fd1346d81ac6b26aad324c0e4450f3f857c2 | """Add meta field to Task table
Revision ID: a4a031f74720
Revises: 860c6ff76ea8
Create Date: 2019-06-08 14:12:10.983247
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a4a031f74720'
down_revision = '860c6ff76ea8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('meta', postgresql.JSON(
astext_type=sa.Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'meta')
# ### end Alembic commands ###
|
py | 1a31fde3fcfcd5295dcab3ae6876a843867fb09f | #!/usr/local/bin/python
#==========================================================================
# Download, ingest, and execute rdahmm evaluation for updated scripps datasets
# Set up a cron job to run nightly
#
# usage: cron_rdahmm.py
#
#===========================================================================
import os, subprocess, sys, glob
import urllib,string
import re
from threading import Thread
from properties import properties
cron_path = properties('cron_path')
download_path = properties('download_path') + "/WesternNorthAmerica/"
#model_path = properties('model_path')
scripps_data = properties('cron_path') + "/WesternNorthAmerica/*.tar"
scripps_cmd = properties('script_path') + "/scripps_ingest_single.py"
scripps_cmd_raw = properties('script_path') + "/scripps_ingest_single_wnam_raw.py"
eval_cmd = properties('script_path') + "/rdahmm_eval_single.py"
xml_cmd = properties('script_path') + "/create_summary_xmls.py"
json_cmd = properties('script_path') + "/create_summary_jsons.py"
class ThreadJob(Thread):
def __init__(self, tarball):
Thread.__init__(self)
self.tarball = tarball
self.dataset = string.split(tarball, "/")[-1][:-13]
def run(self):
# ingest a given tar ball
print "+++Starting process ", self.tarball, " ..."
if "Raw" in self.tarball:
cmd = scripps_cmd_raw
else:
cmd = scripps_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.tarball], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr
print "+++Finished process ", self.tarball
# run rdahmm evaluation on the corresponding dataset
print "+++Starting process ", self.dataset, " ..."
cmd = eval_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr
print "+++Finished process ", self.dataset
# create summary xml on the corresponding dataset
print "+++creating summary xml for ", self.dataset, " ..."
cmd = xml_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr
print "+++Finished creating summary xml for ", self.dataset
# create summary json on the corresponding dataset
print "+++creating summary json for ", self.dataset, " ..."
cmd = json_cmd
#cmd = "echo"
p = subprocess.Popen([cmd, self.dataset], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
            print stderr
print "+++Finished creating summary json for ", self.dataset
# update download_path with the latest tarball
oldtars = download_path + self.dataset + "*.tar"
if len(glob.glob(oldtars)) > 0:
cmd = "rm " + oldtars
os.system(cmd)
#print cmd
cmd = "cp " + self.tarball + " " + download_path
os.system(cmd)
#print cmd
# Get the list of current SCRIPPS datasets
# http://garner.ucsd.edu/pub/timeseries/measures/ats/WesternNorthAmerica/
# We will compare tars available on the web site with the ones we already have.
# We will only download the differences.
def list_tars(url, username="anonymous", password="[email protected]" ):
"""download html with login"""
newurl = "http://" + username + ":" + password + "@" + string.replace(url,"http://","")
opener = urllib.urlopen(newurl)
html = opener.read()
opener.close()
r = re.compile('(?<=href=").*?(?=")')
links = r.findall(html)
tars = [name for name in links if name[-4:]=='.tar' ]
return tars
url = 'http://garner.ucsd.edu/pub/timeseries/measures/ats/WesternNorthAmerica/'
tars = list_tars(url)
datasets = os.listdir(download_path)
#datasets = glob.glob(download_path + "*.tar")
#datasets = [string.split(name,"/")[-1] for name in datasets]
#print datasets
# newdatasets is the list of all updated tarballs from SCRIPPS site
newdatasets = []
for tarball in tars:
#Currently we do not handle "Strain" or "pbo" or "Raw" cases.
#if "Strain" in tarball or "pbo" in tarball or "Raw" in tarball:
if "Strain" in tarball or "pbo" in tarball:
continue
if not tarball in datasets:
newdatasets.append(tarball)
if len(newdatasets) == 0:
sys.exit("No new scripps wnam dataset available today.")
# clear working directory cron_path
cmd = "rm -r " + cron_path + "/*"
os.system(cmd)
wgetcmd = "wget -nv --user=anonymous --password='[email protected]' -P " + cron_path + " -r -nH --cut-dirs=4 --no-parent --reject 'index.html*' " + url + "%s"
for tarball in newdatasets:
cmd = wgetcmd % tarball
#print cmd
os.system(cmd)
#break
#threadjobs = []
for tarball in glob.glob(cron_path + "WesternNorthAmerica/*.tar"):
#print tarball
t = ThreadJob(tarball)
t.start()
#threadjobs.append(t)
#sys.exit()
# wait for all processing threads to finish before generating the XML
#for t in threadjobs:
# t.join()
# Create the summary XML command
#print "Finally, create the summary XML files."
#os.system(xml_cmd)
|
py | 1a31fe88e745fe92d43ab33dcea047f992ae6096 | from enum import Enum, auto
class ThreadAction(Enum):
SHARED = auto()
COPIED = auto()
NOT_ALLOWED = auto()
class ProcessAction(Enum):
COPIED = auto()
SET_TO_NONE = auto()
NOT_ALLOWED = auto()
|
bzl | 1a31ff5e80616086ad3f6198a647f16dbc5fc188 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementation for AppleDynamicFrameworkInfo configuration."""
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
# TODO(b/161370390): Remove ctx from the args when ctx is removed from all partials.
def _framework_provider_partial_impl(
*,
ctx,
actions,
bin_root_path,
binary_provider,
bundle_name,
rule_label):
"""Implementation for the framework provider partial."""
binary_file = binary_provider.binary
# Create a directory structure that the linker can use to reference this
# framework. It follows the pattern of
# any_path/MyFramework.framework/MyFramework. The absolute path and files are
# propagated using the AppleDynamicFrameworkInfo provider.
framework_dir = paths.join("frameworks", "%s.framework" % bundle_name)
framework_file = actions.declare_file(
paths.join(framework_dir, bundle_name),
)
actions.symlink(
target_file = binary_file,
output = framework_file,
)
absolute_framework_dir = paths.join(
bin_root_path,
rule_label.package,
framework_dir,
)
# TODO(cparsons): These will no longer be necessary once apple_binary
# uses the values in the dynamic framework provider.
legacy_objc_provider = apple_common.new_objc_provider(
dynamic_framework_file = depset([framework_file]),
providers = [binary_provider.objc],
)
framework_provider = apple_common.new_dynamic_framework_provider(
binary = binary_file,
framework_dirs = depset([absolute_framework_dir]),
framework_files = depset([framework_file]),
objc = legacy_objc_provider,
)
return struct(
providers = [framework_provider],
)
def framework_provider_partial(
*,
actions,
bin_root_path,
binary_provider,
bundle_name,
rule_label):
"""Constructor for the framework provider partial.
This partial propagates the AppleDynamicFrameworkInfo provider required by
the linking step. It contains the necessary files and configuration so that
the framework can be linked against. This is only required for dynamic
framework bundles.
Args:
actions: The actions provider from `ctx.actions`.
bin_root_path: The path to the root `-bin` directory.
binary_provider: The AppleDylibBinary provider containing this target's binary.
bundle_name: The name of the output bundle.
rule_label: The label of the target being analyzed.
Returns:
A partial that returns the AppleDynamicFrameworkInfo provider used to link
this framework into the final binary.
"""
return partial.make(
_framework_provider_partial_impl,
actions = actions,
bin_root_path = bin_root_path,
binary_provider = binary_provider,
bundle_name = bundle_name,
rule_label = rule_label,
)
|
py | 1a31ff6723a8c58ab415ac6c419096c5e44fa9e9 | # -*- coding: UTF-8 -*-
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing
drone = olympe.Drone("10.202.0.1")
drone.connect()
drone(TakeOff()).wait()
drone(moveBy(10, 0, 0, 0)).wait()
drone(Landing()).wait()
drone.disconnect()
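# Hedged extension (not in the original snippet): each command returns an
# expectation whose .wait() result can be checked before moving on, e.g.:
#
#     assert drone(TakeOff()).wait().success()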
|
py | 1a31ffdc215d9b349acfa94401170c17fe3ac867 | """This package enables saving and loading of python objects to disk
while also backing to S3 storage. """
import os
import datetime
import ntpath # to extract file name from path, OS-independent
import traceback # for printing full stacktraces of errors
import concurrent.futures # for asynchronous file uploads
import pickle # for pickling files
try: # for automatic caching of return values of functions
from functools import lru_cache
except ImportError:
from functools32 import lru_cache # pylint: disable=E0401
import pandas as pd
import boto3 # to interact with AWS S3
from botocore.exceptions import ClientError
import dateutil # to make local change-time datetime objects time-aware
import yaml # to read the s3bp config
import feather # to read/write pandas dataframes as feather objects
CFG_FILE_NAME = 's3bp_cfg.yml'
DEFAULT_MAX_WORKERS = 5
EXECUTOR = None
# === Reading configuration ===
def _s3bp_cfg_file_path():
return os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
CFG_FILE_NAME))
def _get_s3bp_cfg():
try:
with open(_s3bp_cfg_file_path(), 'r') as cfg_file:
cfg = yaml.safe_load(cfg_file)
if not isinstance(cfg, dict):
                cfg = {'base_dir_to_bucket_map': {}}
return cfg
except FileNotFoundError:
with open(_s3bp_cfg_file_path(), 'w') as outfile:
outfile.write(yaml.dump(
{'base_dir_to_bucket_map': {}},
default_flow_style=False
))
return _get_s3bp_cfg()
def _max_workers():
try:
return _get_s3bp_cfg()['max_workers']
except KeyError:
return DEFAULT_MAX_WORKERS
def _default_bucket():
return _get_s3bp_cfg()['default_bucket']
def _base_dir_to_bucket_map():
return _get_s3bp_cfg()['base_dir_to_bucket_map']
def _base_dirs():
return list(_get_s3bp_cfg()['base_dir_to_bucket_map'].keys())
# === Setting configuration ===
def _set_s3bp_cfg(cfg):
with open(_s3bp_cfg_file_path(), 'w') as outfile:
outfile.write(yaml.dump(cfg, default_flow_style=False))
def set_max_workers(max_workers):
"""Sets the maximum number of workers in the thread pool used to
asynchronously upload files. NOTE: Resets the current thread pool!"""
cfg = _get_s3bp_cfg()
cfg['max_workers'] = max_workers
_set_s3bp_cfg(cfg)
_get_executor(reset=True)
def set_default_bucket(bucket_name):
"""Sets the given string as the default bucket name."""
cfg = _get_s3bp_cfg()
cfg['default_bucket'] = bucket_name
_set_s3bp_cfg(cfg)
def unset_default_bucket():
"""Unsets the currently set default bucket, if set."""
cfg = _get_s3bp_cfg()
cfg.pop('default_bucket', None)
_set_s3bp_cfg(cfg)
def _parse_dir_path(dir_path):
if '~' in dir_path:
return os.path.expanduser(dir_path)
return dir_path
def set_default_base_directory(base_directory):
"""Sets the given string as the default base directory name."""
cfg = _get_s3bp_cfg()
cfg['default_base_dir'] = _parse_dir_path(base_directory)
_set_s3bp_cfg(cfg)
def map_base_directory_to_bucket(base_directory, bucket_name):
"""Maps the given directory as a base directory of the given bucket.
Arguments
---------
base_directory : str
The full path, from root, to the desired base directory.
bucket_name : str
The name of the bucket to map the given directory to.
"""
cfg = _get_s3bp_cfg()
parsed_path = _parse_dir_path(base_directory)
if not isinstance(cfg['base_dir_to_bucket_map'], dict):
cfg['base_dir_to_bucket_map'] = {}
cfg['base_dir_to_bucket_map'][parsed_path] = bucket_name
_set_s3bp_cfg(cfg)
def remove_base_directory_mapping(base_directory):
"""Remove the mapping associated with the given directory, if exists."""
cfg = _get_s3bp_cfg()
parsed_path = _parse_dir_path(base_directory)
cfg['base_dir_to_bucket_map'].pop(parsed_path, None)
_set_s3bp_cfg(cfg)
# === Getting parameters ===
def _get_executor(reset=False):
if reset:
_get_executor.executor = concurrent.futures.ThreadPoolExecutor(
_max_workers())
try:
return _get_executor.executor
except AttributeError:
_get_executor.executor = concurrent.futures.ThreadPoolExecutor(
_max_workers())
return _get_executor.executor
@lru_cache(maxsize=32)
def _get_bucket_by_name(bucket_name):
s3_rsc = boto3.resource('s3')
return s3_rsc.Bucket(bucket_name)
@lru_cache(maxsize=32)
def _get_base_dir_by_file_path_and_bucket_name(filepath, bucket_name):
try:
for directory in _base_dirs():
if (directory in filepath) and (
_base_dir_to_bucket_map()[directory] == bucket_name):
return directory
except (KeyError, AttributeError):
return None
return None
def _bucket_name_and_base_dir_by_filepath(filepath):
try:
for directory in _base_dirs():
if directory in filepath:
return _base_dir_to_bucket_map()[directory], directory
except (KeyError, AttributeError):
pass
try:
return _default_bucket(), None
except KeyError:
raise ValueError(
"No bucket name was given, and neither a default was defined "
"nor could one be interpreted from the file path. Please "
"provide one explicitly, or define an appropriate bucket.")
return None, None
def _get_key(filepath, namekey, base_directory):
if namekey or not base_directory:
return ntpath.basename(filepath)
index = filepath.find(base_directory[base_directory.rfind('/'):])
return filepath[index + 1:]
@lru_cache(maxsize=32)
def _get_bucket_and_key(filepath, bucket_name, namekey):
base_directory = None
if bucket_name is None:
bucket_name, base_directory = _bucket_name_and_base_dir_by_filepath(
filepath)
elif not namekey:
base_directory = _get_base_dir_by_file_path_and_bucket_name(
filepath, bucket_name)
os.makedirs(base_directory, exist_ok=True)
bucket = _get_bucket_by_name(bucket_name)
key = _get_key(filepath, namekey, base_directory)
return bucket, key
# === Uploading/Downloading files ===
def _parse_file_path(filepath):
if '~' in filepath:
return os.path.expanduser(filepath)
return filepath
def _file_upload_thread(bucket, filepath, key):
try:
bucket.upload_file(filepath, key)
except BaseException as exc: # pylint: disable=W0703
print(
'File upload failed with following exception:\n{}'.format(exc),
flush=True
)
def upload_file(filepath, bucket_name=None, namekey=None, wait=False):
"""Uploads the given file to S3 storage.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
        (there is no guarantee which base directory will be used if several are
        present in the given path). If base directory inference fails the
        default bucket will be used, if defined, else the operation will fail.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
"""
filepath = _parse_file_path(filepath)
bucket, key = _get_bucket_and_key(filepath, bucket_name, namekey)
if wait:
bucket.upload_file(filepath, key)
else:
_get_executor().submit(_file_upload_thread, bucket, filepath, key)
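# Usage sketch (hedged; the bucket name and paths are hypothetical, and the
# module is assumed to be importable as `s3bp`):
#
#     import s3bp
#     s3bp.map_base_directory_to_bucket('/home/me/data', 'my-backup-bucket')
#     s3bp.upload_file('/home/me/data/project/results.csv')             # async
#     s3bp.upload_file('/home/me/data/project/results.csv', wait=True)  # blocking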
def _file_time_modified(filepath):
timestamp = os.path.getmtime(filepath)
dt_obj = datetime.datetime.utcfromtimestamp(timestamp)
# this is correct only because the non-time-aware obj is in UTC!
dt_obj = dt_obj.replace(tzinfo=dateutil.tz.tzutc())
return dt_obj
def download_file(filepath, bucket_name=None, namekey=None, verbose=False):
"""Downloads the most recent version of the given file from S3, if needed.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
        the path (there is no guarantee which base directory will be used if
        several are present in the given path). If base directory inference
        fails the default bucket will be used, if defined, else the operation
will fail.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
filepath = _parse_file_path(filepath)
bucket, key = _get_bucket_and_key(filepath, bucket_name, namekey)
try:
if os.path.isfile(filepath):
if verbose:
print('File %s found on disk.' % key)
# this datetime object has tzinfo=dateutil.tz.utc()
s3_last_modified = bucket.Object(key).get()['LastModified']
if s3_last_modified > _file_time_modified(filepath):
if verbose:
print('But S3 has an updated version. Downloading...')
bucket.download_file(key, filepath)
else:
if verbose:
print('File %s NOT found on disk. Downloading...' % key)
# creating non-existing dirs on the path
if not os.path.exists(filepath):
os.makedirs(filepath[:filepath.rfind('/')])
bucket.download_file(key, filepath)
except ClientError:
if verbose:
print('Loading dataframe failed with the following exception:')
print(traceback.format_exc())
raise ValueError('No dataframe found with key %s' % key)
# === Saving/loading Python objects ===
def _pickle_serialiazer(pyobject, filepath):
pickle.dump(pyobject, open(filepath, 'wb'))
def save_object(pyobject, filepath, bucket_name=None,
serializer=_pickle_serialiazer, namekey=None, wait=False):
"""Saves the given object to S3 storage, caching it as the given file.
Arguments
---------
pyobject : object
The python object to save.
filepath : str
The full path, from root, to the desired cache file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
        (there is no guarantee which base directory will be used if several are
        present in the given path). If base directory inference fails the
        default bucket will be used, if defined, else the operation will fail.
    serializer (optional) : callable
        A callable that takes two positional arguments, a Python object and a
path to a file, and dumps the object to the given file. Defaults to a
wrapper of pickle.dump.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
"""
serializer(pyobject, filepath)
upload_file(filepath, bucket_name, namekey, wait)
def _picke_deserializer(filepath):
return pickle.load(open(filepath, 'rb'))
def load_object(filepath, bucket_name=None, deserializer=_picke_deserializer,
namekey=None, verbose=False):
"""Loads the most recent version of the object cached in the given file.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
        the path (there is no guarantee which base directory will be used if
        several are present in the given path). If base directory inference
        fails the default bucket will be used, if defined, else the operation
        will fail.
    deserializer (optional) : callable
        A callable that takes one positional argument, a path to a file, and
returns the object stored in it. Defaults to a wrapper of pickle.load.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
download_file(filepath, bucket_name=bucket_name, namekey=namekey,
verbose=verbose)
return deserializer(filepath)
# === Saving/loading dataframes ===
def _pandas_df_csv_serializer(pyobject, filepath):
pyobject.to_csv(filepath)
def _pandas_df_excel_serializer(pyobject, filepath):
pyobject.to_excel(filepath)
def _pandas_df_feather_serializer(pyobject, filepath):
feather.write_dataframe(pyobject, filepath)
def _get_pandas_df_serializer(dformat):
dformat = dformat.lower()
if dformat == 'csv':
return _pandas_df_csv_serializer
if dformat == 'excel':
return _pandas_df_excel_serializer
if dformat == 'feather':
return _pandas_df_feather_serializer
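# Note (editorial): format names other than 'csv', 'excel' and 'feather' fall
# through and return None here (and in the deserializer lookup further down),
# which only fails later when the serializer is called; callers are expected to
# pass one of the documented formats.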
def save_dataframe(df, filepath, bucket_name=None, dformat='csv', namekey=None,
wait=False):
"""Writes the given dataframe as a CSV file to disk and S3 storage.
Arguments
---------
    df : pandas.DataFrame
        The pandas DataFrame object to save.
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to upload the file to. If not given, it will be
inferred from any defined base directory that is present on the path
        (there is no guarantee which base directory will be used if several are
        present in the given path). If base directory inference fails the
        default bucket will be used, if defined, else the operation will fail.
    dformat (optional) : str
        The storage format for the DataFrame. One of 'csv', 'excel' and
'feather'. Defaults to 'csv'.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when uploading
to the bucket. If set, or if no base directory is found in the
filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
wait (optional) : bool
Defaults to False. If set to True, the function will wait on the upload
operation. Otherwise, the upload will be performed asynchronously in a
separate thread.
"""
save_object(df, filepath, serializer=_get_pandas_df_serializer(dformat),
bucket_name=bucket_name, namekey=namekey, wait=wait)
def _pandas_df_csv_deserializer(filepath):
return pd.read_csv(filepath)
def _pandas_df_excel_deserializer(filepath):
return pd.read_excel(filepath)
def _pandas_df_feather_deserializer(filepath):
return feather.read_dataframe(filepath)
def _get_pandas_df_deserializer(dformat):
dformat = dformat.lower()
if dformat == 'csv':
return _pandas_df_csv_deserializer
if dformat == 'excel':
return _pandas_df_excel_deserializer
if dformat == 'feather':
return _pandas_df_feather_deserializer
def load_dataframe(filepath, bucket_name=None, dformat='csv', namekey=None,
verbose=False):
"""Loads the most updated version of a dataframe from file, fetching it
from S3 storage if necessary.
Arguments
---------
filepath : str
The full path, from root, to the desired file.
bucket_name (optional) : str
The name of the bucket to download the file from. If not given, it
will be inferred from any defined base directory that is present on
        the path (there is no guarantee which base directory will be used if
        several are present in the given path). If base directory inference
        fails the default bucket will be used, if defined, else the operation
        will fail.
    dformat (optional) : str
        The storage format for the DataFrame. One of 'csv', 'excel' and
'feather'. Defaults to 'csv'.
namekey (optional) : bool
Indicate whether to use the name of the file as the key when
downloading from the bucket. If set, or if no base directory is found
in the filepath, the file name will be used as key. Otherwise, the path
rooted at the detected base directory will be used, resulting in a
directory-like structure in the S3 bucket.
verbose (optional) : bool
Defaults to False. If set to True, some informative messages will be
printed.
"""
return load_object(
        filepath, deserializer=_get_pandas_df_deserializer(dformat),
bucket_name=bucket_name, namekey=namekey, verbose=verbose)
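# Illustrative usage of the dataframe helpers (an editorial sketch, not part of
# the original module; the path and bucket setup are hypothetical):
#
#     import pandas as pd
#     df = pd.DataFrame({'a': [1, 2, 3]})
#     save_dataframe(df, '/tmp/cache/table.feather', dformat='feather', wait=True)
#     df2 = load_dataframe('/tmp/cache/table.feather', dformat='feather')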
|
py | 1a31fff30c1c615cc32b69cedd8cdaa101766d46 | """Dependency injector base providers unit tests."""
import unittest2 as unittest
from dependency_injector import (
containers,
providers,
errors,
)
class ProviderTests(unittest.TestCase):
def setUp(self):
self.provider = providers.Provider()
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.provider))
def test_call(self):
self.assertRaises(NotImplementedError, self.provider.__call__)
def test_delegate(self):
delegate1 = self.provider.delegate()
self.assertIsInstance(delegate1, providers.Delegate)
self.assertIs(delegate1(), self.provider)
delegate2 = self.provider.delegate()
self.assertIsInstance(delegate2, providers.Delegate)
self.assertIs(delegate2(), self.provider)
self.assertIsNot(delegate1, delegate2)
def test_provider(self):
delegate1 = self.provider.provider
self.assertIsInstance(delegate1, providers.Delegate)
self.assertIs(delegate1(), self.provider)
delegate2 = self.provider.provider
self.assertIsInstance(delegate2, providers.Delegate)
self.assertIs(delegate2(), self.provider)
self.assertIsNot(delegate1, delegate2)
def test_override(self):
overriding_provider = providers.Provider()
self.provider.override(overriding_provider)
self.assertTrue(self.provider.overridden)
self.assertIs(self.provider.last_overriding, overriding_provider)
def test_double_override(self):
overriding_provider1 = providers.Object(1)
overriding_provider2 = providers.Object(2)
self.provider.override(overriding_provider1)
overriding_provider1.override(overriding_provider2)
self.assertEqual(self.provider(), overriding_provider2())
def test_overriding_context(self):
overriding_provider = providers.Provider()
with self.provider.override(overriding_provider):
self.assertTrue(self.provider.overridden)
self.assertFalse(self.provider.overridden)
def test_override_with_itself(self):
self.assertRaises(errors.Error, self.provider.override, self.provider)
def test_override_with_not_provider(self):
obj = object()
self.provider.override(obj)
self.assertIs(self.provider(), obj)
def test_reset_last_overriding(self):
overriding_provider1 = providers.Provider()
overriding_provider2 = providers.Provider()
self.provider.override(overriding_provider1)
self.provider.override(overriding_provider2)
self.assertIs(self.provider.overridden[-1], overriding_provider2)
self.assertIs(self.provider.last_overriding, overriding_provider2)
self.provider.reset_last_overriding()
self.assertIs(self.provider.overridden[-1], overriding_provider1)
self.assertIs(self.provider.last_overriding, overriding_provider1)
self.provider.reset_last_overriding()
self.assertFalse(self.provider.overridden)
self.assertIsNone(self.provider.last_overriding)
def test_reset_last_overriding_of_not_overridden_provider(self):
self.assertRaises(errors.Error, self.provider.reset_last_overriding)
def test_reset_override(self):
overriding_provider = providers.Provider()
self.provider.override(overriding_provider)
self.assertTrue(self.provider.overridden)
self.assertEqual(self.provider.overridden, (overriding_provider,))
self.provider.reset_override()
self.assertEqual(self.provider.overridden, tuple())
def test_deepcopy(self):
provider = providers.Provider()
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Provider)
def test_deepcopy_from_memo(self):
provider = providers.Provider()
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Provider()
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Provider)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_repr(self):
self.assertEqual(repr(self.provider),
'<dependency_injector.providers.'
'Provider() at {0}>'.format(hex(id(self.provider))))
class ObjectProviderTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Object(object())))
def test_provided_instance_provider(self):
provider = providers.Object(object())
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_call_object_provider(self):
obj = object()
self.assertIs(providers.Object(obj)(), obj)
def test_call_overridden_object_provider(self):
obj1 = object()
obj2 = object()
provider = providers.Object(obj1)
provider.override(providers.Object(obj2))
self.assertIs(provider(), obj2)
def test_deepcopy(self):
provider = providers.Object(1)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Object)
def test_deepcopy_from_memo(self):
provider = providers.Object(1)
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Object(1)
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Object)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_deepcopy_doesnt_copy_provided_object(self):
# Fixes bug #231
# Details: https://github.com/ets-labs/python-dependency-injector/issues/231
some_object = object()
provider = providers.Object(some_object)
provider_copy = providers.deepcopy(provider)
self.assertIs(provider(), some_object)
self.assertIs(provider_copy(), some_object)
def test_repr(self):
some_object = object()
provider = providers.Object(some_object)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'Object({0}) at {1}>'.format(
repr(some_object),
hex(id(provider))))
class DelegateTests(unittest.TestCase):
def setUp(self):
self.delegated = providers.Provider()
self.delegate = providers.Delegate(self.delegated)
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.delegate))
def test_init_with_not_provider(self):
self.assertRaises(errors.Error, providers.Delegate, object())
def test_call(self):
delegated1 = self.delegate()
delegated2 = self.delegate()
self.assertIs(delegated1, self.delegated)
self.assertIs(delegated2, self.delegated)
def test_repr(self):
self.assertEqual(repr(self.delegate),
'<dependency_injector.providers.'
'Delegate({0}) at {1}>'.format(
repr(self.delegated),
hex(id(self.delegate))))
class DependencyTests(unittest.TestCase):
def setUp(self):
self.provider = providers.Dependency(instance_of=list)
def test_init_with_not_class(self):
self.assertRaises(TypeError, providers.Dependency, object())
def test_with_abc(self):
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
provider = providers.Dependency(collections_abc.Mapping)
provider.provided_by(providers.Factory(dict))
self.assertIsInstance(provider(), collections_abc.Mapping)
self.assertIsInstance(provider(), dict)
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.provider))
def test_provided_instance_provider(self):
self.assertIsInstance(self.provider.provided, providers.ProvidedInstance)
def test_call_overridden(self):
self.provider.provided_by(providers.Factory(list))
self.assertIsInstance(self.provider(), list)
def test_call_overridden_but_not_instance_of(self):
self.provider.provided_by(providers.Factory(dict))
self.assertRaises(errors.Error, self.provider)
def test_call_not_overridden(self):
self.assertRaises(errors.Error, self.provider)
def test_deepcopy(self):
provider = providers.Dependency(int)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Dependency)
def test_deepcopy_from_memo(self):
provider = providers.Dependency(int)
provider_copy_memo = providers.Provider()
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_overridden(self):
provider = providers.Dependency(int)
overriding_provider = providers.Provider()
provider.override(overriding_provider)
provider_copy = providers.deepcopy(provider)
overriding_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider, providers.Dependency)
self.assertIsNot(overriding_provider, overriding_provider_copy)
self.assertIsInstance(overriding_provider_copy, providers.Provider)
def test_repr(self):
self.assertEqual(repr(self.provider),
'<dependency_injector.providers.'
'Dependency({0}) at {1}>'.format(
repr(list),
hex(id(self.provider))))
class ExternalDependencyTests(unittest.TestCase):
def setUp(self):
self.provider = providers.ExternalDependency(instance_of=list)
def test_is_instance(self):
self.assertIsInstance(self.provider, providers.Dependency)
class DependenciesContainerTests(unittest.TestCase):
class Container(containers.DeclarativeContainer):
dependency = providers.Provider()
def setUp(self):
self.provider = providers.DependenciesContainer()
self.container = self.Container()
def test_getattr(self):
has_dependency = hasattr(self.provider, 'dependency')
dependency = self.provider.dependency
self.assertIsInstance(dependency, providers.Dependency)
self.assertIs(dependency, self.provider.dependency)
self.assertTrue(has_dependency)
self.assertIsNone(dependency.last_overriding)
def test_getattr_with_container(self):
self.provider.override(self.container)
dependency = self.provider.dependency
self.assertTrue(dependency.overridden)
self.assertIs(dependency.last_overriding, self.container.dependency)
def test_providers(self):
dependency1 = self.provider.dependency1
dependency2 = self.provider.dependency2
self.assertEqual(self.provider.providers, {'dependency1': dependency1,
'dependency2': dependency2})
def test_override(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.assertTrue(dependency.overridden)
self.assertIs(dependency.last_overriding, self.container.dependency)
def test_reset_last_overriding(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.provider.reset_last_overriding()
self.assertIsNone(dependency.last_overriding)
self.assertIsNone(dependency.last_overriding)
def test_reset_override(self):
dependency = self.provider.dependency
self.provider.override(self.container)
self.provider.reset_override()
self.assertFalse(dependency.overridden)
self.assertFalse(dependency.overridden)
|
py | 1a320067d7cf5c9ffa31a438b150e91301d85d75 | import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
# Parameters
def get_battery_capacity():
return _read_param("/sys/class/power_supply/battery/capacity", int)
# Helpers
def _read_param(path, parser, default=0):
try:
with open(path) as f:
return parser(f.read())
except Exception:
return default
def panda_current_to_actual_current(panda_current):
# From white/grey panda schematic
return (3.3 - (panda_current * 3.3 / 4096)) / 8.25
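# Worked example (editorial): a raw reading of 2048 maps to
# (3.3 - 2048 * 3.3 / 4096) / 8.25 = (3.3 - 1.65) / 8.25 = 0.2 A.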
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
self.ts_last_charging_ctrl = None
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (pandaState.pandaState.pandaType in (log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda)) and (pandaState.pandaState.current > 1):
# If white/grey panda, use the integrated current measurements if the measurement is not 0
# If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
# This seems to be accurate to about 5%
current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t: float, current_power: float) -> None:
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self) -> int:
return int(self.power_used_uWh)
def get_car_battery_capacity(self) -> int:
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp: Optional[float]) -> bool:
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
def charging_ctrl(self, msg, ts, to_discharge, to_charge ):
if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
      battery_charging = HARDWARE.get_battery_charging()
      if self.ts_last_charging_ctrl:
        if msg.deviceState.batteryPercent >= to_discharge and battery_charging:
          HARDWARE.set_battery_charging(False)
        elif msg.deviceState.batteryPercent <= to_charge and not battery_charging:
HARDWARE.set_battery_charging(True)
self.ts_last_charging_ctrl = ts |
py | 1a3200f75f592c6b606913a9669dc981d0d596d0 | #!/usr/bin/env python
"""
_GetCompletedByFileList_
Oracle implementation of Subscription.IsFileCompleted
"""
from WMCore.WMBS.MySQL.Subscriptions.GetCompletedByFileList import \
GetCompletedByFileList as GetCompletedByFileListMySQL
class GetCompletedByFileList(GetCompletedByFileListMySQL):
pass
|
py | 1a320122f15b6b5295eb5a13a526e4a92e6880db | #!/usr/bin/python
################################################################################
# IMPORT ROOT CERT
#
# Justin Dierking
# [email protected]
# 937 371 6026
#
# 09/15/2018 Original Construction
################################################################################
import traceback
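# Editorial note: STATUS_NOT_EXECUTED, STATUS_FAILURE, STATUS_SUCCESS and
# STATUS_EXCEPTION are not defined in this file; they appear to be injected by
# the surrounding task-execution framework, as is the `cli` object passed to
# execute() below (assumption).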
class Task:
def __init__(self):
self.output = []
self.status = STATUS_NOT_EXECUTED
def execute(self, cli):
try:
status, stdout, stderr = cli.system("keytool -importcert -noprompt -keystore /tmp/server/server.jks -storepass secret -alias root-ca -keypass secret -file /tmp/root-ca/ca.pem", return_tuple = True)
if status:
self.output.append(str(stdout) + str(stderr))
self.status = STATUS_FAILURE
else:
self.output.append(str(stdout) + str(stderr))
self.status = STATUS_SUCCESS
except Exception:
self.output.append(traceback.format_exc())
self.status = STATUS_EXCEPTION
return self.status |
py | 1a32051fdf773d8c7f21252937176bf784f227bc | """
Provides a `scantree` function which recurses a given directory, yielding
(pathname, os.stat(pathname)) pairs.
Attempts to use the more efficient `scandir` function if this is available,
falling back to `os.listdir` otherwise.
"""
import os
import stat
try:
from os import scandir
except ImportError:
try:
from scandir import scandir
except ImportError:
scandir = None
if scandir:
def scantree(root):
for entry in scandir(root):
if entry.is_dir():
for item in scantree(entry.path):
yield item
else:
yield entry.path, entry.stat()
else:
def scantree(root):
for filename in os.listdir(root):
path = os.path.join(root, filename)
stat_result = os.stat(path)
if stat.S_ISDIR(stat_result.st_mode):
for item in scantree(path):
yield item
else:
yield path, stat_result
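# Illustrative usage (an editorial sketch, not part of the original module):
# total size of all files under a hypothetical directory.
#
#     total = sum(st.st_size for _, st in scantree('/var/log'))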
|
py | 1a3205678e4fd6e7ac38a4991822c9c3ddffc61d | # original implementation: https://github.com/odegeasslbc/FastGAN-pytorch/blob/main/models.py
#
# modified by Axel Sauer for "Projected GANs Converge Faster"
#
import torch.nn as nn
from pg_modules.blocks import (InitLayer, UpBlockBig, UpBlockBigCond, UpBlockSmall, UpBlockSmallCond, SEBlock, conv2d)
def normalize_second_moment(x, dim=1, eps=1e-8):
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
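# Editorial note: this divides each latent by sqrt(mean(x**2) + eps), i.e. the
# pixelwise feature normalization used in Progressive GAN, here applied to the
# input noise vector.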
class DummyMapping(nn.Module):
def __init__(self):
super().__init__()
def forward(self, z, c, **kwargs):
return z.unsqueeze(1) # to fit the StyleGAN API
class FastganSynthesis(nn.Module):
def __init__(self, ngf=128, z_dim=256, nc=3, img_resolution=256, lite=False):
super().__init__()
self.img_resolution = img_resolution
self.z_dim = z_dim
# channel multiplier
nfc_multi = {2: 16, 4:16, 8:8, 16:4, 32:2, 64:2, 128:1, 256:0.5,
512:0.25, 1024:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*ngf)
# layers
self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
UpBlock = UpBlockSmall if lite else UpBlockBig
self.feat_8 = UpBlock(nfc[4], nfc[8])
self.feat_16 = UpBlock(nfc[8], nfc[16])
self.feat_32 = UpBlock(nfc[16], nfc[32])
self.feat_64 = UpBlock(nfc[32], nfc[64])
self.feat_128 = UpBlock(nfc[64], nfc[128])
self.feat_256 = UpBlock(nfc[128], nfc[256])
self.se_64 = SEBlock(nfc[4], nfc[64])
self.se_128 = SEBlock(nfc[8], nfc[128])
self.se_256 = SEBlock(nfc[16], nfc[256])
self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
if img_resolution > 256:
self.feat_512 = UpBlock(nfc[256], nfc[512])
self.se_512 = SEBlock(nfc[32], nfc[512])
if img_resolution > 512:
self.feat_1024 = UpBlock(nfc[512], nfc[1024])
def forward(self, input, c, **kwargs):
# map noise to hypersphere as in "Progressive Growing of GANS"
input = normalize_second_moment(input[:, 0])
feat_4 = self.init(input)
feat_8 = self.feat_8(feat_4)
feat_16 = self.feat_16(feat_8)
feat_32 = self.feat_32(feat_16)
feat_64 = self.se_64(feat_4, self.feat_64(feat_32))
feat_128 = self.se_128(feat_8, self.feat_128(feat_64))
if self.img_resolution >= 128:
feat_last = feat_128
if self.img_resolution >= 256:
feat_last = self.se_256(feat_16, self.feat_256(feat_last))
if self.img_resolution >= 512:
feat_last = self.se_512(feat_32, self.feat_512(feat_last))
if self.img_resolution >= 1024:
feat_last = self.feat_1024(feat_last)
return self.to_big(feat_last)
class FastganSynthesisCond(nn.Module):
def __init__(self, ngf=64, z_dim=256, nc=3, img_resolution=256, num_classes=1000, lite=False):
super().__init__()
self.z_dim = z_dim
nfc_multi = {2: 16, 4:16, 8:8, 16:4, 32:2, 64:2, 128:1, 256:0.5,
512:0.25, 1024:0.125, 2048:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*ngf)
self.img_resolution = img_resolution
self.init = InitLayer(z_dim, channel=nfc[2], sz=4)
UpBlock = UpBlockSmallCond if lite else UpBlockBigCond
self.feat_8 = UpBlock(nfc[4], nfc[8], z_dim)
self.feat_16 = UpBlock(nfc[8], nfc[16], z_dim)
self.feat_32 = UpBlock(nfc[16], nfc[32], z_dim)
self.feat_64 = UpBlock(nfc[32], nfc[64], z_dim)
self.feat_128 = UpBlock(nfc[64], nfc[128], z_dim)
self.feat_256 = UpBlock(nfc[128], nfc[256], z_dim)
self.se_64 = SEBlock(nfc[4], nfc[64])
self.se_128 = SEBlock(nfc[8], nfc[128])
self.se_256 = SEBlock(nfc[16], nfc[256])
self.to_big = conv2d(nfc[img_resolution], nc, 3, 1, 1, bias=True)
if img_resolution > 256:
self.feat_512 = UpBlock(nfc[256], nfc[512])
self.se_512 = SEBlock(nfc[32], nfc[512])
if img_resolution > 512:
self.feat_1024 = UpBlock(nfc[512], nfc[1024])
self.embed = nn.Embedding(num_classes, z_dim)
def forward(self, input, c, update_emas=False):
c = self.embed(c.argmax(1))
# map noise to hypersphere as in "Progressive Growing of GANS"
input = normalize_second_moment(input[:, 0])
feat_4 = self.init(input)
feat_8 = self.feat_8(feat_4, c)
feat_16 = self.feat_16(feat_8, c)
feat_32 = self.feat_32(feat_16, c)
feat_64 = self.se_64(feat_4, self.feat_64(feat_32, c))
feat_128 = self.se_128(feat_8, self.feat_128(feat_64, c))
if self.img_resolution >= 128:
feat_last = feat_128
if self.img_resolution >= 256:
feat_last = self.se_256(feat_16, self.feat_256(feat_last, c))
if self.img_resolution >= 512:
feat_last = self.se_512(feat_32, self.feat_512(feat_last, c))
if self.img_resolution >= 1024:
feat_last = self.feat_1024(feat_last, c)
return self.to_big(feat_last)
class Generator(nn.Module):
def __init__(
self,
z_dim=256,
c_dim=0,
w_dim=0,
img_resolution=256,
img_channels=3,
ngf=128,
cond=0,
mapping_kwargs={},
synthesis_kwargs={}
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.img_resolution = img_resolution
self.img_channels = img_channels
# Mapping and Synthesis Networks
self.mapping = DummyMapping() # to fit the StyleGAN API
Synthesis = FastganSynthesisCond if cond else FastganSynthesis
self.synthesis = Synthesis(ngf=ngf, z_dim=z_dim, nc=img_channels, img_resolution=img_resolution, **synthesis_kwargs)
def forward(self, z, c, **kwargs):
w = self.mapping(z, c)
img = self.synthesis(w, c)
return img
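# Illustrative instantiation (an editorial sketch, not part of the original
# file; batch size and resolution are arbitrary). Unconditional path:
#
#     import torch
#     G = Generator(z_dim=256, img_resolution=256, img_channels=3, ngf=128)
#     z = torch.randn(4, 256)
#     img = G(z, c=None)  # -> tensor of shape (4, 3, 256, 256)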
|
py | 1a3207472b1ef9b5ae0ac53ef93dfa8da0ac7e54 | import webbrowser
from liquid import Liquid
from pathlib import Path
DATA_DIRECTORY = Path.home() / ".acm_dl_data"
SEARCH_STRING = "https://dl.acm.org/action/doSearch?LimitedContentGroupKey={key}&pageSize=50&startPage={page_id}"
def _ensure_data_directory_exists(sub_dir=None):
"""Makes sure the data directory exists and returns the data directory path"""
if sub_dir:
path = DATA_DIRECTORY / sub_dir
else:
path = DATA_DIRECTORY
if not path.exists():
path.mkdir(parents=True)
return path
def _display_results_html(pattern, search_results):
with open(Path(__file__).parent / "templates/search_result.html") as f:
ret = Liquid(f).render(tempName = f"Results for : {pattern} (found {len(search_results)})", items = search_results)
out_file = _ensure_data_directory_exists("temp") / "search_results.html"
with open(out_file, "w") as f:
f.write(ret)
webbrowser.open("file://" + str(out_file.absolute()))
|
py | 1a320794d12317b03e433d49b972d177b92989ee | """exlo direct run"""
if __name__ == '__main__':
pass
|
py | 1a32086d6f19efe75d73031079db66af5fed760f | """
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import subprocess
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
from pygments.util import get_choice_opt
from pygments.util import get_int_opt
from pygments.util import get_list_opt
# Import this carefully
try:
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager:
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
elif sys.platform.startswith('darwin'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_MAC
self._create_mac()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
stdout=subprocess.PIPE, stderr=None)
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
for line in lines:
if line.startswith(b'Fontconfig warning:'):
continue
path = line.decode().strip().strip(':')
if path:
return path
return None
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _get_mac_font_path(self, font_map, name, style):
return font_map.get((name + ' ' + style).strip().lower())
def _create_mac(self):
font_map = {}
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
for f in os.listdir(font_dir)
if f.lower().endswith(('ttf', 'ttc')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_mac_font_path(font_map, self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except OSError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
lookuperror = None
keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
for keyname in keynames:
try:
key = _winreg.OpenKey(*keyname)
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
return
except FontNotFound as err:
lookuperror = err
finally:
_winreg.CloseKey(key)
except OSError:
pass
else:
# If we get here, we checked all registry keys and had no luck
# We can be in one of two situations now:
# * All key lookups failed. In this case lookuperror is None and we
# will raise a generic error
# * At least one lookup failed with a FontNotFound error. In this
# case, we will raise that as a more specific error
if lookuperror:
raise lookuperror
raise FontNotFound('Can\'t open Windows font registry key')
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_text_size(self, text):
"""
        Get the text size (width, height).
"""
return self.fonts['NORMAL'].getsize(text)
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
"DejaVu Sans Mono" on \\*nix
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, linelength):
"""
Get the X coordinate of a character position.
"""
return linelength + self.image_pad + self.line_number_width
def _get_text_pos(self, linelength, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(linelength), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_text_bg_color(self, style):
"""
Get the correct background color for the token from the style.
"""
if style['bgcolor'] is not None:
bg_color = '#' + style['bgcolor']
else:
bg_color = None
return bg_color
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxlinelength, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxlinelength) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
text_fg=self.line_number_fg,
text_bg=None,
)
def _draw_text(self, pos, text, font, text_fg, text_bg):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, text_fg, text_bg))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, temp_hight = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
if self.line_number_separator:
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxlinelength, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, text_fg, text_bg in self.drawables:
if text_bg:
text_size = draw.textsize(text=value, font=font)
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
draw.text(pos, value, font=font, fill=text_fg)
im.save(outfile, self.image_format.upper())
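# Illustrative usage (an editorial sketch, not part of the original module):
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     with open('out.png', 'wb') as f:
#         highlight('print("hi")', PythonLexer(), ImageFormatter(line_numbers=False), f)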
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
|
py | 1a320a6b6bed30abc4615190303dac6dd2e27448 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'bashate').version_string_with_vcs()
|
py | 1a320b9e4844dcfc156e455cb34456e7c6117470 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import skbio.stats.ordination
import pandas as pd
def pcoa(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = None) -> skbio.OrdinationResults:
if number_of_dimensions is None:
# calculate full decomposition using eigh
return skbio.stats.ordination.pcoa(distance_matrix, method='eigh',
inplace=False)
else:
# calculate the decomposition only for the `number_of_dimensions`
# using fast heuristic eigendecomposition (fsvd)
return skbio.stats.ordination.pcoa(
distance_matrix, method='fsvd',
number_of_dimensions=number_of_dimensions,
inplace=True)
def pcoa_biplot(pcoa: skbio.OrdinationResults,
features: pd.DataFrame) -> skbio.OrdinationResults:
return skbio.stats.ordination.pcoa_biplot(pcoa, features)
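# Illustrative usage (an editorial sketch, not part of the original plugin; the
# distance matrix is a tiny made-up example):
#
#     import skbio
#     dm = skbio.DistanceMatrix([[0, 1, 2], [1, 0, 1], [2, 1, 0]],
#                               ids=['s1', 's2', 's3'])
#     ordination = pcoa(dm, number_of_dimensions=2)  # fast fsvd path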
|
py | 1a320bcb24b02684846a6673bd20f4b0af459eec | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/__init__.py: the package containing the test suite
#
# Copyright (C) 2004-2007 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
from pysqlcipher3.test.python2 import (dbapi, dump, factory, hooks, regression,
transactions, types, userfunctions, sqlcipher)
def suite():
return unittest.TestSuite(tuple([
dbapi.suite(),
dump.suite(),
factory.suite(),
hooks.suite(),
regression.suite(),
transactions.suite(),
types.suite(),
userfunctions.suite(),
sqlcipher.suite()
])) |
py | 1a320cfa0b96dcfb4535694611cb4ebde66ec5aa | import asyncio
import json
import logging
import os.path
import random
import click
import sounddevice as sd
import soundfile as sf
from chmp.label import write_label, find_unlabeled
from chmp.app.kwdetect.aio import detect as _async_detect
from chmp.app.kwdetect.util import load_optional_model
_logger = logging.getLogger(__name__)
@click.group()
def main():
pass
@main.command()
@click.argument('target')
@click.option('--model')
def detect(target, model):
"""Continuously detect keywords and save extracted samples to disk."""
loop = asyncio.get_event_loop()
# TODO: add better exception handler
loop.set_exception_handler(print)
loop.run_until_complete(_detect(target, model))
async def _detect(target, model):
_logger.info('load model')
model = load_optional_model(model)
_logger.info('enter detection loop')
async for label in _async_detect(model, sample_target=target):
print('detected: ', label)
@main.command()
@click.argument('path')
@click.option('--labels')
def label(path, labels):
"""Generate labels in an interactive fashion."""
with open(labels, 'rt') as fobj:
labels = json.load(fobj)
label_decoding = {int(key): label for label, key in labels.items()}
label_decoding[-1] = '<repeat>'
unlabeled_files = find_unlabeled(os.path.join(path, '*.ogg'))
if not unlabeled_files:
print('No files to label :)')
return
random.shuffle(unlabeled_files)
print(f'Found {len(unlabeled_files)} unlabeled files')
print('Start labelling ...')
while unlabeled_files:
try:
fname = unlabeled_files.pop()
_label_example(fname, label_decoding)
except KeyboardInterrupt:
print('Stop labelling ...')
raise SystemExit(0)
print('No more files to label :)')
def _label_example(fname, label_decoding):
print(f'Processing: {fname}')
sample, _ = sf.read(fname)
while True:
sd.play(sample, blocking=True)
label = _get_label_from_user(label_decoding)
if label == '<skip>':
print('Skip sample')
return
elif label == '<repeat>':
continue
else:
write_label(fname, label=label, file=os.path.basename(fname))
return
def _get_label_from_user(label_decoding):
    print('Choose label:', ' '.join(f'{label!r} ({code})' for code, label in label_decoding.items()))
while True:
user_input = input('Label [empty to skip]: > ')
if not user_input.strip():
return '<skip>'
try:
user_input = int(user_input)
except ValueError:
print('Invalid input ...')
else:
if user_input not in label_decoding:
print('Invalid input ...')
continue
return label_decoding[user_input]
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
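# Illustrative command lines (an editorial sketch; script name, paths and model
# file are hypothetical):
#
#     python kwdetect.py detect /data/samples --model model.h5
#     python kwdetect.py label /data/samples --labels labels.json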
|
py | 1a320dde9e5638650938397aedcd422581a39494 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class Defaults(Base):
"""Default Tlv template container
The Defaults class encapsulates a list of defaults resources that are managed by the system.
A list of resources can be retrieved from the server using the Defaults.find() method.
"""
__slots__ = ()
_SDM_NAME = 'defaults'
_SDM_ATT_MAP = {
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Defaults, self).__init__(parent, list_op)
@property
def Template(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.tlveditor.template_251f4228c795442db61593bcbbdf8694.Template): An instance of the Template class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.tlveditor.template_251f4228c795442db61593bcbbdf8694 import Template
if self._properties.get('Template', None) is not None:
return self._properties.get('Template')
else:
return Template(self)
def add(self):
"""Adds a new defaults resource on the json, only valid with config assistant
Returns
-------
- self: This instance with all currently retrieved defaults resources using find and the newly added defaults resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self):
"""Finds and retrieves defaults resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve defaults resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all defaults resources from the server.
Returns
-------
- self: This instance with matching defaults resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of defaults data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the defaults resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
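# Usage sketch (assumption, not part of the generated module): with a Defaults instance obtained
# from its parent container in the uhd_restpy object tree, the methods defined above would
# typically be combined as
# found = defaults.find() # retrieve every defaults resource from the server
# template = found.Template # navigate into the child Template container
# single = defaults.read(href) # or fetch one resource when its href is already known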
|
py | 1a320dfc28461e95d9f6b26550781782026d47d6 | #!/usr/bin/env python
# encoding: utf-8
'''
@author: Jason Lee
@license: (C) Copyright @ Jason Lee
@contact: [email protected]
@file: 334.py
@time: 2019/6/10 19:14
@desc:
'''
from typing import List
class Solution:
def increasingTriplet(self, nums: List[int]) -> bool:
if len(nums) < 3:
return False
# first < second < third
first = second = float('inf')
for i in nums:
if i <= first:
first = i
elif i <= second: # the second number is larger than the first
second = i
else:
return True # the third number is larger than both of the previous two
return False
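# Quick sanity check (added illustration, hypothetical inputs):
# Solution().increasingTriplet([2, 1, 5, 0, 4, 6]) -> True (e.g. 0 < 4 < 6)
# Solution().increasingTriplet([5, 4, 3, 2, 1]) -> False (no increasing subsequence of length 3)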
|
py | 1a320e9b0db73b7c2a5b51cea5116b4938939dfb | from gi.repository import Gtk
import face_functions
import face_recognizer
import barcode
import string
import os.path
class TableWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="FACE RECOGNIZER")
self.set_size_request(500, 300)
table = Gtk.Table(6, 3, True)
self.add(table)
hbox = Gtk.Box(spacing=6)
self.take_picture_normal = Gtk.Button(label="NORMAL")
hbox.pack_start(self.take_picture_normal, True, True, 0)
self.take_picture_normal.connect("clicked", self.on_normal_clicked)
self.take_picture_happy = Gtk.Button(label="HAPPY")
hbox.pack_start(self.take_picture_happy, True, True, 0)
self.take_picture_happy.connect("clicked", self.on_happy_clicked)
self.take_picture_surprised = Gtk.Button(label="SURPRISED")
hbox.pack_start(self.take_picture_surprised, True, True, 0)
self.take_picture_surprised.connect("clicked", self.on_surprised_clicked)
self.take_picture_wink = Gtk.Button(label="WINK")
hbox.pack_start(self.take_picture_wink, True, True, 0)
self.take_picture_wink.connect("clicked", self.on_wink_clicked)
self.take_picture_sleepy = Gtk.Button(label="SLEEPY")
hbox.pack_start(self.take_picture_sleepy, True, True, 0)
self.take_picture_sleepy.connect("clicked", self.on_sleepy_clicked)
self.take_picture_sad = Gtk.Button(label="SAD")
hbox.pack_start(self.take_picture_sad, True, True, 0)
self.take_picture_sad.connect("clicked", self.on_sad_clicked)
self.Entry_ID = Gtk.Entry()
self.Entry_ID.set_text("Enter your ID")
Detection_Button = Gtk.Button(label="Start Detector")
Detection_Button.connect("clicked", self.on_start_clicked)
Label_Admin = Gtk.Label("Admin Menu")
Label_User = Gtk.Label("User Menu")
table.attach(self.Entry_ID, 0, 1, 1, 2)
table.attach(hbox, 1, 3, 1, 2)
table.attach(Detection_Button, 0, 3, 3, 6)
table.attach(Label_Admin, 0, 3, 0, 1)
table.attach(Label_User, 0, 3, 2, 3)
def on_start_clicked(self, button):
self.recognizer = face_recognizer.train_recognizer("./Database")
img = face_functions.snap()
predicted,conf = face_recognizer.recognize_face(self.recognizer, img)
if(predicted==-1 or conf>50):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,Gtk.ButtonsType.CANCEL, "Face not recognized.")
message.run()
message.destroy()
return
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.CANCEL, "Face recognized!")
message.format_secondary_text("Recognized as subject "+str(predicted)+" with a doubt rating of "+str(conf))
message.run()
message.destroy()
d_barcode = barcode.get_barcode(img)
if (len(d_barcode)>0): d_barcode=self.trim_barcode(d_barcode[0])
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.CANCEL, "Barcode Detection")
print(predicted)
print("Barcode data found in this picture: " + str(d_barcode))
if (len(d_barcode)==0):
message.format_secondary_text("Barcode not detected.")
elif (int(predicted)==int(d_barcode)):
message.format_secondary_text("Barcode detected:" + d_barcode + "\nMatches with face.")
else:
message.format_secondary_text("Barcode detected:" + d_barcode + "\nDoes not match face.")
message.run()
message.destroy()
#print("\"Click me\" button was clicked")
def id_is_valid(self):
text = self.Entry_ID.get_text()
if len(text)!=7:
error_message = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,Gtk.ButtonsType.CANCEL, "ID must be exactly 7 digits long!")
error_message.run()
error_message.destroy()
return False
for ch in text:
if(ch not in string.digits):
error_message = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,Gtk.ButtonsType.CANCEL, "ID must contain numbers only!")
error_message.run()
error_message.destroy()
return False
return True
def trim_barcode(self, barcode):
return barcode[:7]
def on_normal_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".normal.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".normal.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
def on_happy_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".happy.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".happy.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
def on_surprised_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".surprised.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".surprised.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
def on_wink_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".wink.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".wink.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
def on_sleepy_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".sleepy.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".sleepy.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
def on_sad_clicked(self, button):
if(self.id_is_valid()):
face_functions.take_picture("./Database/subject"+self.Entry_ID.get_text()+".sad.png")
if(os.path.isfile("./Database/subject"+self.Entry_ID.get_text()+".sad.png")):
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Picture taken")
message.run()
message.destroy()
else:
message = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, "Error saving file - try again!")
message.run()
message.destroy()
win = TableWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main() |
py | 1a320edaa1b5167b0e66ca69de88cbc99e938f56 | import numpy as np
import tensorflow as tf
import yolo.config as cfg
import tensorflow.contrib.slim as slim
#slim = tf.contrib.slim
class YOLONet(object):
def __init__(self, is_training=True):
self.classes = cfg.CLASSES
self.num_class = len(self.classes)
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.output_size = (self.cell_size * self.cell_size) *\
(self.num_class + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
# 7*7*20 (the class predictions) flattened into the corresponding matrix form (class vector)
self.boundary1 = self.cell_size * self.cell_size * self.num_class
# + 7*7*2 flattened into the corresponding matrix form (confidence/scale vector)
self.boundary2 = self.boundary1 +\
self.cell_size * self.cell_size * self.boxes_per_cell
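# Worked numbers (assuming the usual YOLO v1 config values cell_size=7, num_class=20, boxes_per_cell=2):
# output_size = 7*7*(20 + 2*5) = 1470, boundary1 = 7*7*20 = 980, boundary2 = 980 + 7*7*2 = 1078,
# so logits[:, :980] hold class scores, [:, 980:1078] confidences, and the remaining 392 values box coordinates.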
self.object_scale = cfg.OBJECT_SCALE
self.noobject_scale = cfg.NOOBJECT_SCALE
self.class_scale = cfg.CLASS_SCALE
self.coord_scale = cfg.COORD_SCALE
self.learning_rate = cfg.LEARNING_RATE
self.batch_size = cfg.BATCH_SIZE
self.alpha = cfg.ALPHA
self.offset = np.transpose(np.reshape(np.array(
[np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell),
(self.boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0)
)
self.images = tf.placeholder(
tf.float32, [None, self.image_size, self.image_size, 3],
name='images'
)
self.logits = self.build_network(
self.images, num_outputs=self.output_size, alpha=self.alpha,
is_training=is_training
)
if is_training:
self.labels = tf.placeholder(
tf.float32,
[None, self.cell_size, self.cell_size, 5 + self.num_class]
)
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.losses.get_total_loss()
tf.summary.scalar('total_loss', self.total_loss)
def build_network(self,
images,
num_outputs,
alpha,
keep_prob=0.5,
is_training=True,
scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha),
weights_regularizer=slim.l2_regularizer(0.0005),
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)
):
# conv2d(input, num_output, filter_size, stride=1, padding='SAME')
# maxpool(input, kernel_size, stride=2, padding='VAILD')
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope='conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(
net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]),
name='pad_27')
net = slim.conv2d(
net, 1024, 3, 2, padding='VALID', scope='conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(
net, keep_prob=keep_prob, is_training=is_training,
scope='dropout_35')
net = slim.fully_connected(
net, num_outputs, activation_fn=None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
"""calculate ious
Args:
boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] => (x_center, y_center, w, h)
boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] => (x_center, y_center, w, h)
Return:
iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
No non-maximum suppression is applied here; this is the raw output.
"""
with tf.variable_scope(scope):
# transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
# tf.stack turns n-dim tensors into an (n+1)-dim tensor: it adds a new last axis and stacks along it
boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
boxes1[..., 1] - boxes1[..., 3] / 2.0,
boxes1[..., 0] + boxes1[..., 2] / 2.0,
boxes1[..., 1] + boxes1[..., 3] / 2.0],
axis=-1)
boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
boxes2[..., 1] - boxes2[..., 3] / 2.0,
boxes2[..., 0] + boxes2[..., 2] / 2.0,
boxes2[..., 1] + boxes2[..., 3] / 2.0],
axis=-1)
# calculate the left up point & right down point
# note: this finds the lower-left and upper-right corners of the intersection
lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])
# intersection
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[..., 0] * intersection[..., 1]
# calculate the boxs1 square and boxs2 square
# w * h of the boxes before the corner transformation
square1 = boxes1[..., 2] * boxes1[..., 3]
square2 = boxes2[..., 2] * boxes2[..., 3]
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
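# Worked example (illustrative values only): for box1 = (0.5, 0.5, 0.4, 0.4) and
# box2 = (0.6, 0.6, 0.4, 0.4) in (x_center, y_center, w, h) form, the corner transform gives
# (0.3, 0.3, 0.7, 0.7) and (0.4, 0.4, 0.8, 0.8); the intersection area is 0.3 * 0.3 = 0.09,
# the union is 0.16 + 0.16 - 0.09 = 0.23, so the IoU is 0.09 / 0.23, about 0.39.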
def loss_layer(self, predicts, labels, scope='loss_layer'):
"""
:param predicts: tensor obtained from the convolutional network
:param labels: ground-truth annotations to be decoded
:param scope:
:return: loss
"""
with tf.variable_scope(scope):
# class vector, shape (45, 7, 7, 20)
# classes here are the probabilities of the 20 categories, i.e. C conditional probabilities P(Class_i | Object)
predict_classes = tf.reshape(
predicts[:, :self.boundary1],
[self.batch_size, self.cell_size, self.cell_size, self.num_class]
)
# confidence scores, shape (45, 7, 7, 2)
predict_scales = tf.reshape(
predicts[:, self.boundary1:self.boundary2],
[self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell]
)
# predicted box coordinates, shape (45, 7, 7, 2, 4)
predict_boxes = tf.reshape(
predicts[:, self.boundary2:],
[self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell, 4]
)
# convert the ground-truth labels into the corresponding matrix form
# response is a 7*7 matrix: 1 at the grid cell containing an object center, 0 elsewhere
# response = 1_obj_i
response = tf.reshape(
labels[..., 0],
[self.batch_size, self.cell_size, self.cell_size, 1]
)
# localization (box coordinates)
boxes = tf.reshape(
labels[..., 1:5],
[self.batch_size, self.cell_size, self.cell_size, 1, 4]
)
# ground-truth box coordinates, shape (45, 7, 7, 2, 4)
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]) / self.image_size
# one-hot encode the class information: 1 for the actual object class, 0 elsewhere
classes = labels[..., 5:]
offset = tf.reshape(
tf.constant(self.offset, dtype=tf.float32),
[1, self.cell_size, self.cell_size, self.boxes_per_cell]
)
offset = tf.tile(offset, [self.batch_size, 1, 1, 1])
offset_tran = tf.transpose(offset, (0, 2, 1, 3))
# shape [batch_size, 7, 7, 2, 4]
# add the cell offset to the center points
predict_boxes_tran = tf.stack(
[(predict_boxes[..., 0] + offset) / self.cell_size,
(predict_boxes[..., 1] + offset_tran) / self.cell_size,
tf.square(predict_boxes[..., 2]),
tf.square(predict_boxes[..., 3])], axis=-1
)
# shape: batch*7*7*2
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
# calculate I tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
# 1_obj_ij: whether the j-th bbox of the i-th cell contains an object
# object_mask is a refined version of response, resolved down to the individual bboxes inside a cell
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
# response is Pr(object) (a 0/1 matrix marking object presence); multiplying it in here folds it into object_mask, so only the IoU has to be considered afterwards
object_mask = tf.cast((iou_predict_truth >= object_mask), tf.float32) * response
# calculate no_I tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
# subtract from an all-ones matrix; the remaining 1s mark the no-object entries
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32) - object_mask
# the square roots apply sqrt to w and h; the reason is explained in the paper
# shape (4, batch_size, 7, 7, 2)
boxes_tran = tf.stack(
[boxes[..., 0] * self.cell_size - offset,
boxes[..., 1] * self.cell_size - offset_tran,
tf.sqrt(boxes[..., 2]),
tf.sqrt(boxes[..., 3])], axis=-1
)
# classification loss: predict_classes holds probabilities, classes is the one-hot label
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(class_delta), axis=[1, 2, 3]),
name='class_loss') * self.class_scale
# confidence loss
# object_loss
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(object_delta), axis=[1, 2, 3]),
name='object_loss') * self.object_scale
# noobject_loss
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(noobject_delta), axis=[1, 2, 3]),
name='noobject_loss') * self.noobject_scale
# coord_loss also uses object_mask
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(
tf.reduce_sum(tf.square(boxes_delta), axis=[1, 2, 3, 4]),
name='coord_loss') * self.coord_scale
tf.losses.add_loss(class_loss)
tf.losses.add_loss(object_loss)
tf.losses.add_loss(noobject_loss)
tf.losses.add_loss(coord_loss)
tf.summary.scalar('class_loss', class_loss)
tf.summary.scalar('object_loss', object_loss)
tf.summary.scalar('noobject_loss', noobject_loss)
tf.summary.scalar('coord_loss', coord_loss)
tf.summary.histogram('boxes_delta_x', boxes_delta[..., 0])
tf.summary.histogram('boxes_delta_y', boxes_delta[..., 1])
tf.summary.histogram('boxes_delta_w', boxes_delta[..., 2])
tf.summary.histogram('boxes_delta_h', boxes_delta[..., 3])
tf.summary.histogram('iou', iou_predict_truth)
def leaky_relu(alpha):
def op(inputs):
return tf.nn.leaky_relu(inputs, alpha=alpha, name='leaky_relu')
return op
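# Usage sketch (assumption; mirrors how a cfg-driven training script would typically use this class):
# net = YOLONet(is_training=True) # builds the graph, the loss ops and net.total_loss
# feed = {net.images: image_batch, net.labels: label_batch} # image_batch/label_batch are hypothetical numpy arrays
# sess.run(train_op, feed_dict=feed) # train_op would be an optimizer step minimizing net.total_loss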
|
py | 1a320f050440cf3ab542d763fe8e51db81bc97f9 | """
Functions that can are used to modify XBlock fragments for use in the LMS and Studio
"""
import datetime
import hashlib
import json
import logging
import re
import uuid
import markupsafe
import webpack_loader.utils
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from django.utils.html import escape
from edx_django_utils.plugins import pluggable_override
from lxml import etree, html
from opaque_keys.edx.asides import AsideUsageKeyV1, AsideUsageKeyV2
from pytz import UTC
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.exceptions import InvalidScopeError
from xblock.scorable import ScorableXBlockMixin
from common.djangoapps import static_replace
from common.djangoapps.edxmako.shortcuts import render_to_string
from xmodule.seq_module import SequenceBlock
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.vertical_block import VerticalBlock
from xmodule.x_module import (
PREVIEW_VIEWS, STUDENT_VIEW, STUDIO_VIEW,
XModule, XModuleDescriptor, shim_xmodule_js,
)
log = logging.getLogger(__name__)
def wrap_fragment(fragment, new_content):
"""
Returns a new Fragment that has `new_content` as its content
and all of the resources from `fragment`
"""
wrapper_frag = Fragment(content=new_content)
wrapper_frag.add_fragment_resources(fragment)
return wrapper_frag
def request_token(request):
"""
Return a unique token for the supplied request.
This token will be the same for all calls to `request_token`
made on the same request object.
"""
# pylint: disable=protected-access
if not hasattr(request, '_xblock_token'):
request._xblock_token = uuid.uuid1().hex
return request._xblock_token
def wrap_xblock(
runtime_class,
block,
view,
frag,
context,
usage_id_serializer,
request_token, # pylint: disable=redefined-outer-name
display_name_only=False,
extra_data=None
):
"""
Wraps the results of rendering an XBlock view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param block: An XBlock (that may be an XModule or XModuleDescriptor)
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param usage_id_serializer: A function to serialize the block's usage_id for use by the
front-end Javascript Runtime.
:param request_token: An identifier that is unique per-request, so that only xblocks
rendered as part of this request will have their javascript initialized.
:param display_name_only: If true, don't render the fragment content at all.
Instead, just render the `display_name` of `block`
:param extra_data: A dictionary with extra data values to be set on the wrapper
"""
if extra_data is None:
extra_data = {}
# If any mixins have been applied, then use the unmixed class
class_name = getattr(block, 'unmixed_class', block.__class__).__name__
data = {}
data.update(extra_data)
if context:
data.update(context.get('wrap_xblock_data', {}))
css_classes = [
'xblock',
f'xblock-{markupsafe.escape(view)}',
'xblock-{}-{}'.format(
markupsafe.escape(view),
markupsafe.escape(block.scope_ids.block_type),
)
]
if view == STUDENT_VIEW and getattr(block, 'HIDDEN', False):
css_classes.append('is-hidden')
if isinstance(block, (XModule, XModuleDescriptor)) or getattr(block, 'uses_xmodule_styles_setup', False):
if view in PREVIEW_VIEWS:
# The block is acting as an XModule
css_classes.append('xmodule_display')
elif view == STUDIO_VIEW:
# The block is acting as an XModuleDescriptor
css_classes.append('xmodule_edit')
css_classes.append('xmodule_' + markupsafe.escape(class_name))
if isinstance(block, (XModule, XModuleDescriptor)):
data['type'] = block.js_module_name
shim_xmodule_js(frag, block.js_module_name)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = block.scope_ids.block_type
data['usage-id'] = usage_id_serializer(block.scope_ids.usage_id)
data['request-token'] = request_token
data['graded'] = getattr(block, 'graded', False)
data['has-score'] = getattr(block, 'has_score', False)
if block.name:
data['name'] = block.name
template_context = {
'content': block.display_name if display_name_only else frag.content,
'classes': css_classes,
'display_name': block.display_name_with_default_escaped, # xss-lint: disable=python-deprecated-display-name
'data_attributes': ' '.join(f'data-{markupsafe.escape(key)}="{markupsafe.escape(value)}"'
for key, value in data.items()),
}
if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
template_context['js_init_parameters'] = frag.json_init_args
else:
template_context['js_init_parameters'] = ""
if isinstance(block, (XModule, XModuleDescriptor)):
# Add the webpackified asset tags
add_webpack_to_fragment(frag, class_name)
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
def wrap_xblock_aside(
runtime_class,
aside,
view,
frag,
context, # pylint: disable=unused-argument
usage_id_serializer,
request_token, # pylint: disable=redefined-outer-name
extra_data=None,
extra_classes=None
):
"""
Wraps the results of rendering an XBlockAside view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param aside: An XBlockAside
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param usage_id_serializer: A function to serialize the block's usage_id for use by the
front-end Javascript Runtime.
:param request_token: An identifier that is unique per-request, so that only xblocks
rendered as part of this request will have their javascript initialized.
:param extra_data: A dictionary with extra data values to be set on the wrapper
:param extra_classes: A list with extra classes to be set on the wrapper element
"""
if extra_data is None:
extra_data = {}
data = {}
data.update(extra_data)
css_classes = [
f'xblock-{markupsafe.escape(view)}',
'xblock-{}-{}'.format(
markupsafe.escape(view),
markupsafe.escape(aside.scope_ids.block_type),
),
'xblock_asides-v1'
]
if extra_classes:
css_classes.extend(extra_classes)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = aside.scope_ids.block_type
data['usage-id'] = usage_id_serializer(aside.scope_ids.usage_id)
data['request-token'] = request_token
template_context = {
'content': frag.content,
'classes': css_classes,
'data_attributes': ' '.join(f'data-{markupsafe.escape(key)}="{markupsafe.escape(value)}"'
for key, value in data.items()),
}
if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
template_context['js_init_parameters'] = frag.json_init_args
else:
template_context['js_init_parameters'] = ""
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
def replace_jump_to_id_urls(course_id, jump_to_id_base_url, block, view, frag, context): # pylint: disable=unused-argument
"""
This will replace a link between courseware in the format
/jump_to_id/<id> with a URL for a page that will correctly redirect
This is similar to replace_course_urls, but much more flexible and
durable for Studio authored courses. See more comments in static_replace.replace_jump_to_urls
course_id: The course_id in which this rewrite happens
jump_to_id_base_url:
A app-tier (e.g. LMS) absolute path to the base of the handler that will perform the
redirect. e.g. /courses/<org>/<course>/<run>/jump_to_id. NOTE the <id> will be appended to
the end of this URL at re-write time
output: a new :class:`~web_fragments.fragment.Fragment` that modifies `frag` with
content that has been update with /jump_to_id links replaced
"""
return wrap_fragment(frag, static_replace.replace_jump_to_id_urls(frag.content, course_id, jump_to_id_base_url))
def replace_course_urls(course_id, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /course/...
with urls that are /courses/<course_id>/...
"""
return wrap_fragment(frag, static_replace.replace_course_urls(frag.content, course_id))
def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /static/...
with urls that are /static/<prefix>/...
"""
return wrap_fragment(frag, static_replace.replace_static_urls(
frag.content,
data_dir,
course_id,
static_asset_path=static_asset_path
))
def grade_histogram(module_id):
'''
Print out a histogram of grades on a given problem in staff member debug info.
Warning: If a student has just looked at an xmodule and not attempted
it, their grade is None. Since there will always be at least one such student
this function almost always returns [].
'''
from django.db import connection
cursor = connection.cursor()
query = """\
SELECT courseware_studentmodule.grade,
COUNT(courseware_studentmodule.student_id)
FROM courseware_studentmodule
WHERE courseware_studentmodule.module_id=%s
GROUP BY courseware_studentmodule.grade"""
# Passing module_id this way prevents sql-injection.
cursor.execute(query, [str(module_id)])
grades = list(cursor.fetchall())
grades.sort(key=lambda x: x[0]) # Add ORDER BY to sql query?
if len(grades) >= 1 and grades[0][0] is None:
return []
return grades
def sanitize_html_id(html_id):
"""
Template uses element_id in js function names, so can't allow dashes and colons.
"""
sanitized_html_id = re.sub(r'[:-]', '_', html_id)
return sanitized_html_id
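# Illustration (hypothetical id): sanitize_html_id("i4x-edX-Demo-problem-abc:1")
# returns "i4x_edX_Demo_problem_abc_1", which is then safe to embed in JS function names.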
def add_staff_markup(user, disable_staff_debug_info, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the output of the old get_html function with additional information
for admin users only, including a histogram of student answers, the
definition of the xmodule, and a link to view the module in Studio
if it is a Studio edited, mongo stored course.
Does nothing if module is a SequenceBlock.
"""
if context and context.get('hide_staff_markup', False):
# If hide_staff_markup is passed, don't add the markup
return frag
# TODO: make this more general, eg use an XModule attribute instead
if isinstance(block, VerticalBlock) and (not context or not context.get('child_of_vertical', False)):
return frag
if isinstance(block, SequenceBlock) or getattr(block, 'HIDDEN', False):
return frag
block_id = block.location
if block.has_score and settings.FEATURES.get('DISPLAY_HISTOGRAMS_TO_STAFF'):
histogram = grade_histogram(block_id)
render_histogram = len(histogram) > 0
else:
histogram = None
render_histogram = False
if settings.FEATURES.get('ENABLE_LMS_MIGRATION') and hasattr(block.runtime, 'filestore'):
[filepath, filename] = getattr(block, 'xml_attributes', {}).get('filename', ['', None])
osfs = block.runtime.filestore
if filename is not None and osfs.exists(filename):
# if original, unmangled filename exists then use it (github
# doesn't like symlinks)
filepath = filename
data_dir = block.static_asset_path or osfs.root_path.rsplit('/')[-1]
giturl = block.giturl or 'https://github.com/MITx'
edit_link = f"{giturl}/{data_dir}/tree/master/{filepath}"
else:
edit_link = False
# Need to define all the variables that are about to be used
giturl = ""
data_dir = ""
source_file = block.source_file # source used to generate the problem XML, eg latex or word
# Useful to indicate to staff if problem has been released or not.
# TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access,
# instead of now>mstart comparison here.
now = datetime.datetime.now(UTC)
is_released = "unknown"
mstart = block.start
if mstart is not None:
is_released = "<font color='red'>Yes!</font>" if (now > mstart) else "<font color='green'>Not yet</font>"
field_contents = []
for name, field in block.fields.items():
try:
field_contents.append((name, field.read_from(block)))
except InvalidScopeError:
log.warning("Unable to read field in Staff Debug information", exc_info=True)
field_contents.append((name, "WARNING: Unable to read field"))
staff_context = {
'fields': field_contents,
'xml_attributes': getattr(block, 'xml_attributes', {}),
'tags': block._class_tags, # pylint: disable=protected-access
'location': block.location,
'xqa_key': block.xqa_key,
'source_file': source_file,
'source_url': f'{giturl}/{data_dir}/tree/master/{source_file}',
'category': str(block.__class__.__name__),
'element_id': sanitize_html_id(block.location.html_id()),
'edit_link': edit_link,
'user': user,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'histogram': json.dumps(histogram),
'render_histogram': render_histogram,
'block_content': frag.content,
'is_released': is_released,
'can_reset_attempts': 'attempts' in block.fields,
'can_rescore_problem': hasattr(block, 'rescore'),
'can_override_problem_score': isinstance(block, ScorableXBlockMixin),
'disable_staff_debug_info': disable_staff_debug_info,
}
if isinstance(block, ScorableXBlockMixin):
staff_context['max_problem_score'] = block.max_score()
return wrap_fragment(frag, render_to_string("staff_problem_info.html", staff_context))
def get_course_update_items(course_updates, provided_index=0):
"""
Returns list of course_updates data dictionaries either from new format if available or
from old. This function don't modify old data to new data (in db), instead returns data
in common old dictionary format.
New Format: {"items" : [{"id": computed_id, "date": date, "content": html-string}],
"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
Old Format: {"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
"""
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele).decode('utf-8') for ele in html_parsed[1:]])
return content
if course_updates and getattr(course_updates, "items", None):
if provided_index and 0 < provided_index <= len(course_updates.items):
return course_updates.items[provided_index - 1]
else:
# return list in reversed order (old format: [4,3,2,1]) for compatibility
return list(reversed(course_updates.items))
course_update_items = []
if course_updates:
# old method to get course updates
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
except (etree.XMLSyntaxError, etree.ParserError):
log.error("Cannot parse: " + course_updates.data) # lint-amnesty, pylint: disable=logging-not-lazy
escaped = escape(course_updates.data)
# xss-lint: disable=python-concat-html
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
# confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val
if course_html_parsed.tag == 'ol':
# 0 is the newest
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
# make the id on the client be 1..len w/ 1 being the oldest and len being the newest
computed_id = len(course_html_parsed) - index
payload = {
"id": computed_id,
"date": update.findtext("h2"),
"content": content
}
if provided_index == 0:
course_update_items.append(payload)
elif provided_index == computed_id:
return payload
return course_update_items
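# Illustration (hypothetical data): for old-format course_updates.data of
# "<ol><li><h2>April 1</h2>Welcome!</li></ol>" this returns
# [{"id": 1, "date": "April 1", "content": "Welcome!"}]; with provided_index=1 the single
# payload dict is returned instead of the list.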
def xblock_local_resource_url(block, uri):
"""
Returns the URL for an XBlock's local resource.
Note: when running with the full Django pipeline, the file will be accessed
as a static asset which will use a CDN in production.
"""
xblock_class = getattr(block.__class__, 'unmixed_class', block.__class__)
if settings.PIPELINE['PIPELINE_ENABLED'] or not settings.REQUIRE_DEBUG:
return staticfiles_storage.url('xblock/resources/{package_name}/{path}'.format(
package_name=xblock_resource_pkg(xblock_class),
path=uri
))
else:
return reverse('xblock_resource_url', kwargs={
'block_type': block.scope_ids.block_type,
'uri': uri,
})
def xblock_resource_pkg(block):
"""
Return the module name needed to find an XBlock's shared static assets.
This method will return the full module name that is one level higher than
the one the block is in. For instance, problem_builder.answer.AnswerBlock
has a __module__ value of 'problem_builder.answer'. This method will return
'problem_builder' instead. However, for edx-ora2's
openassessment.xblock.openassessmentblock.OpenAssessmentBlock, the value
returned is 'openassessment.xblock'.
XModules are special cased because they're local to this repo and they
actually don't share their resource files when compiled out as part of the
XBlock asset pipeline. This only covers XBlocks and XModules using the
XBlock-style of asset specification. If they use the XModule bundling part
of the asset pipeline (xmodule_assets), their assets are compiled through an
entirely separate mechanism and put into lms-modules.js/css.
"""
# XModules are a special case because they map to different dirs for
# sub-modules.
module_name = block.__module__
if module_name.startswith('xmodule.'):
return module_name
return module_name.rsplit('.', 1)[0]
def is_xblock_aside(usage_key):
"""
Returns True if the given usage key is for an XBlock aside
Args:
usage_key (opaque_keys.edx.keys.UsageKey): A usage key
Returns:
bool: Whether or not the usage key is an aside key type
"""
return isinstance(usage_key, (AsideUsageKeyV1, AsideUsageKeyV2))
def get_aside_from_xblock(xblock, aside_type):
"""
Gets an instance of an XBlock aside from the XBlock that it's decorating. This also
configures the aside instance with the runtime and fields of the given XBlock.
Args:
xblock (xblock.core.XBlock): The XBlock that the desired aside is decorating
aside_type (str): The aside type
Returns:
xblock.core.XBlockAside: Instance of an xblock aside
"""
return xblock.runtime.get_aside_of_type(xblock, aside_type)
def hash_resource(resource):
"""
Hash a :class:`web_fragments.fragment.FragmentResource
Those hash values are used to avoid loading the resources
multiple times.
"""
md5 = hashlib.md5()
for data in resource:
if isinstance(data, bytes):
md5.update(data)
elif isinstance(data, str):
md5.update(data.encode('utf-8'))
else:
md5.update(repr(data).encode('utf-8'))
return md5.hexdigest()
@pluggable_override('OVERRIDE_GET_UNIT_ICON')
def get_icon(block):
"""
A function that returns the CSS class representing an icon to use for this particular
XBlock (in the courseware navigation bar). Mostly used for Vertical/Unit XBlocks.
It can be overridden by setting `GET_UNIT_ICON_IMPL` to an alternative implementation.
"""
return block.get_icon_class()
|
py | 1a320f1d83ef2de28dffbe6a9a17759bb44b8b45 | import subprocess, os, re
from mycroft import MycroftSkill, intent_handler
class SystemControl(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
self.log.info("System Control Skill loaded")
@intent_handler('ShutDown.intent')
def handle_shut_down_intent(self, message):
self.speak_dialog('shutdown')
@intent_handler('OpenApp.intent')
def handle_open_app_intent(self, message):
app_name = message.data.get('app')
ls = subprocess.run('ls ~/.local/share/applications/*.desktop',
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True)
user_apps = ls.stdout.splitlines()
matches = [app for app in user_apps if app_name in app]
#print(user_apps)
#print(sorted(matches, key=len))
if matches:
with open(os.path.join('~/.local/share/applications/', sorted(matches, key=len)[0])) as f:
lines = f.readlines()
for line in lines:
path = re.match(r'^Exec=(.*)', line)
if path:
self.log.info('Executing ' + path.group(1))
launch = subprocess.run('exec ' + path.group(1),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
if not launch.stderr:
self.speak_dialog('open.app', data={'app': app_name})
else:
self.speak(launch.stderr)
else:
self.speak('I did not find ' + app_name)
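# Notes on the handler above (added commentary, based on Desktop Entry conventions): the
# shell-expanded `ls` already yields absolute paths, so the os.path.join call effectively returns
# the matched path unchanged, and an Exec value may still carry field codes such as %u or %F
# that are passed to the shell verbatim.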
def create_skill():
return SystemControl()
|
py | 1a320fa60efeebd8385e4ebaf98fd2a0fae2b823 | # Generated by Django 3.0.9 on 2020-08-25 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0006_course_pricing_tiers'),
]
operations = [
migrations.AddField(
model_name='pricing',
name='currency',
field=models.CharField(default='usd', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='pricing',
name='price',
field=models.DecimalField(decimal_places=2, default=17, max_digits=5),
preserve_default=False,
),
]
|
py | 1a320fadb0602fd87f1b308a6d513dde3270e5f5 | import pyxel
class Hero:
def __init__(self):
self.x = 0
self.y = 52
self.walk_counter = 0
self.state = 'idle_right'
self.models = {
'idle_right': [
[0, 0, 0, 16, 16, 0]
],
'idle_left': [
[0, 0, 0, -16, 16, 0]
],
'walk_right': [
[0, 0, 0, 16, 16, 0]
],
'walk_left': [
[0, 0, 0, -16, 16, 0]
],
'walk_down': [
[0, 0, 0, 16, 16, 0]
],
'walk_up': [
[0, 0, 0, 16, 16, 0]
]
}
def draw(self):
if self.state[:4] == 'walk':
pyxel.blt(self.x, self.y, *self.models[self.state][self.walk_counter])
else:
pyxel.blt(self.x, self.y, *self.models[self.state][self.walk_counter])
class OneBit:
def __init__(self):
pyxel.image(0).load(0, 0, 'assets.png')
self.models = {
'pine_group_1': [0, 80, 128, 32, 48, 0],
'pine_group_2': [0, 208, 48, 32, 48, 0],
'pine_single': [0, 48, 144, 32, 32, 0],
'pine_single_bare': [0, 112, 144, 32, 32, 0],
'grass_2': [0, 80, 0, 16, 16, 0],
'grass_4': [0, 80, 16, 16, 16, 0],
'tower_cone': [0, 144, 128, 16, 48, 0],
'tower_broken': [0, 208, 96, 32, 64, 0],
'rock_wall_1': [0, 16, 96, 16, 16, 0],
'rock_wall_2': [0, 32, 96, 16, 16, 0]
}
def draw(self):
pyxel.blt(40, 40, *self.models['pine_single'])
pyxel.blt(70, 0, *self.models['pine_single_bare'])
pyxel.blt(10, 176, *self.models['pine_single'])
pyxel.blt(20, 40, *self.models['grass_2'])
pyxel.blt(60, 38, *self.models['grass_2'])
pyxel.blt(20, 60, *self.models['grass_4'])
pyxel.blt(30, 80, *self.models['grass_4'])
|
py | 1a32109611d244504e6b9b805becad332d2885bb | from mrcnn.config import Config  # assumed import: the base Config class from Matterport's Mask R-CNN
class ROBConfig(Config):
"""Configuration for training on the ROB dataset.
Derives from the base Config class and overrides values specific
to the ROB dataset (background + 4 class labels).
"""
# Give the configuration a recognizable name
NAME = "rob"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
# IMAGES_PER_GPU = 8
# Default is 2
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 4 # background + 4 class labels
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
# IMAGE_MIN_DIM = 128
# IMAGE_MAX_DIM = 128
# Default is 800 x 1024
# Use smaller anchors because our image and objects are small
# RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# DEFAULT: RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Default is 200
# Use a small epoch since the data is simple
# STEPS_PER_EPOCH = 100
# Default is 1000
STEPS_PER_EPOCH = int(5561/(GPU_COUNT*IMAGES_PER_GPU))
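# With GPU_COUNT = 1 and IMAGES_PER_GPU = 2 this evaluates to int(5561 / 2) = 2780 steps per epoch.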
# use small validation steps since the epoch is small
# VALIDATION_STEPS = 5
# Max number of final detections
DETECTION_MAX_INSTANCES = 5
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.6
# Run these lines in the co-lab cell where this is imported:
# config = ROBConfig()
# config.display()
class InferenceConfig(ROBConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1 |
py | 1a3210bc237e2f0bb241c4fa25459a1d0c04eafc | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import common_db_mixin
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db
from neutron.plugins.nuage import nuage_models
def add_net_partition(session, netpart_id,
l3dom_id, l2dom_id,
ent_name):
net_partitioninst = nuage_models.NetPartition(id=netpart_id,
name=ent_name,
l3dom_tmplt_id=l3dom_id,
l2dom_tmplt_id=l2dom_id)
session.add(net_partitioninst)
return net_partitioninst
def delete_net_partition(session, net_partition):
session.delete(net_partition)
def delete_net_partition_by_id(session, netpart_id):
query = session.query(nuage_models.NetPartition)
query.filter_by(id=netpart_id).delete()
def get_net_partition_by_name(session, name):
query = session.query(nuage_models.NetPartition)
return query.filter_by(name=name).first()
def get_net_partition_by_id(session, id):
query = session.query(nuage_models.NetPartition)
return query.filter_by(id=id).first()
def get_net_partitions(session, filters=None, fields=None):
query = session.query(nuage_models.NetPartition)
common_db = common_db_mixin.CommonDbMixin()
query = common_db._apply_filters_to_query(query,
nuage_models.NetPartition,
filters)
return query
def get_net_partition_ids(session):
query = session.query(nuage_models.NetPartition.id)
return [netpart[0] for netpart in query]
def get_net_partition_with_lock(session, netpart_id):
query = session.query(nuage_models.NetPartition)
netpart_db = query.filter_by(id=netpart_id).with_lockmode('update').one()
return make_net_partition_dict(netpart_db)
def get_subnet_ids(session):
query = session.query(models_v2.Subnet.id)
return [subn[0] for subn in query]
def get_subnet_with_lock(session, sub_id):
query = session.query(models_v2.Subnet)
subnet_db = query.filter_by(id=sub_id).with_lockmode('update').one()
return subnet_db
def get_router_ids(session):
query = session.query(l3_db.Router.id)
return [router[0] for router in query]
def get_router_with_lock(session, router_id):
query = session.query(l3_db.Router)
router_db = query.filter_by(id=router_id).with_lockmode('update').one()
return router_db
def get_secgrp_ids(session):
query = session.query(securitygroups_db.SecurityGroup.id)
return [secgrp[0] for secgrp in query]
def get_secgrp_with_lock(session, secgrp_id):
query = session.query(securitygroups_db.SecurityGroup)
secgrp_db = query.filter_by(id=secgrp_id).with_lockmode('update').one()
return secgrp_db
def get_secgrprule_ids(session):
query = session.query(securitygroups_db.SecurityGroupRule.id)
return [secgrprule[0] for secgrprule in query]
def get_secgrprule_with_lock(session, secgrprule_id):
query = session.query(securitygroups_db.SecurityGroupRule)
secgrprule_db = (query.filter_by(id=secgrprule_id).with_lockmode(
'update').one())
return secgrprule_db
def get_port_with_lock(session, port_id):
query = session.query(models_v2.Port)
port_db = query.filter_by(id=port_id).with_lockmode('update').one()
return port_db
def get_fip_with_lock(session, fip_id):
query = session.query(l3_db.FloatingIP)
fip_db = query.filter_by(id=fip_id).with_lockmode('update').one()
return fip_db
def add_entrouter_mapping(session, np_id,
router_id,
n_l3id):
ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id,
router_id=router_id,
nuage_router_id=n_l3id)
session.add(ent_rtr_mapping)
def add_subnetl2dom_mapping(session, neutron_subnet_id,
nuage_sub_id,
np_id,
l2dom_id=None,
nuage_user_id=None,
nuage_group_id=None):
subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id,
nuage_subnet_id=nuage_sub_id,
net_partition_id=np_id,
nuage_l2dom_tmplt_id=l2dom_id,
nuage_user_id=nuage_user_id,
nuage_group_id=nuage_group_id)
session.add(subnet_l2dom)
def update_subnetl2dom_mapping(subnet_l2dom,
new_dict):
subnet_l2dom.update(new_dict)
def get_update_subnetl2dom_mapping(session, new_dict):
subnet_l2dom = get_subnet_l2dom_with_lock(session, new_dict['subnet_id'])
subnet_l2dom.update(new_dict)
def update_entrtr_mapping(ent_rtr, new_dict):
ent_rtr.update(new_dict)
def get_update_entrtr_mapping(session, new_dict):
ent_rtr = get_ent_rtr_mapping_with_lock(session, new_dict['router_id'])
ent_rtr.update(new_dict)
def delete_subnetl2dom_mapping(session, subnet_l2dom):
session.delete(subnet_l2dom)
def get_subnet_l2dom_by_id(session, id):
query = session.query(nuage_models.SubnetL2Domain)
return query.filter_by(subnet_id=id).first()
def get_subnet_l2dom_with_lock(session, id):
query = session.query(nuage_models.SubnetL2Domain)
subl2dom = query.filter_by(subnet_id=id).with_lockmode('update').one()
return subl2dom
def get_ent_rtr_mapping_by_entid(session, entid):
query = session.query(nuage_models.NetPartitionRouter)
return query.filter_by(net_partition_id=entid).all()
def get_ent_rtr_mapping_by_rtrid(session, rtrid):
query = session.query(nuage_models.NetPartitionRouter)
return query.filter_by(router_id=rtrid).first()
def add_network_binding(session, network_id, network_type, physical_network,
vlan_id):
binding = nuage_models.ProviderNetBinding(
network_id=network_id,
network_type=network_type,
physical_network=physical_network,
vlan_id=vlan_id)
session.add(binding)
return binding
def get_network_binding(session, network_id):
return (session.query(nuage_models.ProviderNetBinding).
filter_by(network_id=network_id).
first())
def get_ent_rtr_mapping_with_lock(session, rtrid):
query = session.query(nuage_models.NetPartitionRouter)
entrtr = query.filter_by(router_id=rtrid).with_lockmode('update').one()
return entrtr
def get_ipalloc_for_fip(session, network_id, ip, lock=False):
query = session.query(models_v2.IPAllocation)
if lock:
# Lock is required when the resource is synced
ipalloc_db = (query.filter_by(network_id=network_id).filter_by(
ip_address=ip).with_lockmode('update').one())
else:
ipalloc_db = (query.filter_by(network_id=network_id).filter_by(
ip_address=ip).one())
return make_ipalloc_dict(ipalloc_db)
def get_all_net_partitions(session):
net_partitions = get_net_partitions(session)
return make_net_partition_list(net_partitions)
def get_all_routes(session):
routes = session.query(extraroute_db.RouterRoute)
return make_route_list(routes)
def get_route_with_lock(session, dest, nhop):
query = session.query(extraroute_db.RouterRoute)
route_db = (query.filter_by(destination=dest).filter_by(nexthop=nhop)
.with_lockmode('update').one())
return make_route_dict(route_db)
def make_ipalloc_dict(subnet_db):
return {'port_id': subnet_db['port_id'],
'subnet_id': subnet_db['subnet_id'],
'network_id': subnet_db['network_id'],
'ip_address': subnet_db['ip_address']}
def make_net_partition_dict(net_partition):
return {'id': net_partition['id'],
'name': net_partition['name'],
'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'],
'l2dom_tmplt_id': net_partition['l2dom_tmplt_id']}
def make_net_partition_list(net_partitions):
return [make_net_partition_dict(net_partition) for net_partition in
net_partitions]
def make_route_dict(route):
return {'destination': route['destination'],
'nexthop': route['nexthop'],
'router_id': route['router_id']}
def make_route_list(routes):
return [make_route_dict(route) for route in routes]
def make_subnl2dom_dict(subl2dom):
return {'subnet_id': subl2dom['subnet_id'],
'net_partition_id': subl2dom['net_partition_id'],
'nuage_subnet_id': subl2dom['nuage_subnet_id'],
'nuage_l2dom_tmplt_id': subl2dom['nuage_l2dom_tmplt_id'],
'nuage_user_id': subl2dom['nuage_user_id'],
'nuage_group_id': subl2dom['nuage_group_id']}
def make_entrtr_dict(entrtr):
return {'net_partition_id': entrtr['net_partition_id'],
'router_id': entrtr['router_id'],
'nuage_router_id': entrtr['nuage_router_id']}
|
py | 1a32119c70c0d1383a924bfd29be201dc7093bcb | import time, datetime
print("Importing OpenShift/Kubernetes packages ...")
import kubernetes
import ocp_resources
import openshift
from ocp_resources.node import Node
from ocp_resources.machine import Machine
from ocp_resources.node import Node
from openshift.dynamic import DynamicClient
try:
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
except Exception:
client_k8s = None
print("WARNING: kubernetes not available.")
print("Importing AWS boto3 ...")
import boto3
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html
client_ec2 = boto3.client('ec2')
resource_ec2 = boto3.resource('ec2')
print("Ready.")
def wait_openshift():
first = True
print("Waiting for OpenShift cluster to be ready ...")
import urllib3
while True:
try:
global client_k8s
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
nodes = [m for m in Node.get(dyn_client=client_k8s)]
if len(nodes) != 0:
print(f"Found {len(nodes)} node, OpenShift Cluster is ready!")
break
except urllib3.exceptions.MaxRetryError: pass
except kubernetes.client.exceptions.ApiException: pass
time.sleep(10)
def get_machine_props():
if not client_k8s:
return None, None
machines = [m for m in Machine.get(dyn_client=client_k8s)]
if len(machines) != 1:
raise RuntimeError("Should be only one machine ...")
machine = machines[0]
cluster_name = machine.cluster_name
print(f"Cluster name: {cluster_name}")
instance = resource_ec2.Instance(machine.instance.status.providerStatus.instanceId)
instance.load()
print(f"Instance Id: {instance.id}")
zone = machine.instance.spec.providerSpec.value.placement.availabilityZone
print(f"Availability zone: {zone}")
return cluster_name, instance, zone
def get_instance_root_volume(instance):
volumes = [v for v in instance.volumes.all()]
if len(volumes) > 1:
print("WARNING: more than 1 volume found ...")
return volumes[0]
def get_cluster_snapshot(cluster_name, instance, zone):
resp = client_ec2.describe_snapshots(
Filters=[{
'Name': f'tag:kubernetes.io/cluster/{cluster_name}',
'Values': ['owned']
}])
snapshots = resp["Snapshots"]
if len(snapshots) == 0:
return None
if len(snapshots) > 1:
print("WARNING: more than 1 snapshot found ... taking the first one.")
snapshot = resource_ec2.Snapshot(snapshots[0]['SnapshotId'])
snapshot.load()
return snapshot
def await_snapshot(snapshot):
prev = ""
if snapshot.progress == "100%":
print(f"Snapshot {snapshot.id} is ready.")
while not snapshot.progress == "100%":
if prev == "":
print(f"Awaiting for the completion of snapshot {snapshot.id} ...")
print(snapshot.progress)
prev = snapshot.progress
time.sleep(10)
snapshot.reload()
if prev != snapshot.progress:
prev = snapshot.progress
print(snapshot.progress)
def human_ts():
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
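# Example (illustrative): human_ts() returns something like "2024-05-01T13:45" (local time, minute resolution).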
|
py | 1a3211b5dfcd3d60c7c8830747babf647e0a79aa | txt = "hello world"
for c in txt:
if c == " ":
continue
print(c)
# ! Will print every single char from txt EXCEPT the space
|
py | 1a32123fda9260e038af6861397ebaa8e2a07a28 | import os
import sys
import random
import numpy as np
import pandas as pd
import csv
import time
import gc
from IPython import get_ipython
from datetime import datetime
from sys import platform
cwd = os.getcwd() # Get current working directory
root_folder = os.sep + "HFPN-Stochastic-Version"
# Move to 'utils' from current directory position
sys.path.insert(0, cwd[:(cwd.index(root_folder)+len(root_folder))] + os.sep + "HFPN-Stochastic-Version" + os.sep)
# Import HFPN class to work with hybrid functional Petri nets
from stochastic_hfpn import HFPN
# Import initial token, firing conditions and rate functions
from PD_sHFPN_initial_tokens import *
from PD_sHFPN_rate_functions import *
from PD_sHFPN_firing_conditions import *
from PD_sHFPN_inputs import *
from visualisation import Analysis
# from AD_parameters import *
# from AD_initial_tokens import *
# from AD_rate_functions import *
# from AD_firing_conditions import *
# from AD_sHFPN_inputs import *
#Import GUI
import tkinter as tk
from tkinter import ttk
from functools import partial
import glob
from PIL import ImageTk,Image
import webbrowser as webbrowser
from tkinter import font as tkfont
from tkinter import messagebox
#Import Threading
import threading
#Make Windows Taskbar Show as MNG Icon
import ctypes
myappid = 'sHFPN GUI' # arbitrary string
if platform == 'win32':
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
#Important packages for Graph embedding
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
style.use("ggplot")
import matplotlib.pyplot as plt
class sHFPN_GUI_APP:
def __init__(self):
self.root = tk.Tk()
if platform == 'darwin':
img = tk.Image("photo", file="mng.png")
self.root.iconphoto(True, img)
if platform == 'win32':
self.root.iconbitmap(r'mngicon.ico')
self.root.title("sHFPN GUI")
self.root.geometry("800x680")
self.Left_Sidebar()
self.Safe_Exit_Required = False
def Left_Sidebar(self):
self.frame1= tk.Frame(self.root, width=175)
self.frame1.pack(side="left", fill=tk.BOTH)
self.lb = tk.Listbox(self.frame1)
self.lb['bg']= "black"
self.lb['fg']= "lime"
self.lb.pack(side="left", fill=tk.BOTH)
#***Add Different Channels***
self.lb.insert(tk.END,"", "PD Inputs","AD Inputs", "", "PD Transitions", "AD Transitions", "","Run sHFPN", "Rate Analytics", "Live-Plots", "Live-Rate Plots", "","Analysis", "Saved Runs", "Saved CSVs", "", "About")
#*** Make Main Frame that other frames will rest on:
self.frame2= tk.Frame(self.root)
self.frame2.pack(side="left", fill=tk.BOTH, expand=1)
self.frame2.grid_rowconfigure(0, weight=1)
self.frame2.grid_columnconfigure(0, weight=1)
#Preload PD Places and Transitions
self.PD_Places()
self.PD_Continuous_Transitions()
self.PD_Discrete_Transitions()
#Preload AD Places and Transitions
# self.AD_Places()
# self.AD_Continuous_Transitions()
# self.AD_Discrete_Transitions()
#Preload All GUI Pages
self.PD_Inputs_Page()
self.Run_sHFPN_Page()
self.AD_Inputs_Page()
self.PD_Transitions_Page()
# self.AD_Transitions_Page()
self.Live_Rate_analytics_Page()
self.Live_Graph()
self.Live_Graph_Rates()
self.Analysis_page()
self.show_saved_runs()
self.Saved_Csvs_page()
self.About_Page()
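#Note: each page frame built above is gridded into the same cell of frame2
#(row=0, column=0), so switching pages only requires calling tkraise() on the
#selected frame in the listbox callback below.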
#change the selectbackground of "empty" items to black
# self.lb.itemconfig(0, selectbackground="black")
# self.lb.itemconfig(3, selectbackground="black")
# self.lb.itemconfig(7, selectbackground="black")
# self.lb.itemconfig(10, selectbackground="black")
#***Callback function executed when an item in the Left_Sidebar listbox is selected
def callback(event):
selection = event.widget.curselection()
if selection:
index=selection[0] #selection is a tuple, first item of tuple gives index
item_name=event.widget.get(index)
if item_name == "PD Inputs":
self.frame3.tkraise()
if item_name =="AD Inputs":
self.AD_frame3.tkraise()
if item_name == "PD Transitions":
self.PD_frame1.tkraise()
if item_name == "AD Transitions":
self.AD_frame1.tkraise()
if item_name == "Run sHFPN":
self.lb.itemconfig(7,bg="black")
self.frame4.tkraise()
if item_name == "Rate Analytics":
self.frame9.tkraise()
if item_name == "Live-Plots":
self.frame8.tkraise()
self.lb.itemconfig(9,{'bg':'black'})
if item_name == "Live-Rate Plots":
self.frame10.tkraise()
self.lb.itemconfig(10,{'bg':'black'})
if item_name == "Analysis":
self.frame5.tkraise()
if item_name == "Saved Runs":
#Destroy frame to update and remake frame.
self.frame6.destroy()
self.show_saved_runs()
self.frame6.tkraise()
if item_name == "Saved CSVs":
self.frame11.tkraise()
if item_name == "About":
self.frame7.tkraise()
self.lb.bind("<<ListboxSelect>>", callback)
def Live_Graph_Rates(self):
self.frame10=tk.Frame(self.frame2)
self.frame10.grid(row=0, column=0, sticky="nsew")
def Live_Rate_analytics_Page(self):
self.frame9 = tk.Frame(self.frame2)
self.frame9.grid(row=0,column=0, sticky="nsew")
#
self.PD_rate_canvas = tk.Canvas(self.frame9)
self.PD_rate_canvas.pack(side="left", fill=tk.BOTH, expand=1)
self.PD_rate_scrollbar = ttk.Scrollbar(self.frame9, orient=tk.VERTICAL, command=self.PD_rate_canvas.yview)
self.PD_rate_scrollbar.pack(side="left", fill=tk.Y)
self.PD_rate_canvas.configure(yscrollcommand=self.PD_rate_scrollbar.set)
self.PD_rate_canvas.bind('<Configure>', lambda e: self.PD_rate_canvas.configure(scrollregion= self.PD_rate_canvas.bbox("all")))
#Create another frame inside the canvas
self.PD_frame_in_rate_canvas = tk.Frame(self.PD_rate_canvas)
self.PD_rate_canvas.create_window((0,0), window=self.PD_frame_in_rate_canvas, anchor="nw")
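#Note: the Canvas + Scrollbar + inner Frame combination above is the standard
#Tkinter pattern for a scrollable area; the rate widgets are later gridded into
#PD_frame_in_rate_canvas and the canvas scrollregion is refreshed on <Configure>.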
#***Select item in Listbox and Display Corresponding output in Right_Output
#self.lb.bind("<<ListboxSelect>>", Lambda x: show)
def AD_Transitions_Page(self):
self.AD_frame1 = tk.Frame(self.frame2)
self.AD_frame1.grid(row=0,column=0,sticky="nsew")
self.AD_trans_canvas = tk.Canvas(self.AD_frame1)
self.AD_trans_canvas.pack(side="left", fill=tk.BOTH, expand=1)
self.AD_trans_scrollbar = ttk.Scrollbar(self.AD_frame1, orient=tk.VERTICAL, command=self.AD_trans_canvas.yview)
self.AD_trans_scrollbar.pack(side="left", fill=tk.Y)
self.AD_trans_canvas.configure(yscrollcommand=self.AD_trans_scrollbar.set)
self.AD_trans_canvas.bind('<Configure>', lambda e: self.AD_trans_canvas.configure(scrollregion= self.AD_trans_canvas.bbox("all")))
#Create another frame inside the canvas
self.AD_frame_in_canvas = tk.Frame(self.AD_trans_canvas)
self.AD_trans_canvas.create_window((0,0), window=self.AD_frame_in_canvas, anchor="nw")
self.AD_transitions_buttons_dict = {}
self.AD_transitions_entry_box_dict = {}
self.AD_transitions_consumption_checkboxes_dict = {}
self.AD_transitions_production_checkboxes_dict = {}
self.AD_transitions_entry_box_Discrete_SD = {}
self.AD_consump_checkbox_variables_dict={}
self.AD_produc_checkbox_variables_dict={}
#Headers
transition_header_button = tk.Button(self.AD_frame_in_canvas, text="Transition", state=tk.DISABLED)
transition_header_button.grid(row=0, column=1)
#SD Header
SD_header_button = tk.Button(self.AD_frame_in_canvas, text="Transition SD", state=tk.DISABLED)
SD_header_button.grid(row=0, column=2)
#DelaySD Header
DelaySD_header_button = tk.Button(self.AD_frame_in_canvas, text="Delay SD", state=tk.DISABLED)
DelaySD_header_button.grid(row=0, column=3)
#Collect Rate Analytics Header
collect_rate_header_button = tk.Button(self.AD_frame_in_canvas, text="Collect Consumption Rate Analytics", state=tk.DISABLED)
collect_rate_header_button.grid(row=0, column=4)
collect_rate_header_button_product = tk.Button(self.AD_frame_in_canvas, text="Collect Production Rate Analytics", state=tk.DISABLED)
collect_rate_header_button_product.grid(row=0, column=5)
for index, transition in enumerate(self.AD_pn.transitions):
#dict keys should be the index
index_str = str(index)
#Grid Transitions
self.AD_transitions_buttons_dict[index_str]=tk.Button(self.AD_frame_in_canvas, text=transition, state=tk.DISABLED)
self.AD_transitions_buttons_dict[index_str].grid(row=index+1, column=1, pady=10,padx=10)
#Transition SD Entry Boxes
self.AD_transitions_entry_box_dict[index_str] = tk.Entry(self.AD_frame_in_canvas, width=5)
self.AD_transitions_entry_box_dict[index_str].grid(row=index+1, column=2, pady=10, padx=10)
default_stochastic_parameter = self.AD_pn.transitions[transition].stochastic_parameters[0] #takes the default stochastic parameter that was preset
self.AD_transitions_entry_box_dict[index_str].insert(tk.END, default_stochastic_parameter)
#Checkboxes Collect Rates Consumption
consump_integer_y_n = self.AD_pn.transitions[transition].collect_rate_analytics[0]
if consump_integer_y_n == "yes":
consump_value = 1
if consump_integer_y_n == "no":
consump_value = 0
self.AD_consump_checkbox_variables_dict[index_str] = tk.IntVar(value=consump_value)
self.AD_transitions_consumption_checkboxes_dict[index_str] = tk.Checkbutton(self.AD_frame_in_canvas, variable=self.AD_consump_checkbox_variables_dict[index_str])
self.AD_transitions_consumption_checkboxes_dict[index_str].grid(row=index+1, column=4,pady=10, padx=10)
#Checkboxes Collect Rates Production
prod_integer_y_n = self.AD_pn.transitions[transition].collect_rate_analytics[1]
if prod_integer_y_n == "yes":
prod_value = 1
if prod_integer_y_n == "no":
prod_value = 0
self.AD_produc_checkbox_variables_dict[index_str] = tk.IntVar(value=prod_value)
self.AD_transitions_production_checkboxes_dict[index_str] = tk.Checkbutton(self.AD_frame_in_canvas, variable=self.AD_produc_checkbox_variables_dict[index_str])
self.AD_transitions_production_checkboxes_dict[index_str].grid(row=index+1, column=5,pady=10, padx=10)
#Delay SD entry box (only created for discrete transitions)
if self.AD_pn.transitions[transition].DiscreteFlag =="yes":
self.AD_transitions_entry_box_Discrete_SD[index_str] = tk.Entry(self.AD_frame_in_canvas, width=5)
self.AD_transitions_entry_box_Discrete_SD[index_str].grid(row=index+1, column=3, pady=10, padx=10)
default_stochastic_parameter = self.AD_pn.transitions[transition].stochastic_parameters[1] #Takes the Discrete Transition Stochastic Parameter now
self.AD_transitions_entry_box_Discrete_SD[index_str].insert(tk.END, default_stochastic_parameter)
def PD_Transitions_Page(self):
self.PD_frame1 = tk.Frame(self.frame2)
self.PD_frame1.grid(row=0,column=0,sticky="nsew")
self.PD_trans_canvas = tk.Canvas(self.PD_frame1)
self.PD_trans_canvas.pack(side="left", fill=tk.BOTH, expand=1)
self.PD_trans_scrollbar = ttk.Scrollbar(self.PD_frame1, orient=tk.VERTICAL, command=self.PD_trans_canvas.yview)
self.PD_trans_scrollbar.pack(side="left", fill=tk.Y)
self.PD_trans_canvas.configure(yscrollcommand=self.PD_trans_scrollbar.set)
self.PD_trans_canvas.bind('<Configure>', lambda e: self.PD_trans_canvas.configure(scrollregion= self.PD_trans_canvas.bbox("all")))
#Create another frame inside the canvas
self.PD_frame_in_canvas = tk.Frame(self.PD_trans_canvas)
self.PD_trans_canvas.create_window((0,0), window=self.PD_frame_in_canvas, anchor="nw")
self.transitions_buttons_dict = {}
self.transitions_entry_box_dict = {}
self.transitions_consumption_checkboxes_dict = {}
self.transitions_production_checkboxes_dict = {}
self.transitions_entry_box_Discrete_SD = {}
self.consump_checkbox_variables_dict={}
self.produc_checkbox_variables_dict={}
#Headers
transition_header_button = tk.Button(self.PD_frame_in_canvas, text="Transition", state=tk.DISABLED)
transition_header_button.grid(row=0, column=1)
#SD Header
SD_header_button = tk.Button(self.PD_frame_in_canvas, text="Transition SD", state=tk.DISABLED)
SD_header_button.grid(row=0, column=2)
#DelaySD Header
DelaySD_header_button = tk.Button(self.PD_frame_in_canvas, text="Delay SD", state=tk.DISABLED)
DelaySD_header_button.grid(row=0, column=3)
#Collect Rate Analytics Header
collect_rate_header_button = tk.Button(self.PD_frame_in_canvas, text="Collect Consumption Rate Analytics", state=tk.DISABLED)
collect_rate_header_button.grid(row=0, column=4)
collect_rate_header_button_product = tk.Button(self.PD_frame_in_canvas, text="Collect Production Rate Analytics", state=tk.DISABLED)
collect_rate_header_button_product.grid(row=0, column=5)
for index, transition in enumerate(self.PD_pn.transitions):
#dict keys should be the index
index_str = str(index)
#Grid Transitions
self.transitions_buttons_dict[index_str]=tk.Button(self.PD_frame_in_canvas, text=transition, state=tk.DISABLED)
self.transitions_buttons_dict[index_str].grid(row=index+1, column=1, pady=10,padx=10)
#Transition SD Entry Boxes
self.transitions_entry_box_dict[index_str] = tk.Entry(self.PD_frame_in_canvas, width=5)
self.transitions_entry_box_dict[index_str].grid(row=index+1, column=2, pady=10, padx=10)
default_stochastic_parameter = self.PD_pn.transitions[transition].stochastic_parameters[0] #takes the default stochastic parameter that was preset
self.transitions_entry_box_dict[index_str].insert(tk.END, default_stochastic_parameter)
#Checkboxes Collect Rates Consumption
consump_integer_y_n = self.PD_pn.transitions[transition].collect_rate_analytics[0]
if consump_integer_y_n == "yes":
consump_value = 1
if consump_integer_y_n == "no":
consump_value = 0
self.consump_checkbox_variables_dict[index_str] = tk.IntVar(value=consump_value)
self.transitions_consumption_checkboxes_dict[index_str] = tk.Checkbutton(self.PD_frame_in_canvas, variable=self.consump_checkbox_variables_dict[index_str])
self.transitions_consumption_checkboxes_dict[index_str].grid(row=index+1, column=4,pady=10, padx=10)
#Checkboxes Collect Rates Production
prod_integer_y_n = self.PD_pn.transitions[transition].collect_rate_analytics[1]
if prod_integer_y_n == "yes":
prod_value = 1
if prod_integer_y_n == "no":
prod_value = 0
self.produc_checkbox_variables_dict[index_str] = tk.IntVar(value=prod_value)
self.transitions_production_checkboxes_dict[index_str] = tk.Checkbutton(self.PD_frame_in_canvas, variable=self.produc_checkbox_variables_dict[index_str])
self.transitions_production_checkboxes_dict[index_str].grid(row=index+1, column=5,pady=10, padx=10)
#Delay SD entry box (only created for discrete transitions)
if self.PD_pn.transitions[transition].DiscreteFlag =="yes":
self.transitions_entry_box_Discrete_SD[index_str] = tk.Entry(self.PD_frame_in_canvas, width=5)
self.transitions_entry_box_Discrete_SD[index_str].grid(row=index+1, column=3, pady=10, padx=10)
default_stochastic_parameter = self.PD_pn.transitions[transition].stochastic_parameters[1] #Takes the Discrete Transition Stochastic Parameter now
self.transitions_entry_box_Discrete_SD[index_str].insert(tk.END, default_stochastic_parameter)
def AD_Places(self):
self.AD_pn = HFPN()
### Cholesterol Homeostasis
self.AD_pn.add_place(it_p_ApoEchol_extra,place_id="p_ApoEchol_extra", label="ApoE-cholesterol complex (extracellular)", continuous=True)
# Cholesterol in different organelles
self.AD_pn.add_place(it_p_chol_LE,place_id="p_chol_LE", label="Cholesterol (late endosome)", continuous=True)
self.AD_pn.add_place(it_p_chol_mito,place_id="p_chol_mito", label="Cholesterol (mitochondria)", continuous=True)
self.AD_pn.add_place(it_p_chol_ER,place_id="p_chol_ER", label="Cholesterol (ER)", continuous=True)
self.AD_pn.add_place(it_p_chol_PM,place_id="p_chol_PM", label="Cholesterol (Plasma Membrane)", continuous=True)
# Oxysterols
self.AD_pn.add_place(it_p_24OHchol_extra,place_id="p_24OHchol_extra", label="24-hydroxycholesterol (extracellular)", continuous=True)
self.AD_pn.add_place(it_p_24OHchol_intra,place_id="p_24OHchol_intra", label="24-hydroxycholesterol (intracellular)", continuous=True)
self.AD_pn.add_place(it_p_27OHchol_extra,place_id="p_27OHchol_extra", label="27-hydroxycholesterol (extracellular)", continuous=True)
self.AD_pn.add_place(it_p_27OHchol_intra,place_id="p_27OHchol_intra", label="27-hydroxycholesterol (intracellular)", continuous=True)
self.AD_pn.add_place(it_p_7HOCA,place_id="p_7HOCA", label="7-HOCA", continuous=True)
self.AD_pn.add_place(it_p_preg,place_id="p_preg", label="Pregnenolon", continuous=True)
## Tau Places
self.AD_pn.add_place(it_p_GSK3b_inact, 'p_GSK3b_inact', 'Inactive GSK3 beta kinase', continuous = True)
self.AD_pn.add_place(it_p_GSK3b_act, 'p_GSK3b_act', 'Active GSK3 beta kinase', continuous = True)
self.AD_pn.add_place(it_p_tauP, 'p_tauP', 'Phosphorylated tau', continuous = True)
self.AD_pn.add_place(it_p_tau, 'p_tau', 'Unphosphorylated tau (microtubule)', continuous = True)
## AB Places
self.AD_pn.add_place(it_p_asec, 'p_asec', 'Alpha secretase', continuous = True)
self.AD_pn.add_place(it_p_APP_pm, 'p_APP_pm', 'APP (plasma membrane)', continuous = True) # input
self.AD_pn.add_place(it_p_sAPPa, 'p_sAPPa', 'Soluble APP alpha', continuous = True)
self.AD_pn.add_place(it_p_CTF83, 'p_CTF83', 'CTF83', continuous = True)
self.AD_pn.add_place(it_p_APP_endo, 'p_APP_endo', 'APP (endosome)', continuous = True)
self.AD_pn.add_place(it_p_bsec, 'p_bsec', 'Beta secretase', continuous = True)
self.AD_pn.add_place(it_p_sAPPb, 'p_sAPPb', 'Soluble APP beta', continuous = True)
self.AD_pn.add_place(it_p_CTF99, 'p_CTF99', 'CTF99', continuous = True)
self.AD_pn.add_place(it_p_gsec, 'p_gsec', 'Gamma secretase', continuous = True)
self.AD_pn.add_place(it_p_AICD, 'p_AICD', 'AICD', continuous = True)
self.AD_pn.add_place(it_p_Ab, 'p_Ab', 'Amyloid beta peptide', continuous = True)
self.AD_pn.add_place(it_p_Abconc, 'p_Abconc', 'Amyloid beta peptide concentration', continuous = True)
self.AD_pn.add_place(it_p_ApoE, 'p_ApoE', 'ApoE genotype', continuous = True) # gene, risk factor in AD
self.AD_pn.add_place(it_p_age, 'p_age', 'Age risk factor', continuous = True)
self.AD_pn.add_place(it_p_CD33, 'p_CD33', 'CD33 mutation', continuous = True) # 80 years old, risk factor in AD for BACE1 activity increase
##AB aggregation places
self.AD_pn.add_place(it_p_Ab_S, place_id="p_Ab_S", label="Nucleated Ab", continuous = True)
self.AD_pn.add_place(it_p_Ab_P, place_id="p_Ab_P", label="Ab oligomer", continuous = True)
self.AD_pn.add_place(it_p_Ab_M, place_id="p_Ab_M", label="Ab fibril (mass)", continuous = True)
# ER retraction and collapse
# Monomeric RTN3 (cycling between axonal and perinuclear regions)
self.AD_pn.add_place(it_p_RTN3_axon, place_id="p_RTN3_axon", label="Monomeric RTN3 (axonal)", continuous=True)
self.AD_pn.add_place(it_p_RTN3_PN, place_id="p_RTN3_PN", label="Monomeric RTN3 (perinuclear)", continuous=True)
# HMW RTN3 (cycling between different cellular compartments)
self.AD_pn.add_place(it_p_RTN3_HMW_cyto, place_id="p_RTN3_HMW_cyto", label="HMW RTN3 (cytosol)", continuous=True)
self.AD_pn.add_place(it_p_RTN3_HMW_auto, place_id="p_RTN3_HMW_auto", label="HMW RTN3 (autophagosome)", continuous=True)
self.AD_pn.add_place(it_p_RTN3_HMW_lyso, place_id="p_RTN3_HMW_lyso", label="HMW RTN3 (degraded in lysosome)", continuous=True)
self.AD_pn.add_place(it_p_RTN3_HMW_dys1, place_id="p_RTN3_HMW_dys1", label="HMW RTN3 (type I/III dystrophic neurites)", continuous=True)
self.AD_pn.add_place(it_p_RTN3_HMW_dys2, place_id="p_RTN3_HMW_dys2", label="HMW RTN3 (type II dystrophic neurites)", continuous=True)
# Energy metabolism: ATP consumption
self.AD_pn.add_place(it_p_ATP, place_id="p_ATP", label="ATP", continuous=True)
self.AD_pn.add_place(it_p_ADP, place_id="p_ADP", label="ADP", continuous=True)
self.AD_pn.add_place(it_p_cas3, place_id="p_cas3", label="Active caspase 3", continuous=True)
self.AD_pn.add_place(it_p_reduc_mito, place_id="p_reduc_mito", label="Reducing agents (mitochondria)", continuous=True)
self.AD_pn.add_place(it_p_ROS_mito, place_id="p_ROS_mito", label="ROS (mitochondria)", continuous=True)
self.AD_pn.add_place(it_p_H2O_mito, place_id="p_H2O_mito", label="H2O (mitochondria)", continuous=True)
##calcium
self.AD_pn.add_place(it_p_Ca_cyto, "p_Ca_cyto", "Calcium (cytosol)", continuous = True)
self.AD_pn.add_place(it_p_Ca_mito, "p_Ca_mito", "Calcium (mitochondria)", continuous = True)
self.AD_pn.add_place(it_p_Ca_ER, "p_Ca_ER", "Calcium (ER)", continuous = True)
# Discrete on/off-switches calcium pacemaking
self.AD_pn.add_place(1, "p_Ca_extra", "on1 - Calcium (extracellular)", continuous = False)
self.AD_pn.add_place(0, "p_on2","on2", continuous = False)
self.AD_pn.add_place(0, "p_on3","on3", continuous = False)
self.AD_pn.add_place(0, "p_on4","on4", continuous = False)
def PD_Places(self):
# Initialize an empty HFPN
self.PD_pn = HFPN()
# # Cholesterol homeostasis
self.PD_pn.add_place(PD_it_p_chol_PM, "p_chol_PM","Chol - perinuclear region", continuous = True)
self.PD_pn.add_place(PD_it_p_chol_LE, "p_chol_LE", "Chol - late endosome", continuous = True)
self.PD_pn.add_place(PD_it_p_chol_ER, "p_chol_ER", "Chol - ER", continuous = True)
self.PD_pn.add_place(PD_it_p_chol_mito, "p_chol_mito", "Chol - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_27OHchol_extra, "p_27OHchol_extra","27-OH chol - extracellular", continuous = True)
self.PD_pn.add_place(PD_it_p_27OHchol_intra, "p_27OHchol_intra","27-OH chol - intracellular", continuous = True)
self.PD_pn.add_place(PD_it_p_ApoEchol_extra, "p_ApoEchol_extra","ApoE - extracellular", continuous = True)
self.PD_pn.add_place(PD_it_p_ApoEchol_EE, "p_ApoEchol_EE","ApoE - Early endosome", continuous = True)
self.PD_pn.add_place(PD_it_p_7HOCA, "p_7HOCA","7-HOCA", continuous = True)
self.PD_pn.add_place(PD_it_p_preg,place_id="p_preg", label="Pregnenolon", continuous=True)
self.PD_pn.add_place(PD_it_p_24OHchol_extra,place_id="p_24OHchol_extra", label="24OHchol extra", continuous=True)
self.PD_pn.add_place(PD_it_p_24OHchol_intra,place_id="p_24OHchol_intra", label="24OHchol intra", continuous=True)
# # PD specific places in cholesterol homeostasis
self.PD_pn.add_place(PD_it_p_GBA1, "p_GBA1","GBA1", continuous = False)
self.PD_pn.add_place(PD_it_p_SNCA_act_extra, "p_SNCA_act_extra","a-synuclein - extracellular", continuous = True)
self.PD_pn.add_place(PD_it_p_SNCAApoEchol_extra, "p_SNCAApoEchol_extra","a-synuclein-ApoE complex - extracellular", continuous = True)
self.PD_pn.add_place(PD_it_p_SNCAApoEchol_intra, "p_SNCAApoEchol_intra","a-synuclein-ApoE complex - intracellular", continuous = True)
# # Energy metabolism
self.PD_pn.add_place(PD_it_p_ROS_mito, "p_ROS_mito", "ROS - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_H2O_mito, "p_H2O_mito", "H2O - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_reducing_agents, "p_reducing_agents", "Reducing agents - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_cas3, "p_cas3","caspase 3 - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_DJ1, "p_DJ1","DJ1 mutant", continuous = True)
# # Calcium homeostasis
self.PD_pn.add_place(PD_it_p_Ca_cyto, "p_Ca_cyto", "Ca - cytosole", continuous = True)
self.PD_pn.add_place(PD_it_p_Ca_mito, "p_Ca_mito","Ca - mitochondria", continuous = True)
self.PD_pn.add_place(PD_it_p_Ca_ER, "p_Ca_ER", "Ca - ER", continuous = True)
self.PD_pn.add_place(PD_it_p_ADP, "p_ADP","ADP - Calcium ER import", continuous = True)
self.PD_pn.add_place(PD_it_p_ATP, "p_ATP","ATP - Calcium ER import", continuous = True)
# # Discrete on/off-switches calcium pacemaking
self.PD_pn.add_place(1, "p_Ca_extra", "on1 - Ca - extracellular", continuous = False)
self.PD_pn.add_place(0, "p_on2","on2", continuous = False)
self.PD_pn.add_place(0, "p_on3","on3", continuous = False)
self.PD_pn.add_place(0, "p_on4","on4", continuous = False)
# Lewy bodies
self.PD_pn.add_place(PD_it_p_SNCA_act, "p_SNCA_act","SNCA - active", continuous = True)
self.PD_pn.add_place(PD_it_p_VPS35, "p_VPS35", "VPS35", continuous = True)
self.PD_pn.add_place(PD_it_p_SNCA_inact, "p_SNCA_inact", "SNCA - inactive", continuous = True)
self.PD_pn.add_place(PD_it_p_SNCA_olig, "p_SNCA_olig", "SNCA - Oligomerised", continuous = True)
self.PD_pn.add_place(PD_it_p_LB, "p_LB", "Lewy body", continuous = True)
self.PD_pn.add_place(PD_it_p_Fe2, "p_Fe2", "Fe2 iron pool", continuous = True)
# Late endosome pathology
self.PD_pn.add_place(PD_it_p_LRRK2_mut, "p_LRRK2_mut","LRRK2 - mutated", continuous = True)
# Monomeric RTN3 (cycling between axonal and perinuclear regions)
self.PD_pn.add_place(PD_it_p_RTN3_axon, place_id="p_RTN3_axon", label="monomeric RTN3 (axonal)", continuous=True)
self.PD_pn.add_place(PD_it_p_RTN3_PN, place_id="p_RTN3_PN", label="monomeric RTN3 (perinuclear)", continuous=True)
# HMW RTN3 (cycling between different cellular compartments)
self.PD_pn.add_place(PD_it_p_RTN3_HMW_cyto, place_id="p_RTN3_HMW_cyto", label="HMW RTN3 (cytosol)", continuous=True)
self.PD_pn.add_place(PD_it_p_RTN3_HMW_auto, place_id="p_RTN3_HMW_auto", label="HMW RTN3 (autophagosome)", continuous=True)
self.PD_pn.add_place(PD_it_p_RTN3_HMW_lyso, place_id="p_RTN3_HMW_lyso", label="HMW RTN3 (degraded in lysosome)", continuous=True)
self.PD_pn.add_place(PD_it_p_RTN3_HMW_dys1, place_id="p_RTN3_HMW_dys1", label="HMW RTN3 (type I/III dystrophic neurites)", continuous=True)
self.PD_pn.add_place(PD_it_p_RTN3_HMW_dys2, place_id="p_RTN3_HMW_dys2", label="HMW RTN3 (type II dystrophic neurites)", continuous=True)
# Two places that are NOT part of this subpathway, but are temporarily added for establishing proper connections
# They will be removed upon merging of subpathways
self.PD_pn.add_place(PD_it_p_tau, place_id="p_tau", label = "Unphosphorylated tau", continuous = True)
self.PD_pn.add_place(PD_it_p_tauP, place_id="p_tauP", label = "Phosphorylated tau", continuous = True)
# Drug places
self.PD_pn.add_place(PD_it_p_NPT200, place_id="p_NPT200", label = "Drug NPT200", continuous = True)
self.PD_pn.add_place(PD_it_p_DNL151, place_id="p_DNL151", label = "Drug DNL151", continuous = True)
self.PD_pn.add_place(PD_it_p_LAMP2A, place_id="p_LAMP2A", label = "Drug LAMP2A", continuous = True)
def PD_Continuous_Transitions(self):
## Define transitions
# Cholesterol Endocytosis
self.PD_pn.add_transition_with_speed_function( #1
transition_id = "t_LDLR_endocyto",
label = "LDLR endocyto",
input_place_ids = ["p_ApoEchol_extra", "p_chol_ER","p_LB"],
firing_condition = PD_fc_t_LDLR_endocyto,
reaction_speed_function = PD_r_t_LDLR_endocyto,
consumption_coefficients = [0,0,0],
output_place_ids = ["p_ApoEchol_EE"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# # Cleavage of cholesteryl esters
self.PD_pn.add_transition_with_speed_function( #2
transition_id = "t_ApoEchol_cleav",
label = "ApoE-chol cleav",
input_place_ids = ["p_ApoEchol_EE"],
firing_condition = PD_fc_t_ApoEchol_cleav,
reaction_speed_function = PD_r_t_ApoEchol_cleav,
consumption_coefficients = [1],
output_place_ids = ["p_chol_LE"],
production_coefficients = [354],
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Transport Cholesterol from LE to ER
self.PD_pn.add_transition_with_speed_function( #3
transition_id = "t_chol_trans_LE_ER",
label = "Chol transport LE-ER",
input_place_ids = ["p_chol_LE"],
firing_condition = PD_fc_t_chol_trans_LE_ER,
reaction_speed_function = PD_r_t_chol_trans_LE_ER,
consumption_coefficients = [1],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Transport Cholesterol from LE to mito
self.PD_pn.add_transition_with_speed_function( #4
transition_id = "t_chol_trans_LE_mito",
label = "Chol transport LE-mito",
input_place_ids = ["p_chol_LE"],
firing_condition = PD_fc_t_chol_trans_LE_mito,
reaction_speed_function = PD_r_t_chol_trans_LE_mito,
consumption_coefficients = [1],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Transport Cholesterol from LE to PM
self.PD_pn.add_transition_with_speed_function( #5
transition_id = "t_chol_trans_LE_PM",
label = "Chol transport LE-PM",
input_place_ids = ["p_chol_LE"],
firing_condition = PD_fc_t_chol_trans_LE_PM,
reaction_speed_function = PD_r_t_chol_trans_LE_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Transport Cholesterol from PM to ER
self.PD_pn.add_transition_with_speed_function( #6
transition_id = "t_chol_trans_PM_ER",
label = "Chol transport PM-ER",
input_place_ids = ["p_chol_PM"],
firing_condition = PD_fc_t_chol_trans_PM_ER,
reaction_speed_function = PD_r_t_chol_trans_PM_ER,
consumption_coefficients = [1],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Transport Cholesterol from ER to PM
self.PD_pn.add_transition_with_speed_function( #7
transition_id = "t_chol_trans_ER_PM",
label = "Chol transport ER-PM",
input_place_ids = ["p_chol_ER"],
firing_condition = PD_fc_t_chol_trans_ER_PM,
reaction_speed_function = PD_r_t_chol_trans_ER_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Transport Cholesterol from ER to mito
self.PD_pn.add_transition_with_speed_function( #8
transition_id = "t_chol_trans_ER_mito",
label = "Chol transport ER-mito",
input_place_ids = ["p_chol_ER"],
firing_condition = PD_fc_t_chol_trans_ER_mito,
reaction_speed_function = PD_r_t_chol_trans_ER_mito,
consumption_coefficients = [1],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Metabolisation of chol by CYP27A1
self.PD_pn.add_transition_with_michaelis_menten( #9
transition_id = "t_CYP27A1_metab",
label = "Chol metab CYP27A1",
Km = Km_t_CYP27A1_metab,
vmax = vmax_t_CYP27A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = lambda a : chol_mp,
stochastic_parameters = [cholSD],
collect_rate_analytics = ["no","no"])
# Metabolism of chol by CYP11A1
self.PD_pn.add_transition_with_michaelis_menten( #10
transition_id = "t_CYP11A1_metab",
label = "Chol metab CYP11A1",
Km = Km_t_CYP11A1_metab,
vmax = vmax_t_CYP11A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_preg"],
production_coefficients = [1],
vmax_scaling_function = lambda a : chol_mp,
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Metabolisation of 27OHchol by CYP7B1
self.PD_pn.add_transition_with_michaelis_menten( #11
transition_id = "t_CYP7B1_metab",
label = "27OHchol metab CYP7B1",
Km = Km_t_CYP7B1_metab,
vmax = vmax_t_CYP7B1_metab,
input_place_ids = ["p_27OHchol_intra"],
substrate_id = "p_27OHchol_intra",
consumption_coefficients = [1],
output_place_ids = ["p_7HOCA"],
production_coefficients = [1],
vmax_scaling_function = lambda a : chol_mp,
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Endocytosis of 27OHchol
self.PD_pn.add_transition_with_speed_function( #12
transition_id = "t_27OHchol_endocyto",
label = "27OHchol endocyto",
input_place_ids = ["p_27OHchol_extra"],
firing_condition = PD_fc_t_27OHchol_endocyto,
reaction_speed_function = PD_r_t_27OHchol_endocyto,
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra", "p_27OHchol_extra"],
production_coefficients = [1,1],
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Metabolisation of chol by CYP46A1
self.PD_pn.add_transition_with_michaelis_menten( #13
transition_id = "t_CYP46A1_metab",
label = "Chol metab CYP46A1",
Km = Km_t_CYP46A1_metab,
vmax = vmax_t_CYP46A1_metab,
input_place_ids = ["p_chol_ER"],
substrate_id = "p_chol_ER",
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = lambda a : chol_mp,
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Exocytosis of 24OHchol
self.PD_pn.add_transition_with_speed_function( #14
transition_id = "t_24OHchol_exocyto",
label = "24OHchol exocyto",
input_place_ids = ["p_24OHchol_intra"],
firing_condition = PD_fc_t_24OHchol_exocyto,
reaction_speed_function = PD_r_t_24OHchol_exocyto,
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_extra"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = PD_collect_rate_analytics)
# Transport of Chol into ECM
self.PD_pn.add_transition_with_speed_function( #15
transition_id = "t_chol_trans_PM_ECM",
label = "Chol transport PM-ECM",
input_place_ids = ["p_chol_PM", "p_24OHchol_intra"],
firing_condition = PD_fc_t_chol_trans_PM_ECM,
reaction_speed_function = PD_r_t_chol_trans_PM_ECM,
consumption_coefficients = [1,0],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [cholSD],
collect_rate_analytics = ["yes", "no"])
# PD specific
#This transition has been proven false; it should be removed.
self.PD_pn.add_transition_with_speed_function( #16
transition_id = 't_SNCA_bind_ApoEchol_extra',
label = 'Extracellular binding of SNCA to chol',
input_place_ids = ['p_ApoEchol_extra','p_SNCA_act'],
firing_condition = PD_fc_t_SNCA_bind_ApoEchol_extra,
reaction_speed_function = PD_r_t_SNCA_bind_ApoEchol_extra,
consumption_coefficients = [0,30],
output_place_ids = ['p_SNCA_olig'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"])
self.PD_pn.add_transition_with_speed_function( #17
transition_id = 't_chol_LE_upreg',
label = 'Upregulation of chol in LE',
input_place_ids = ['p_GBA1'],
firing_condition = PD_fc_t_chol_LE_upreg,
reaction_speed_function = PD_r_t_chol_LE_upreg,
consumption_coefficients = [0], # GBA1 is an enzyme
output_place_ids = ['p_chol_LE'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = PD_collect_rate_analytics)
# # Calcium homeostasis
self.PD_pn.add_transition_with_speed_function( #18
transition_id = 't_Ca_imp',
label = 'L-type Ca channel',
input_place_ids = ['p_Ca_extra'],
firing_condition = PD_fc_t_Ca_imp,
reaction_speed_function = PD_r_t_Ca_imp,
consumption_coefficients = [0], # Need to review this
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"]) # Need to review this
self.PD_pn.add_transition_with_speed_function( #19
transition_id = 't_mCU',
label = 'Ca import into mitochondria via mCU',
input_place_ids = ['p_Ca_cyto','p_Ca_mito'],
firing_condition = PD_fc_t_mCU,
reaction_speed_function = PD_r_t_mCU,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"])
self.PD_pn.add_transition_with_speed_function( #20
transition_id = 't_MAM',
label = 'Ca transport from ER to mitochondria',
input_place_ids = ['p_Ca_ER','p_Ca_mito'],
firing_condition = PD_fc_t_MAM,
reaction_speed_function = PD_r_t_MAM,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"])
self.PD_pn.add_transition_with_speed_function( #21
transition_id = 't_RyR_IP3R',
label = 'Ca export from ER',
input_place_ids = ['p_Ca_extra','p_Ca_ER'],
firing_condition = PD_fc_t_RyR_IP3R,
reaction_speed_function = PD_r_t_RyR_IP3R,
consumption_coefficients = [0,1],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"])
self.PD_pn.add_transition_with_speed_function( #22
transition_id = 't_SERCA',
label = 'Ca import to ER',
input_place_ids = ['p_Ca_cyto','p_ATP'],
firing_condition = PD_fc_t_SERCA,
reaction_speed_function = PD_r_t_SERCA,
consumption_coefficients = [1,1], # !!! Need to review this: 0 should be 1
output_place_ids = ['p_Ca_ER','p_ADP'],
production_coefficients = [1,1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"]) # Need to review this
self.PD_pn.add_transition_with_speed_function( #23
transition_id = 't_NCX_PMCA',
label = 'Ca efflux to extracellular space',
input_place_ids = ['p_Ca_cyto','p_on3'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = PD_r_t_NCX_PMCA,
consumption_coefficients = [1,0],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = ["yes","no"])
self.PD_pn.add_transition_with_speed_function( #24
transition_id = 't_mNCLX',
label = 'Ca export from mitochondria via mNCLX',
input_place_ids = ['p_Ca_mito','p_LRRK2_mut'],
firing_condition = PD_fc_t_mNCLX,
reaction_speed_function = PD_r_t_mNCLX,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = ["no","yes"])
# # Discrete on/off-switches calcium pacemaking
# Link to energy metabolism in that it needs ATP replenishment
self.PD_pn.add_transition_with_mass_action( #29
transition_id = 't_NaK_ATPase',
label = 'NaK ATPase',
rate_constant = k_t_NaK_ATPase,
input_place_ids = ['p_ATP', 'p_on3'],
firing_condition = lambda a: a['p_on3']==1,
consumption_coefficients = [1,0],
output_place_ids = ['p_ADP'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
# Lewy bodies pathology
self.PD_pn.add_transition_with_speed_function( #30
transition_id = 't_SNCA_degr',
label = 'SNCA degradation by CMA',
input_place_ids = ['p_SNCA_act','p_VPS35','p_LRRK2_mut','p_27OHchol_intra','p_DJ1', 'p_DNL151', 'p_LAMP2A'],
firing_condition = PD_fc_t_SNCA_degr,
reaction_speed_function = PD_r_t_SNCA_degr,
consumption_coefficients = [1,0,0,0,0,0,0],
output_place_ids = ['p_SNCA_inact'],
production_coefficients = [1],
stochastic_parameters = [0],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#31
transition_id = 't_SNCA_aggr',
label = 'SNCA aggregation',
input_place_ids = ['p_SNCA_act','p_Ca_cyto','p_ROS_mito', 'p_tauP', 'p_NPT200'],
firing_condition = PD_fc_t_SNCA_aggr,
reaction_speed_function = PD_r_t_SNCA_aggr,
consumption_coefficients = [30,0,0,0,0], #should be reviewed if Ca is consumed
output_place_ids = ['p_SNCA_olig'],
production_coefficients = [1],
stochastic_parameters = [0],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#32
transition_id = 't_SNCA_fibril',
label = 'SNCA fibrillation',
input_place_ids = ['p_SNCA_olig'],
firing_condition = PD_fc_t_SNCA_fibril,
reaction_speed_function = PD_r_t_SNCA_fibril,
consumption_coefficients = [100],
output_place_ids = ['p_LB'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#33
transition_id = 't_IRE',
label = 'IRE',
input_place_ids = ['p_Fe2'],
firing_condition = PD_fc_t_IRE,
reaction_speed_function = PD_r_t_IRE,
consumption_coefficients = [0],
output_place_ids = ['p_SNCA_act'],
production_coefficients = [1],
stochastic_parameters = [0],
collect_rate_analytics = dont_collect)
# Energy metabolism
self.PD_pn.add_transition_with_speed_function(#34
transition_id = 't_ATP_hydro_mito',
label = 'ATP hydrolysis in mitochondria',
input_place_ids = ['p_ATP'],
firing_condition = PD_fc_t_ATP_hydro_mito,
reaction_speed_function = PD_r_t_ATP_hydro_mito,
consumption_coefficients = [1],
output_place_ids = ['p_ADP'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#35
transition_id = 't_ROS_metab',
label = 'ROS neutralisation',
input_place_ids = ['p_ROS_mito','p_chol_mito','p_LB','p_DJ1'],
firing_condition = PD_fc_t_ROS_metab,
reaction_speed_function = PD_r_t_ROS_metab,
consumption_coefficients = [1,0,0,0],
output_place_ids = ['p_H2O_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
# #Link of krebs to calcium homeostasis
self.PD_pn.add_transition_with_speed_function(#36
transition_id = 't_krebs',
label = 'Krebs cycle',
input_place_ids = ['p_ADP','p_Ca_mito'],
firing_condition = PD_fc_t_krebs,
reaction_speed_function = PD_r_t_krebs,
consumption_coefficients = [1,0], # Need to review this
output_place_ids = ['p_reducing_agents','p_ATP'],
production_coefficients = [4,1],
stochastic_parameters = [SD],
collect_rate_analytics = PD_collect_rate_analytics)
#Link of ETC to calcium and cholesterol
self.PD_pn.add_transition_with_speed_function(#37
transition_id = 't_ETC',
label = 'Electron transport chain',
input_place_ids = ['p_reducing_agents', 'p_ADP', 'p_Ca_mito', 'p_chol_mito','p_ROS_mito','p_LRRK2_mut'],
firing_condition = PD_fc_t_ETC,
reaction_speed_function = PD_r_t_ETC,
consumption_coefficients = [22/3,22,0,0,0,0], # Need to review this
output_place_ids = ['p_ATP', 'p_ROS_mito'],
production_coefficients = [22,0.005],
stochastic_parameters = [SD],
collect_rate_analytics = PD_collect_rate_analytics)
# # Output transitions: Cas3 for apoptosis
self.PD_pn.add_transition_with_speed_function(#38
transition_id = 't_mito_dysfunc',
label = 'Mitochondrial complex 1 dysfunction',
input_place_ids = ['p_ROS_mito'],
firing_condition = PD_fc_t_mito_dysfunc,
reaction_speed_function = PD_r_t_mito_dysfunc,
consumption_coefficients = [1],
output_place_ids = ['p_cas3'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = PD_collect_rate_analytics)
self.PD_pn.add_transition_with_speed_function(#39
transition_id = 't_cas3_inact',
label = 'Caspase 3 degradation',
input_place_ids = ['p_cas3'],
firing_condition = PD_fc_t_cas3_inact,
reaction_speed_function = PD_r_t_cas3_inact,
consumption_coefficients = [1], # Need to review this
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
# Late endosome pathology
self.PD_pn.add_transition_with_michaelis_menten(#40
transition_id = 't_phos_tau',
label = 'Phosphorylation of tau',
Km = Km_t_phos_tau,
vmax = kcat_t_phos_tau,
input_place_ids = ['p_tau', 'p_SNCA_act'],
substrate_id = 'p_tau',
consumption_coefficients = [1, 0],
output_place_ids = ['p_tauP'],
production_coefficients = [1],
vmax_scaling_function = PD_vmax_scaling_t_phos_tau,
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_michaelis_menten(#41
transition_id = 't_dephos_tauP',
label = 'Dephosphorylation of tau protein',
Km = Km_t_dephos_tauP,
vmax = vmax_t_dephos_tauP,
input_place_ids = ['p_tauP', 'p_Ca_cyto'],
substrate_id = 'p_tauP',
consumption_coefficients = [1, 0],
output_place_ids = ['p_tau'],
production_coefficients = [1],
vmax_scaling_function = PD_vmax_scaling_t_dephos_tauP,
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#42
transition_id = 't_RTN3_exp',
label = 'Expression rate of RTN3',
input_place_ids = [],
firing_condition = PD_fc_t_RTN3_exp,
reaction_speed_function = PD_r_t_RTN3_exp,
consumption_coefficients = [],
output_place_ids = ['p_RTN3_PN'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#43
transition_id = 't_LE_retro',
label = 'retrograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_chol_LE','p_RTN3_axon', 'p_tau','p_LRRK2_mut','p_LB'],
firing_condition = PD_fc_t_LE_retro,
reaction_speed_function = PD_r_t_LE_retro,
consumption_coefficients = [ATPcons_t_LE_trans, 0, 1, 0,0,0],
output_place_ids = ['p_ADP','p_RTN3_PN'],
production_coefficients = [ATPcons_t_LE_trans, 1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#44
transition_id = 't_LE_antero',
label = 'anterograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_RTN3_PN', 'p_tau'], # didn't connect p_tau yet
firing_condition = PD_fc_t_LE_antero,
reaction_speed_function = PD_r_t_LE_antero, # get later from NPCD
consumption_coefficients = [ATPcons_t_LE_trans, 1, 0], # tune these coefficients based on PD
output_place_ids = ['p_ADP','p_RTN3_axon'],
production_coefficients = [ATPcons_t_LE_trans, 1],# tune these coefficients based on PD
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#45
transition_id = 't_RTN3_aggregation',
label = 'aggregation of monomeric RTN3 into HMW RTN3',
input_place_ids = ['p_RTN3_axon', 'p_RTN3_PN'],
firing_condition = PD_fc_t_RTN3_aggregation, # tune aggregation limit later
reaction_speed_function = PD_r_t_RTN3_aggregation,
consumption_coefficients = [1, 1],
output_place_ids = ['p_RTN3_HMW_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#46
transition_id = 't_RTN3_auto',
label = 'functional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = PD_fc_t_RTN3_auto,
reaction_speed_function = PD_r_t_RTN3_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_auto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#47
transition_id = 't_RTN3_lyso',
label = 'functional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_tau'],
firing_condition = PD_fc_t_RTN3_lyso,
reaction_speed_function = PD_r_t_RTN3_lyso,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_lyso'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#48
transition_id = 't_RTN3_dys_auto',
label = 'dysfunctional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = PD_fc_t_RTN3_dys_auto,
reaction_speed_function = PD_r_t_RTN3_dys_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_dys1'],
production_coefficients = [1],# tune later when data are incorporated
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
self.PD_pn.add_transition_with_speed_function(#49
transition_id = 't_RTN3_dys_lyso',
label = 'dysfunctional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_RTN3_HMW_dys1', 'p_tau'],
firing_condition = PD_fc_t_RTN3_dys_lyso,
reaction_speed_function = PD_r_t_RTN3_dys_lyso,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_RTN3_HMW_dys2'],
production_coefficients = [1],# tune later when data are incorporated
stochastic_parameters = [SD],
collect_rate_analytics = dont_collect)
def PD_Discrete_Transitions(self):
self.PD_pn.add_transition_with_speed_function( #25
transition_id = 't_A',
label = 'A',
input_place_ids = ['p_on4'],
firing_condition = lambda a: a['p_on4']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_Ca_extra'],
production_coefficients = [1],
stochastic_parameters = [CaSD,DelaySD],
collect_rate_analytics = ["no","no"],
delay=0.5)
self.PD_pn.add_transition_with_speed_function( #26
transition_id = 't_B',
label = 'B',
input_place_ids = ['p_Ca_extra'],
firing_condition = lambda a: a['p_Ca_extra']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on2'],
production_coefficients = [1],
stochastic_parameters = [CaSD,DelaySD],
collect_rate_analytics = ["no","no"],
delay=0.5)
self.PD_pn.add_transition_with_speed_function( #27
transition_id = 't_C',
label = 'C',
input_place_ids = ['p_on2'],
firing_condition = lambda a: a['p_on2']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on3'],
production_coefficients = [1],
stochastic_parameters = [CaSD,0],
collect_rate_analytics = ["no","no"],
delay=0)
self.PD_pn.add_transition_with_speed_function( #28
transition_id = 't_D',
label = 'D',
input_place_ids = ['p_on3'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on4'],
production_coefficients = [1],
stochastic_parameters = [CaSD,DelaySD],
collect_rate_analytics = ["no","no"],
delay=0.5)
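# Note: t_A, t_B, t_C and t_D pass a single token around the discrete places
# p_on4 -> p_Ca_extra -> p_on2 -> p_on3 -> p_on4, so together they form a
# repeating on/off pacemaker (delays of 0.5, 0.5, 0 and 0.5 time units) that
# gates the calcium import/export transitions defined above.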
self.PD_pn.add_transition_with_speed_function(#50
transition_id = 't_MDV_Generation_basal',
label = "Mitochondrially Derived Vesicles production",
input_place_ids = ['p_chol_mito', 'p_ROS_mito'],
firing_condition = lambda a: a['p_chol_mito']>3.71e9,
reaction_speed_function = lambda a: 0.0011088*a['p_chol_mito']*(1/3500)*1, #This is a fraction of a single mitochondrion, and must divide by no. of mitochondria (1/3500)
consumption_coefficients =[0,0], #[1,0], turn on
output_place_ids = ['p_chol_LE'],
production_coefficients = [0],#[1], turn off
stochastic_parameters = [SD, cholSD],
collect_rate_analytics = ["no","no"],
delay = function_for_MDV_delay) #WARNING, DELAY IS CURRENTLY WRONG AND TOO SLOW, CHANGE TO 1/cholmp IN RATE FUNCTIONS
self.PD_pn.add_transition_with_speed_function(#51
transition_id = 't_Mitophagy',
label = "Mitochondria Cholesterol Transfer to Lysosomes",
input_place_ids = ['p_chol_mito'],
firing_condition = lambda a: a['p_chol_mito']>3.71e9, # Only fires if it is higher than the equilibrium value
reaction_speed_function = lambda a: (1/3500)*a['p_chol_mito']*1,
consumption_coefficients =[0],
output_place_ids = ['p_chol_LE'],
production_coefficients = [0],
stochastic_parameters = [SD, cholSD],
collect_rate_analytics = ["no","no"],
delay = 310/chol_mp) # 1 mitophagy event every 310 seconds (Arias-Fuenzalida et al., 2019). However, mitophagy increases due to other events, and this needs to be modelled in the future.
def AD_parameters(self):
# multiplicative rate factors for increasing rates of slow modules
self.AD_Abeta_multiplier = 100
tau_multiplier = 10
chol_multiplier = 300
ER_multiplier = 10
# SD = 0.1
# SDCalcium = 0.1
neurone_cell_volume = 9008e-15 # L
avagadros_constant = 6.022e23 # mol-1
# Cholesterol homeostasis
# downregulation via chol in ER, linear approximation y = m*x+n
m_t_LDLR_endocyto = - 1.0682
n_t_LDLR_endocyto = 2.0682
fr_t_CYP46A1_metab = 0.08 # CYP46A1-accessible portion of ER cholesterol (to scale Km)
Km_t_CYP46A1_metab = 5.70 * 10 ** 6 / fr_t_CYP46A1_metab
vmax_t_CYP46A1_metab = 3.46 * 10 ** 3
st_t_CYP27A1_metab = 0.13158 # CYP27A1-accessible portion of mitochondrial cholesterol (to scale Km)
Km_t_CYP27A1_metab = 4.77 * 10 ** 7 / st_t_CYP27A1_metab
vmax_t_CYP27A1_metab = 2.56 * 10 ** 3
Km_t_CYP7B1_metab = 2.02 * 10 ** 7
vmax_t_CYP7B1_metab = 4.32 * 10 ** 3
st_t_CYP11A1_metab = 0.13158 # CYP11A1-accessible portion of mitochondrial cholesterol (to scale Km)
Km_t_CYP11A1_metab = 7.59 * 10 ** 7 / st_t_CYP11A1_metab # CHANGED BASED ON SOURCE 2 DATA TO SEE IF IT'S BETTER
vmax_t_CYP11A1_metab = 6.35 * 10 ** 4
Km_t_ApoEchol_cleav = 1.39 * 10 ** 7
vmax_t_ApoEchol_cleav = 1.86 * 10 ** 5
Km_t_LDLR_endocyto = 1.30 * 10 ** 6
vmax_t_LDLR_endocyto = 3.61633 * 10 ** 4
k_t_EE_mat = 0.000924196 # s^-1
k_t_chol_trans_LE_ER = 2.55357 * 10 ** (-4) # s^-1
k_t_chol_trans_LE_mito = 2.36 * 10 ** (-6) # s^-1
k_t_chol_trans_LE_PM = 0.002406761 # s^-1
k_t_chol_trans_ER_PM = 1.725 * 10 ** (-3) # s^-1
k_t_chol_trans_PM_ER = 1.56 * 10 ** (-6) # s^-1
k_t_chol_trans_ER_mito = 1.1713 * 10 ** (-4) # s^-1
k_t_27OHchol_endocyto = 2.65627 * 10 ** 2 # constant rate molecules/second, vary to represent different dietary cholesterol intakes
k_t_chol_trans_PM_ECM = 8.2859 * 10 ** (-5) # s^-1
# upregulation via 24-OHC, linear approximation y = m*x+n
m_t_chol_trans_PM_ECM = 0.2356
n_t_chol_trans_PM_ECM = 0.7644
k_t_24OHchol_exocyto = 7.47488 * 10 ** (-6) # s^-1
disease_multiplier_27OH = 1 # set to true
# ER Retraction & Collapse
beta_t_LE_retro = 1.667 #conversion factor of rate of retrograde transport to have it equal to anterograde transport in healthy cells
dist_t_LE_trans = 75e4 #distance in nm from perinuclear region to axon
mchol_t_LE_retro = 2.27e-9 # scaling effect of cholesterol on retro transport
nchol_t_LE_retro = 1 - mchol_t_LE_retro * it_p_chol_LE # scaling effect of cholesterol on retro transport
vmax_t_LE_retro = 892 #Vmax in nm/s
Km_t_LE_retro = 3510864000 #K_M in particles of ATP
vmax_t_LE_antero = 814 #Vmax in nm/s
Km_t_LE_antero = 614040000 #K_M in particles of ATP
ATPcons_t_LE_trans = 0 # dist_t_LE_trans / 8 # each step of the motor consumes 1 ATP & travels 8 nm; total ATP consumed = number of steps
k_t_RTN3_exp = 113.3
Ab_t_RTN3_aggregation = 641020
dec_t_RTN3_aggregation = 0.762
k_t_RTN3_auto = 0.011111111
k_t_RTN3_lyso = 0.000826667
mitprop_t_RTN3_dys_auto = 0.885
# Abeta Pathology
k_t_asec_exp = 96.8
mchol_t_asec_exp = 7.19184e-9
nchol_t_asec_exp = -1.86
k_t_asec_degr = 1.60e-5
k_t_APP_endocyto = 9.67e-5
dis_t_APP_endocyto = 0.0832033 # Compatible with the ApoE4 0/1 input representing 0 alleles & 2 alleles
k_t_APP_exp = 45000
dis_t_APP_exp = 0.25 # representing Apoe4 contribution to parameter change
m_t_APP_exp = 0.5/(693.444*it_p_ROS_mito)
n_t_APP_exp = 1 - it_p_ROS_mito * m_t_APP_exp
k_t_APP_endo_event = .0001435
k_t_bsec_exp = 11.138
mchol_t_bsec_exp = 1.52842e-8
nchol_t_bsec_exp = 0.532332
nRTN_t_bsec_exp = 1.78571
mRTN_t_bsec_exp = -(nRTN_t_bsec_exp-1)/it_p_RTN3_axon
mROS_t_bsec_exp = .5/it_p_ROS_mito
nROS_t_bsec_exp = 0.5
k_t_bsec_degr = 1.655e-5
mchol_t_APP_bsec_cleav = 8.13035e-12
nchol_t_APP_bsec_cleav = 0.312985106
age_t_APP_bsec_cleav = 0.44
k_t_gsec_exp = 53.92
k_t_gsec_degr = 1.6e-5 # assume same as asec and bsec for now - may update later
k_t_Ab_degr = 0.00188
Km_t_APP_asec_cleav = 19034084
kcat_t_APP_asec_cleav = 0.0474783
Km_t_APP_bsec_cleav = 37972323
kcat_t_APP_bsec_cleav = 0.002
Km_t_CTF99_gsec_cleav = 169223
kcat_t_CTF99_gsec_cleav = 0.00167
Km_t_Ab_elon = 17343360
Vmax_t_Ab_elon = 1.108
# Tau Pathology
k_t_actv_GSK3b = 8.33e-3
m_t_act_GSK3b = 4.07e-7 # TODO: tune this, increase m to increase effect
n_t_act_GSK3b = 1 - m_t_act_GSK3b * it_p_Ab
dis_t_act_GSK3b = 0.433
k_t_inactv_GSK3b = 7.95e-3
Km_t_phos_tau = 9.22e7
kcat_t_phos_tau = 0.146464095
Km_t_dephos_tauP = 6.29e7
vmax_t_dephos_tauP = 1.17*1.1e6 # uM/min / 20 units per mL PP-2A, TODO: convert unit
k_t_p_GSK3b_deg = 100*1.6e-5 # (standard protein degradation rate)
k_t_p_GSK3b_exp = k_t_p_GSK3b_deg * it_p_GSK3b_inact
# Calcium Homeostasis
k_t_NCX_PMCA = 10 #multiplied by 10 compared to Gabi's paper (Gabriel, 2020)
k_t_NaK_ATPase= 0.70
k_t_mCU1=(1*1e6)/(17854326) #rate mCU /average Ca_cyto in homeostasis
k_t_mCU2=(5000)/(17854326) #rate mCU /average Ca_cyto in homeostasis
#k_t_mNCLX=(5000)/(3.6*1e7) #rate mCU /average Ca_cyto in homeostasis
k_t_mNCLX=0.066666667
k_t_MAM=1e6/1.8e9 #rate MAM
k_t_SERCA_no_ATP=0.05638 #(1e6+100)/17854326#0.05638 #100/1785#4#3#2#6#/(5.407*1e9)
k_t_SERCA_ATP=k_t_SERCA_no_ATP/5.42e9 #rate SERCA /average ATP in homeostasis
k_t_RyR_IP3R = 100/(1.8*1e9) #rate RyR_IP3R /average Ca_ER in homeostasis
# Energy metabolism
k_t_krebs = (1.63*10**(-7))*2968656.262/3e7
k_t_ATP_hydro_mito = 1.92*10**(-2)
k_t_ETC = 2.48*10**(-5)*2968656.262/3e7
m_t_ETC_inhib_Ab = -1.6438e-6 # -7.5786*10**(-7)
n_t_ETC_inhib_Ab = 1.0559681024 #1 - m_t_ETC_inhib_Ab * it_p_Ab
k_t_ROS_metab = 5.875*10**10
k_t_mito_dysfunc = 1.0495e2 # s^-1 For time step of 0.01 s, change to 1.037984e2
m_t_mito_dysfunc = 3.1855e-5
n_t_mito_dysfunc = 0.61
m_t_mito_dysfunc_Ab = 1.27 * 10 ** (-7)
n_t_mito_dysfunc_Ab = 0.9957
k_t_cas3_inact = 7.96721 * 10 ** (-3) # s^-1
k_t_ROS_gener_Ab = 8.4e-1 # s^-1 maximum is 7e3
def AD_Continuous_Transitions(self):
## Transitions
# Cholesterol Endocytosis
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_LDLR_endocyto",
label = "LDLR endocyto",
input_place_ids = ["p_ApoEchol_extra", "p_chol_ER"],
firing_condition = fc_t_LDLR_endocyto,
reaction_speed_function = r_t_LDLR_endocyto,
consumption_coefficients = [0,0],
output_place_ids = ["p_chol_LE"],
production_coefficients = [354],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport Cholesterol from LE to ER
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_ER",
label = "Chol transport LE-ER",
input_place_ids = ["p_chol_LE"],
firing_condition = fc_t_chol_trans_LE_ER,
reaction_speed_function = r_t_chol_trans_LE_ER,
consumption_coefficients = [1],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# # Transport Cholesterol from LE to mito
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_mito",
label = "Chol transport LE-mito",
input_place_ids = ["p_chol_LE"],
firing_condition = fc_t_chol_trans_LE_mito,
reaction_speed_function = r_t_chol_trans_LE_mito,
consumption_coefficients = [1],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport Cholesterol from LE to PM
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_LE_PM",
label = "Chol transport LE-PM",
input_place_ids = ["p_chol_LE"],
firing_condition = fc_t_chol_trans_LE_PM,
reaction_speed_function = r_t_chol_trans_LE_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport Cholesterol from PM to ER
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_PM_ER",
label = "Chol transport PM-ER",
input_place_ids = ["p_chol_PM"],
firing_condition = fc_t_chol_trans_PM_ER,
reaction_speed_function = r_t_chol_trans_PM_ER,
consumption_coefficients = [1],
output_place_ids = ["p_chol_ER"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport Cholesterol from ER to PM
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_ER_PM",
label = "Chol transport ER-PM",
input_place_ids = ["p_chol_ER"],
firing_condition = fc_t_chol_trans_ER_PM,
reaction_speed_function = r_t_chol_trans_ER_PM,
consumption_coefficients = [1],
output_place_ids = ["p_chol_PM"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport Cholesterol from ER to mito
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_ER_mito",
label = "Chol transport ER-mito",
input_place_ids = ["p_chol_ER"],
firing_condition = fc_t_chol_trans_ER_mito,
reaction_speed_function = r_t_chol_trans_ER_mito,
consumption_coefficients = [1],
output_place_ids = ["p_chol_mito"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Metabolisation of chol by CYP27A1
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = "t_CYP27A1_metab",
label = "Chol metab CYP27A1",
Km = Km_t_CYP27A1_metab,
vmax = vmax_t_CYP27A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP27A1_metab,
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Metabolism of chol by CYP11A1
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = "t_CYP11A1_metab",
label = "Chol metab CYP11A1",
Km = Km_t_CYP11A1_metab,
vmax = vmax_t_CYP11A1_metab,
input_place_ids = ["p_chol_mito"],
substrate_id = "p_chol_mito",
consumption_coefficients = [1],
output_place_ids = ["p_preg"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP11A1_metab,
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Metabolisation of 27OHchol by CYP7B1
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = "t_CYP7B1_metab",
label = "27OHchol metab CYP7B1",
Km = Km_t_CYP7B1_metab,
vmax = vmax_t_CYP7B1_metab,
input_place_ids = ["p_27OHchol_intra"],
substrate_id = "p_27OHchol_intra",
consumption_coefficients = [1],
output_place_ids = ["p_7HOCA"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP7B1_metab,
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Endocytosis of 27OHchol
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_27OHchol_endocyto",
label = "27OHchol endocyto",
input_place_ids = ["p_27OHchol_extra"],
firing_condition = fc_t_27OHchol_endocyto,
reaction_speed_function = r_t_27OHchol_endocyto,
consumption_coefficients = [1],
output_place_ids = ["p_27OHchol_intra", "p_27OHchol_extra"],
production_coefficients = [1,1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Metabolisation of chol by CYP46A1
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = "t_CYP46A1_metab",
label = "Chol metab CYP46A1",
Km = Km_t_CYP46A1_metab,
vmax = vmax_t_CYP46A1_metab,
input_place_ids = ["p_chol_ER"],
substrate_id = "p_chol_ER",
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_intra"],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_CYP46A1_metab,
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Exocytosis of 24OHchol
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_24OHchol_exocyto",
label = "24OHchol exocyto",
input_place_ids = ["p_24OHchol_intra"],
firing_condition = fc_t_24OHchol_exocyto,
reaction_speed_function = r_t_24OHchol_exocyto,
consumption_coefficients = [1],
output_place_ids = ["p_24OHchol_extra"],
production_coefficients = [1],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Transport of Chol into ECM
self.AD_pn.add_transition_with_speed_function(
transition_id = "t_chol_trans_PM_ECM",
label = "Chol transport PM-ECM",
input_place_ids = ["p_chol_PM", "p_24OHchol_intra"],
firing_condition = fc_t_chol_trans_PM_ECM,
reaction_speed_function = r_t_chol_trans_PM_ECM,
consumption_coefficients = [1,0],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [cholSD],
collect_rate_analytics = collect_rate_analytics)
# Tau module: GSK3beta regulation and tau phosphorylation/dephosphorylation
## Transitions
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_GSK3b_exp_deg',
label = 'GSK3beta expression and degradation',
input_place_ids = ['p_GSK3b_inact'],
firing_condition = fc_t_GSK3b_exp_deg,
reaction_speed_function = r_t_GSK3b_exp_deg,
consumption_coefficients = [0],
output_place_ids = ['p_GSK3b_inact'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_actv_GSK3b',
label = 'GSK3beta activation',
input_place_ids = ['p_GSK3b_inact', 'p_ApoE', 'p_Ab'],
firing_condition = fc_t_actv_GSK3b,
reaction_speed_function = r_t_actv_GSK3b,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_GSK3b_act'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_inactv_GSK3b',
label = 'GSK3beta inactivation',
input_place_ids = ['p_GSK3b_act'],
firing_condition = fc_t_inactv_GSK3b,
reaction_speed_function = r_t_inactv_GSK3b,
consumption_coefficients = [1],
output_place_ids = ['p_GSK3b_inact'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = 't_phos_tau',
label = 'Phosphorylation of tau',
Km = Km_t_phos_tau,
vmax = kcat_t_phos_tau,
input_place_ids = ['p_tau', 'p_GSK3b_act', 'p_cas3'],
substrate_id = 'p_tau',
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_tauP'],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_phos_tau,
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = 't_dephos_tauP',
label = 'Dephosphorylation of tau protein',
Km = Km_t_dephos_tauP,
vmax = vmax_t_dephos_tauP,
input_place_ids = ['p_tauP', 'p_Ca_cyto'],
substrate_id = 'p_tauP',
consumption_coefficients = [1, 0],
output_place_ids = ['p_tau'],
production_coefficients = [1],
vmax_scaling_function = vmax_scaling_t_dephos_tauP,
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
## Abeta (Ab) transitions: APP processing, secretase turnover, and Ab degradation
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = 't_APP_asec_cleav',
label = 'Alpha cleavage of APP',
Km = Km_t_APP_asec_cleav,
vmax = kcat_t_APP_asec_cleav,
input_place_ids = ['p_APP_pm', 'p_asec', 'p_chol_PM'],
substrate_id = 'p_APP_pm',
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_sAPPa', 'p_CTF83'],
production_coefficients = [1, 1],
vmax_scaling_function = vmax_scaling_t_APP_asec_cleav,
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_asec_exp',
label = 'Alpha secretase expression',
input_place_ids = ['p_24OHchol_intra'],
firing_condition = fc_t_asec_exp,
reaction_speed_function = r_t_asec_exp,
consumption_coefficients = [0],
output_place_ids = ['p_asec'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_asec_degr',
label = 'Alpha secretase degradation',
input_place_ids = ['p_asec'],
firing_condition = fc_t_asec_degr,
reaction_speed_function = r_t_asec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# none
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_APP_exp',
label = 'APP expression rate',
input_place_ids = ['p_ApoE', 'p_ROS_mito'],
firing_condition = fc_t_APP_exp,
reaction_speed_function = r_t_APP_exp,
consumption_coefficients = [0, 0],
output_place_ids = ['p_APP_pm'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_APP_endocyto',
label = 'endocytosis',
input_place_ids = ['p_APP_pm', 'p_ApoE'],
firing_condition = fc_t_APP_endocyto,
reaction_speed_function = r_t_APP_endocyto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_APP_endo'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_APP_endo_event',
label = 'APP-utilizing cellular events',
input_place_ids = ['p_APP_endo'],
firing_condition = fc_t_APP_endo_event,
reaction_speed_function = r_t_APP_endo_event,
consumption_coefficients = [1],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = 't_APP_bsec_cleav',
label = 'Beta cleavage of APP',
Km = Km_t_APP_bsec_cleav,
vmax = kcat_t_APP_bsec_cleav,
input_place_ids = ['p_APP_endo', 'p_bsec', 'p_chol_PM', 'p_age'],
substrate_id = 'p_APP_endo',
consumption_coefficients = [1, 0, 0, 0],
output_place_ids = ['p_sAPPb', 'p_CTF99'],
production_coefficients = [1, 1],
vmax_scaling_function = vmax_scaling_t_APP_bsec_cleav,
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_bsec_exp',
label = 'Beta secretase expression',
input_place_ids = ['p_ROS_mito', 'p_27OHchol_intra', 'p_RTN3_axon'],
firing_condition = fc_t_bsec_exp,
reaction_speed_function = r_t_bsec_exp,
consumption_coefficients = [0, 0, 0],
output_place_ids = ['p_bsec'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_bsec_degr',
label = 'Beta secretase degradation',
input_place_ids = ['p_bsec'],
firing_condition = fc_t_bsec_degr,
reaction_speed_function = r_t_bsec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# none
self.AD_pn.add_transition_with_michaelis_menten(
transition_id = 't_CTF99_gsec_cleav',
label = 'Gamma secretase cleavage of CTF99',
Km = Km_t_CTF99_gsec_cleav,
vmax = kcat_t_CTF99_gsec_cleav,
input_place_ids = ['p_CTF99', 'p_gsec', 'p_chol_PM'],
substrate_id = 'p_CTF99',
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_Abconc', 'p_Ab', 'p_AICD'],
production_coefficients = [conversion, 1, 1],
vmax_scaling_function = vmax_scaling_t_CTF99_gsec_cleav,
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
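# p_Ab and p_Abconc track the same Abeta species in two units (the names suggest a
# molecule count and a concentration; the exact units are fixed where the places are
# declared, outside this section). The `conversion` factor keeps the two pools in step
# wherever both appear: production [conversion, 1] here, consumption [1, conversion]
# in t_Ab_degr, and 1/conversion in the nucleation/dissociation transitions below.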
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_gsec_exp',
label = 'Gamma secretase expression',
input_place_ids = ['p_ROS_mito'],
firing_condition = fc_t_gsec_exp,
reaction_speed_function = r_t_gsec_exp,
consumption_coefficients = [0],
output_place_ids = ['p_gsec'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_gsec_degr',
label = 'Gamma secretase degradation',
input_place_ids = ['p_gsec'],
firing_condition = fc_t_gsec_degr,
reaction_speed_function = r_t_gsec_degr,
consumption_coefficients = [1],
output_place_ids = [], # none
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# none
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_Ab_degr',
label = 'Ab degradation',
input_place_ids = ['p_Ab', 'p_Abconc'],
firing_condition = fc_t_Ab_degr,
reaction_speed_function = r_t_Ab_degr,
consumption_coefficients = [1, conversion],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# TODO - fix ratio
#AB aggregation module
#AB Aggregation transitions
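# Reading the place ids and labels below, the aggregation cascade is:
#   monomer (p_Ab / p_Abconc) --primary nucleation--> p_Ab_S
#   p_Ab_S --elongation--> p_Ab_P
#   p_Ab_P (+ monomer) --fibrillation--> p_Ab_M
#   p_Ab_M --fragmentation--> p_Ab_P, and phagocytosis removes p_Ab_P.
# Dissociation (t_Ab_dis1) returns p_Ab_S material to the monomer pools.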
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_nuc1',
label = "Ab primary nucleation",
input_place_ids = ['p_Ab', 'p_Abconc'],
firing_condition = fc_t_Ab_nuc1,
reaction_speed_function = r_t_Ab_nuc1,
consumption_coefficients = [1/conversion, 1],
output_place_ids = ['p_Ab_S'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_dis1',
label = "Ab dissociation1",
input_place_ids = ['p_Ab_S'],
firing_condition = fc_t_Ab_dis1,
reaction_speed_function = r_t_Ab_dis1,
consumption_coefficients = [1],
output_place_ids = ['p_Ab', 'p_Abconc'],
production_coefficients = [1/conversion, 1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_elon',
label = "Ab elongation",
input_place_ids = ['p_Ab_S'],
firing_condition = fc_t_Ab_elon,
reaction_speed_function = r_t_Ab_elon,
consumption_coefficients = [1],
output_place_ids = ['p_Ab_P'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_fib',
label = "Ab fibrillation",
input_place_ids = ['p_Ab_P', 'p_Ab', 'p_Abconc'],
firing_condition = fc_t_Ab_fib,
reaction_speed_function = r_t_Ab_fib,
consumption_coefficients = [0, 0, 0],
output_place_ids = ['p_Ab_M'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_M_frag',
label = "Ab fibril fragmentation",
input_place_ids = ['p_Ab_M'],
firing_condition = fc_t_Ab_M_frag,
reaction_speed_function = r_t_Ab_M_frag,
consumption_coefficients = [1],
output_place_ids = ['p_Ab_P'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(transition_id = 't_Ab_M_phag',
label = "Ab fibril phagocytosis",
input_place_ids = ['p_Ab_P', 'p_age', 'p_CD33'],
firing_condition = fc_t_Ab_P_phag,
reaction_speed_function = r_t_Ab_P_phag,
consumption_coefficients = [1, 0, 0],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
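# RTN3 / vesicle-trafficking module: RTN3 expression, ATP-dependent retrograde and
# anterograde transport of late endosomes & ER along the axon, aggregation of monomeric
# RTN3 into HMW species, and their functional vs. dysfunctional autophagic/lysosomal
# clearance (as named in the transition labels below).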
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_exp',
label = 'Expression rate of RTN3',
input_place_ids = [],
firing_condition = fc_t_RTN3_exp,
reaction_speed_function = r_t_RTN3_exp,
consumption_coefficients = [],
output_place_ids = ['p_RTN3_PN'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_LE_retro',
label = 'retrograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_chol_LE','p_RTN3_axon', 'p_tau'], # didn't connect p_tau or p_chol_LE yet
firing_condition = fc_t_LE_retro,
reaction_speed_function = r_t_LE_retro, # get later from PD
consumption_coefficients = [ATPcons_t_LE_trans, 0, 1, 0], # tune these coefficients based on PD
output_place_ids = ['p_ADP','p_RTN3_PN'],
production_coefficients = [ATPcons_t_LE_trans, 1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# tune these coefficients based on PD
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_LE_antero',
label = 'anterograde transport of LEs & ER',
input_place_ids = ['p_ATP','p_RTN3_PN', 'p_tau'], # didn't connect p_tau yet
firing_condition = fc_t_LE_antero,
reaction_speed_function = r_t_LE_antero, # get later from NPCD
consumption_coefficients = [ATPcons_t_LE_trans, 1, 0], # tune these coefficients based on PD
output_place_ids = ['p_ADP','p_RTN3_axon'],
production_coefficients = [ATPcons_t_LE_trans, 1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# tune these coefficients based on PD
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_aggregation',
label = 'aggregation of monomeric RTN3 into HMW RTN3',
input_place_ids = ['p_RTN3_axon', 'p_RTN3_PN', 'p_Ab'],
firing_condition = fc_t_RTN3_aggregation, # tune aggregation limit later
reaction_speed_function = r_t_RTN3_aggregation,
consumption_coefficients = [1, 1, 0],
output_place_ids = ['p_RTN3_HMW_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_auto',
label = 'functional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = fc_t_RTN3_auto,
reaction_speed_function = r_t_RTN3_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_auto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_lyso',
label = 'functional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_tau'],
firing_condition = fc_t_RTN3_lyso,
reaction_speed_function = r_t_RTN3_lyso,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_lyso'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_dys_auto',
label = 'dysfunctional autophagy of HMW RTN3',
input_place_ids = ['p_RTN3_HMW_cyto', 'p_RTN3_axon'],
firing_condition = fc_t_RTN3_dys_auto,
reaction_speed_function = r_t_RTN3_dys_auto,
consumption_coefficients = [1, 0],
output_place_ids = ['p_RTN3_HMW_dys1'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)# tune later when data are incorporated
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RTN3_dys_lyso',
label = 'dysfunctional delivery of HMW RTN3 to the lysosome',
input_place_ids = ['p_RTN3_HMW_auto', 'p_RTN3_HMW_dys1', 'p_tau'],
firing_condition = fc_t_RTN3_dys_lyso,
reaction_speed_function = r_t_RTN3_dys_lyso,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_RTN3_HMW_dys2'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics) # tune later when data are incorporated
# Energy metabolism transitions: Krebs cycle, ATP hydrolysis, ETC, and ROS handling
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_krebs',
label = 'Krebs cycle',
input_place_ids = ['p_ADP', 'p_Ca_mito', "p_Ab"],
firing_condition = fc_t_krebs,
reaction_speed_function = r_t_krebs,
consumption_coefficients = [1, 0, 0],
output_place_ids = ['p_reduc_mito', 'p_ATP'],
production_coefficients = [4,1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_ATP_hydro_mito',
label = 'ATP hydrolysis by cellular processes',
input_place_ids = ['p_ATP'],
firing_condition = fc_t_ATP_hydro_mito,
reaction_speed_function = r_t_ATP_hydro_mito,
consumption_coefficients = [1],
output_place_ids = ['p_ADP'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_ETC',
label = 'Electron transport chain',
input_place_ids = ['p_reduc_mito', 'p_ADP', 'p_Ca_mito', 'p_ROS_mito', 'p_chol_mito', "p_Ab"],
firing_condition = fc_t_ETC,
reaction_speed_function = r_t_ETC,
consumption_coefficients = [22/3.96, 440, 0, 0, 0, 0],
output_place_ids = ['p_ATP', 'p_ROS_mito'],
production_coefficients = [440, 0.06],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_ROS_metab',
label = 'Neutralization of ROS',
input_place_ids = ['p_ROS_mito', 'p_chol_mito'],
firing_condition = fc_t_ROS_metab,
reaction_speed_function = r_t_ROS_metab,
consumption_coefficients = [1, 0],
output_place_ids = ['p_H2O_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
# Output transitions: Cas3 for apoptosis
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_mito_dysfunc',
label = 'Mitochondrial complex 1 dysfunction',
input_place_ids = ['p_ROS_mito','p_Ab'],
firing_condition = fc_t_mito_dysfunc,
reaction_speed_function = r_t_mito_dysfunc,
consumption_coefficients = [1,0],
output_place_ids = ['p_cas3'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
# Cas3 inactivation
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_cas3_inact',
label = 'Caspase 3 inactivation',
input_place_ids = ['p_cas3'],
firing_condition = fc_t_cas3_inact,
reaction_speed_function = r_t_cas3_inact,
consumption_coefficients = [1],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_ROS_gener_Ab',
label = 'ROS generation by Abeta',
input_place_ids = ['p_Ab'],
firing_condition = fc_t_ROS_gener_Ab,
reaction_speed_function = r_t_ROS_gener_Ab,
consumption_coefficients = [0],
output_place_ids = ["p_ROS_mito"],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
# Calcium homeostasis transitions
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_Ca_imp',
label = 'VGCC/NMDA import channels',
input_place_ids = ['p_Ca_extra'],
firing_condition = fc_t_Ca_imp,
reaction_speed_function = r_t_Ca_imp,
consumption_coefficients = [0],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_mCU',
label = 'Ca import into mitochondria via mCU',
input_place_ids = ['p_Ca_cyto', 'p_Ca_mito'],
firing_condition = fc_t_mCU,
reaction_speed_function = r_t_mCU,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_MAM',
label = 'Ca transport from ER to mitochondria',
input_place_ids = ['p_Ca_ER', 'p_Ca_mito'],
firing_condition = fc_t_MAM,
reaction_speed_function = r_t_MAM,
consumption_coefficients = [1,0],
output_place_ids = ['p_Ca_mito'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_RyR_IP3R',
label = 'Ca export from ER',
input_place_ids = ['p_Ca_extra', 'p_Ca_ER'],
firing_condition = fc_t_RyR_IP3R,
reaction_speed_function = r_t_RyR_IP3R,
consumption_coefficients = [0,1],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_SERCA',
label = 'Ca import to ER',
input_place_ids = ['p_Ca_cyto','p_ATP'],
firing_condition = fc_t_SERCA,
reaction_speed_function = r_t_SERCA,
consumption_coefficients = [1,0.5],
output_place_ids = ['p_Ca_ER','p_ADP'],
production_coefficients = [1,0.5],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_NCX_PMCA',
label = 'Ca efflux to extracellular space',
input_place_ids = ['p_Ca_cyto','p_on3'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = r_t_NCX_PMCA,
consumption_coefficients = [1,0],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_mNCLX',
label = 'Ca export from mitochondria via mNCLX',
input_place_ids = ['p_Ca_mito'],
firing_condition = fc_t_mNCLX,
reaction_speed_function = r_t_mNCLX,
consumption_coefficients = [1],
output_place_ids = ['p_Ca_cyto'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
# Link to energy metabolism in that it needs ATP replenishment
self.AD_pn.add_transition_with_mass_action(
transition_id = 't_NaK_ATPase',
label = 'NaK ATPase',
rate_constant = k_t_NaK_ATPase,
input_place_ids = ['p_ATP', 'p_on3'],
firing_condition = lambda a: a['p_on3']==1,
consumption_coefficients = [1,0],
output_place_ids = ['p_ADP'],
production_coefficients = [1],
stochastic_parameters = [SD],
collect_rate_analytics = collect_rate_analytics)
def AD_Discrete_Transitions(self):
# Discrete on/off switches for calcium pacemaking
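# t_A..t_D form a discrete pacemaker loop: a token cycles
#   p_on4 -> p_Ca_extra -> p_on2 -> p_on3 -> p_on4
# with delays of 0.5 s, 0.5 s, 0 s and 0.5 s, so p_Ca_extra and p_on3 switch on and off
# periodically and gate the calcium transitions above (t_Ca_imp, t_RyR_IP3R, t_NCX_PMCA,
# t_NaK_ATPase fire only while the relevant switch place holds a token).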
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_A',
label = 'A',
input_place_ids = ['p_on4'],
firing_condition = lambda a: a['p_on4']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_Ca_extra'],
production_coefficients = [1],
stochastic_parameters = [CaSD, DelaySD],
delay=0.5,
collect_rate_analytics = ["no","no"])
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_B',
label = 'B',
input_place_ids = ['p_Ca_extra'],
firing_condition = lambda a: a['p_Ca_extra']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on2'],
production_coefficients = [1],
stochastic_parameters = [CaSD, DelaySD],
delay=0.5,
collect_rate_analytics = ["no","no"])
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_C',
label = 'C',
input_place_ids = ['p_on2'],
firing_condition = lambda a: a['p_on2']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on3'],
production_coefficients = [1],
stochastic_parameters = [CaSD, DelaySD],
delay=0,
collect_rate_analytics = ["no","no"])
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_D',
label = 'D',
input_place_ids = ['p_on3'],
firing_condition = lambda a: a['p_on3']==1,
reaction_speed_function = lambda a: 1,
consumption_coefficients = [1],
output_place_ids = ['p_on4'],
production_coefficients = [1],
stochastic_parameters = [CaSD, DelaySD],
delay=0.5,
collect_rate_analytics = ["no","no"])
self.AD_pn.add_transition_with_speed_function(
transition_id = 't_MDV_Generation_basal',
label = 't_MDV_Generation_basal',
reaction_speed_function = lambda a : 1,
input_place_ids = [],
firing_condition = lambda a : True,
consumption_coefficients = [],
output_place_ids = [],
production_coefficients = [],
stochastic_parameters = [SD, DelaySD],
delay=0.5,
collect_rate_analytics = collect_rate_analytics)
def make_scrollbar_sHFPN(self):
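"""Build a vertically scrollable area in frame4: a Canvas, a Scrollbar driving
canvas.yview, and an inner Frame embedded via create_window that widgets are placed in."""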
self.canvas = tk.Canvas(self.frame4)
self.canvas.pack(side="left", fill=tk.BOTH, expand=1)
self.scrollbar = ttk.Scrollbar(self.frame4, orient=tk.VERTICAL, command =self.canvas.yview)
self.scrollbar.pack(side="left", fill=tk.Y)
self.canvas.configure(yscrollcommand=self.scrollbar.set)
self.canvas.bind('<Configure>', lambda e: self.canvas.configure(scrollregion= self.canvas.bbox("all")))
#Create another frame inside the canvas
self.frame_in_canvas = tk.Frame(self.canvas)
self.canvas.create_window((0,0), window=self.frame_in_canvas, anchor="nw")
# def make_scrollbar_AD_sHFPN(self): #might be redundant
# self.AD_canvas = tk.Canvas(self.AD_frame1)
# self.AD_canvas.pack(side="left", fill=tk.BOTH, expand=1)
# self.AD_scrollbar = ttk.Scrollbar(self.AD_frame1, orient=tk.VERTICAL, command =self.canvas.yview)
# self.AD_scrollbar.pack(side="left", fill=tk.Y)
# self.AD_canvas.configure(yscrollcommand=self.AD_scrollbar.set)
# self.AD_canvas.bind('<Configure>', lambda e: self.AD_canvas.configure(scrollregion= self.AD_canvas.bbox("all")))
# #Create another frame inside the canvas
# self.AD_frame_in_canvas = tk.Frame(self.AD_canvas)
# self.AD_canvas.create_window((0,0), window=self.AD_frame_in_canvas, anchor="nw")
def make_scrollbar_Analysis(self):
self.canvas2 = tk.Canvas(self.frame5)
self.scrollbar2 = ttk.Scrollbar(self.frame5, orient=tk.VERTICAL, command =self.canvas2.yview)
self.scrollbar2.pack(side="right", fill=tk.Y)
self.scrollbarhori = ttk.Scrollbar(self.frame5, orient=tk.HORIZONTAL, command =self.canvas2.xview)
self.scrollbarhori.pack(side=tk.BOTTOM, fill=tk.X)
self.canvas2.configure(yscrollcommand=self.scrollbar2.set, xscrollcommand=self.scrollbarhori.set)
self.canvas2.bind('<Configure>', lambda e: self.canvas2.configure(scrollregion= self.canvas2.bbox("all")))
self.canvas2.pack(side="left", fill=tk.BOTH, expand=1)
#Create another frame inside the canvas2
self.frame_in_canvas_Analysis = tk.Frame(self.canvas2)
self.canvas2.create_window((0,0), window=self.frame_in_canvas_Analysis, anchor="nw")
self.frame_in_canvas_Analysis.config(background="grey42")
def AD_make_scrollbar_Inputs_Page(self):
self.AD_canvas3 = tk.Canvas(self.AD_frame3)
self.AD_canvas3.pack(side="left", fill=tk.BOTH, expand=1)
self.AD_scrollbar3 = ttk.Scrollbar(self.AD_frame3, orient=tk.VERTICAL, command =self.AD_canvas3.yview)
self.AD_scrollbar3.pack(side="left", fill=tk.Y)
self.AD_canvas3.configure(yscrollcommand=self.AD_scrollbar3.set)
self.AD_canvas3.bind('<Configure>', lambda e: self.AD_canvas3.configure(scrollregion= self.AD_canvas3.bbox("all")))
#Create another frame inside the canvas
self.AD_frame3_in_canvas_Inputs = tk.Frame(self.AD_canvas3)
self.AD_canvas3.create_window((0,0), window=self.AD_frame3_in_canvas_Inputs, anchor="nw")
def AD_Inputs_Page(self):
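"""Construct the AD inputs page: run save name, number of timesteps, timestep size,
plus ApoE4, CD33 and Aged checkboxes, and a Save Inputs button that enables the AD run button."""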
self.AD_frame3=tk.Frame(self.frame2)
#self.frame3.pack(side="left", fill=tk.BOTH,expand=1)
self.AD_frame3.grid(row=0,column=0,sticky="nsew")
self.AD_make_scrollbar_Inputs_Page()
#Inputs Labels and Entry Boxes
#*Run Save Name*
self.AD_Label_run_save_name = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Run Save Name")
self.AD_Label_run_save_name.grid(row=0,column=0)
self.AD_Label_run_save_name_e = tk.Entry(self.AD_frame3_in_canvas_Inputs)
self.AD_Label_run_save_name_e.grid(row=0,column=1)
self.AD_Label_run_save_name_e.insert(tk.END, "sHFPN_Save_Name")
#*Number of Timesteps*
self.AD_Label_no_timesteps = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Number of Timesteps")
self.AD_Label_no_timesteps.grid(row=1,column=0)
self.AD_Label_no_timesteps_e = tk.Entry(self.AD_frame3_in_canvas_Inputs)
self.AD_Label_no_timesteps_e.grid(row=1,column=1)
self.AD_Label_no_timesteps_e.insert(tk.END, "100")
self.AD_Label_Help_no_timesteps = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Only input increments of 1000")
self.AD_Label_Help_no_timesteps.grid(row=1, column=2)
#*Timestep Size*
self.AD_Label_timestep_size = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Timestep Size (s)")
self.AD_Label_timestep_size.grid(row=2,column=0)
self.AD_Label_timestep_size_e = tk.Entry(self.AD_frame3_in_canvas_Inputs)
self.AD_Label_timestep_size_e.grid(row=2,column=1)
self.AD_Label_timestep_size_e.insert(tk.END, "0.001")
#*Mutations Header*
Mutations_Header = tkfont.Font(family='Helvetica', size=10, weight="bold", slant="italic")
self.Label_Header_Mutations = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Mutations and Risk Factors", font= Mutations_Header)
self.Label_Header_Mutations.grid(row=6, column=1)
#*ApoE4 Mutation
self.AD_ApoE4_Mutation = tk.Label(self.AD_frame3_in_canvas_Inputs, text="ApoE4")
self.AD_ApoE4_Mutation.grid(row=7, column=0)
self.AD_ApoE4_var = tk.IntVar()
self.AD_ApoE4_Mutation_checkbox = tk.Checkbutton(self.AD_frame3_in_canvas_Inputs, variable=self.AD_ApoE4_var)
self.AD_ApoE4_Mutation_checkbox.grid(row=7, column=1)
#CD33 mutation
self.AD_CD33_Mutation = tk.Label(self.AD_frame3_in_canvas_Inputs, text="CD33")
self.AD_CD33_Mutation.grid(row=8, column=0)
self.AD_CD33_var = tk.IntVar()
self.AD_CD33_Mutation_checkbox = tk.Checkbutton(self.AD_frame3_in_canvas_Inputs, variable=self.AD_CD33_var)
self.AD_CD33_Mutation_checkbox.grid(row=8, column=1)
#Aged
self.AD_Aged_risk = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Aged")
self.AD_Aged_risk.grid(row=9, column=0)
self.AD_Aged_var = tk.IntVar()
self.AD_Aged_risk_checkbox = tk.Checkbutton(self.AD_frame3_in_canvas_Inputs, variable=self.AD_Aged_var)
self.AD_Aged_risk_checkbox.grid(row=9, column=1)
def AD_save_entry_inputs(self):
self.AD_HFPN_run_save_name =self.AD_Label_run_save_name_e.get()
self.AD_HFPN_number_of_timesteps = self.AD_Label_no_timesteps_e.get()
self.AD_HFPN_timestep_size = self.AD_Label_timestep_size_e.get()
# self.AD_HFPN_CholSD = self.AD_Label_CholSD_e.get()
# self.AD_HFPN_CalciumSD = self.AD_Label_Calcium_e.get()
print("Inputs Saved")
self.AD_button_1.config(state="normal", text="Run AD sHFPN")
self.AD_button_6.config(state=tk.DISABLED)
#*Save Inputs Button*
self.AD_button_6 = tk.Button(self.AD_frame3_in_canvas_Inputs, text = "Save Inputs", cursor="hand2", command=partial(AD_save_entry_inputs, self))
self.AD_button_6.grid(row=20, column=1, pady=20)
self.AD_Label_Save_Inputs_Button_info = tk.Label(self.AD_frame3_in_canvas_Inputs, text="Double check your inputs")
self.AD_Label_Save_Inputs_Button_info.grid(row=20, column=2)
def make_scrollbar_Inputs_Page(self):
self.canvas3 = tk.Canvas(self.frame3)
self.canvas3.pack(side="left", fill=tk.BOTH, expand=1)
self.scrollbar3 = ttk.Scrollbar(self.frame3, orient=tk.VERTICAL, command =self.canvas3.yview)
self.scrollbar3.pack(side="left", fill=tk.Y)
self.canvas3.configure(yscrollcommand=self.scrollbar3.set)
self.canvas3.bind('<Configure>', lambda e: self.canvas3.configure(scrollregion= self.canvas3.bbox("all")))
#Create another frame inside the canvas2
self.frame_in_canvas_Inputs = tk.Frame(self.canvas3)
self.canvas3.create_window((0,0), window=self.frame_in_canvas_Inputs, anchor="nw")
def PD_Inputs_Page(self):
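"""Construct the PD inputs page: run save name, number of timesteps, timestep size,
stochasticity levels (CholSD, calcium module SD), mutation checkboxes (LRRK2, GBA1,
VPS35, DJ1), therapeutic checkboxes (NPT200, DNL151, LAMP2A), and a Save Inputs button."""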
self.frame3=tk.Frame(self.frame2)
#self.frame3.pack(side="left", fill=tk.BOTH,expand=1)
self.frame3.grid(row=0,column=0,sticky="nsew")
self.make_scrollbar_Inputs_Page()
#Inputs Labels and Entry Boxes
#*Run Save Name*
self.Label_run_save_name = tk.Label(self.frame_in_canvas_Inputs, text="Run Save Name")
self.Label_run_save_name.grid(row=0,column=0)
self.Label_run_save_name_e = tk.Entry(self.frame_in_canvas_Inputs)
self.Label_run_save_name_e.grid(row=0,column=1)
self.Label_run_save_name_e.insert(tk.END, "sHFPN_Save_Name")
#*Number of Timesteps*
self.Label_no_timesteps = tk.Label(self.frame_in_canvas_Inputs, text="Number of Timesteps")
self.Label_no_timesteps.grid(row=1,column=0)
self.Label_no_timesteps_e = tk.Entry(self.frame_in_canvas_Inputs)
self.Label_no_timesteps_e.grid(row=1,column=1)
self.Label_no_timesteps_e.insert(tk.END, "6000000")
self.Label_Help_no_timesteps = tk.Label(self.frame_in_canvas_Inputs, text="Only input increments of 1000")
self.Label_Help_no_timesteps.grid(row=1, column=2)
#*Timestep Size*
self.Label_timestep_size = tk.Label(self.frame_in_canvas_Inputs, text="Timestep Size (s)")
self.Label_timestep_size.grid(row=2,column=0)
self.Label_timestep_size_e = tk.Entry(self.frame_in_canvas_Inputs)
self.Label_timestep_size_e.grid(row=2,column=1)
self.Label_timestep_size_e.insert(tk.END, "0.001")
#*SD Header*
SD_font = tkfont.Font(family='Helvetica', size=10, weight="bold", slant="italic")
self.Label_Header = tk.Label(self.frame_in_canvas_Inputs, text="Adjust Transition Stochasticity Levels", font=SD_font)
self.Label_Header.grid(row=3, column=1, pady=20)
#*CholSD*
self.Label_CholSD = tk.Label(self.frame_in_canvas_Inputs, text="CholSD (0 to 1)")
self.Label_CholSD.grid(row=4,column=0)
self.Label_CholSD_e = tk.Entry(self.frame_in_canvas_Inputs)
self.Label_CholSD_e.grid(row=4,column=1)
self.Label_CholSD_e.insert(tk.END, "0.1")
#*Calcium Module SD*
self.Label_Calcium = tk.Label(self.frame_in_canvas_Inputs, text="Calcium Module SD (0 to 1)")
self.Label_Calcium.grid(row=5,column=0)
self.Label_Calcium_e = tk.Entry(self.frame_in_canvas_Inputs)
self.Label_Calcium_e.grid(row=5,column=1)
self.Label_Calcium_e.insert(tk.END, "0.1")
#*Mutations Header*
self.Mutations_Header = tkfont.Font(family='Helvetica', size=10, weight="bold", slant="italic")
self.Label_Header_Mutations = tk.Label(self.frame_in_canvas_Inputs, text="Mutations", font=self.Mutations_Header)
self.Label_Header_Mutations.grid(row=6, column=1)
#*LRRK2 Mutation
self.LRRK2_Mutation = tk.Label(self.frame_in_canvas_Inputs, text="LRRK2")
self.LRRK2_Mutation.grid(row=7, column=0)
self.LRRK2_var = tk.IntVar()
self.LRRK2_Mutation_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.LRRK2_var)
self.LRRK2_Mutation_checkbox.grid(row=7, column=1)
#*GBA1 Mutation
self.GBA1_Mutation = tk.Label(self.frame_in_canvas_Inputs, text="GBA1")
self.GBA1_Mutation.grid(row=8, column=0)
self.GBA1_var = tk.IntVar()
self.GBA1_Mutation_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.GBA1_var)
self.GBA1_Mutation_checkbox.grid(row=8, column=1)
#*VPS35 Mutation
self.VPS35_Mutation = tk.Label(self.frame_in_canvas_Inputs, text="VPS35")
self.VPS35_Mutation.grid(row=9, column=0)
self.VPS35_var = tk.IntVar()
self.VPS35_Mutation_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.VPS35_var)
self.VPS35_Mutation_checkbox.grid(row=9, column=1)
#*DJ1 Mutation
self.DJ1_Mutation = tk.Label(self.frame_in_canvas_Inputs, text="DJ1")
self.DJ1_Mutation.grid(row=10, column=0)
self.DJ1_var = tk.IntVar()
self.DJ1_Mutation_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.DJ1_var)
self.DJ1_Mutation_checkbox.grid(row=10, column=1)
#*Therapeutics Header*
self.Therapeutics_Header = tkfont.Font(family='Helvetica', size=10, weight="bold", slant="italic")
self.Label_Header_Therapeutics = tk.Label(self.frame_in_canvas_Inputs, text="Therapeutics", font=self.Therapeutics_Header)
self.Label_Header_Therapeutics.grid(row=11, column=1)
#NPT200
self.PD_NPT200 = tk.Label(self.frame_in_canvas_Inputs, text="NPT200")
self.PD_NPT200.grid(row=12, column=0)
self.PD_NPT200_var = tk.IntVar()
self.PD_NPT200_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.PD_NPT200_var)
self.PD_NPT200_checkbox.grid(row=12, column=1)
#DNL151
self.PD_DNL151 = tk.Label(self.frame_in_canvas_Inputs, text="DNL151")
self.PD_DNL151.grid(row=13, column=0)
self.PD_DNL151_var = tk.IntVar()
self.PD_DNL151_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.PD_DNL151_var)
self.PD_DNL151_checkbox.grid(row=13, column=1)
#LAMP2A
self.PD_LAMP2A = tk.Label(self.frame_in_canvas_Inputs, text="LAMP2A")
self.PD_LAMP2A.grid(row=14, column=0)
self.PD_LAMP2A_var = tk.IntVar()
self.PD_LAMP2A_checkbox = tk.Checkbutton(self.frame_in_canvas_Inputs, variable=self.PD_LAMP2A_var)
self.PD_LAMP2A_checkbox.grid(row=14, column=1)
def save_entry_inputs(self):
self.HFPN_run_save_name =self.Label_run_save_name_e.get()
self.HFPN_number_of_timesteps = self.Label_no_timesteps_e.get()
self.HFPN_timestep_size = self.Label_timestep_size_e.get()
self.HFPN_CholSD = self.Label_CholSD_e.get()
self.HFPN_CalciumSD = self.Label_Calcium_e.get()
print("Inputs Saved")
self.button_1.config(state="normal", text="Run sHFPN")
self.lb.itemconfig(7, bg="red")
self.button_6.config(state=tk.DISABLED)
#*Save Inputs Button*
self.button_6 = tk.Button(self.frame_in_canvas_Inputs, text = "Save Inputs", cursor="hand2", command=partial(save_entry_inputs, self))
self.button_6.grid(row=20, column=1, pady=20)
self.Label_Save_Inputs_Button_info = tk.Label(self.frame_in_canvas_Inputs, text="Double check your inputs")
self.Label_Save_Inputs_Button_info.grid(row=20, column=2)
def About_Page(self):
self.frame7=tk.Frame(self.frame2)
self.frame7.grid(row=0, column=0, sticky="nsew")
self.button_4 = tk.Button(self.frame7, text="Link to Website")
def Open_Link(url):
webbrowser.open_new(url)
self.button_4.config(cursor="hand2",command= partial(Open_Link, "https://www.ceb-mng.org/"))
self.button_4.pack()
self.button_5 = tk.Button(self.frame7, text="Twitter", cursor="hand2", command = partial(Open_Link, "https://twitter.com/mng_ceb"))
self.button_5.pack(side="top")
self.About_Image = ImageTk.PhotoImage(Image.open("AboutPage.png"))
self.Image_as_Label = tk.Label(self.frame7)
self.Image_as_Label.config(image=self.About_Image)
self.Image_as_Label.pack()
self.BSL_font = tkfont.Font(family='Helvetica', size=7, slant="italic")
self.Label_BSL = tk.Label(self.frame7, text="Please email B.S. Lockey at [email protected] for issues.", font=self.BSL_font)
self.Label_BSL.pack()
def Live_Graph(self):
self.frame8=tk.Frame(self.frame2)
self.frame8.grid(row=0, column=0, sticky="nsew")
#Label
# self.Label_Neuronal_Healthbar = tk.Label(self.frame8, text="Under Construction...")
# self.Label_Neuronal_Healthbar.pack()
#Embedded Graphs (PROBABLY HAVE TO APPEND THIS TO SELF LATER, SO CAN BE ACCESSED)
# self.f = Figure(figsize=(5,5), dpi=100)
# self.a = self.f.add_subplot(111)
# self.a.plot([1,2,3,4,5,6,7,8],[1,2,3,4,5,6,7,8])
# self.Neuronal_Healthbar_canvas = FigureCanvasTkAgg(self.f, self.frame8)
# self.Neuronal_Healthbar_canvas.draw()
# self.Neuronal_Healthbar_canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)#I can also choose to grid it so its more compact for later, when I want to plot multiple plots.
# toolbar = NavigationToolbar2Tk(self.Neuronal_Healthbar_canvas, self.frame8)
# toolbar.update()
# self.Neuronal_Healthbar_canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def Run_sHFPN_Page(self):
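"""Run page: one button each for the PD and AD simulations; each launches its run on a
background thread so the GUI stays responsive, above the scrollable output area."""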
#PD Button
self.frame4=tk.Frame(self.frame2)
self.frame4.grid(row=0,column=0,sticky="nsew")
self.button_1 = tk.Button(self.frame4, text="Save PD Inputs", state=tk.DISABLED, command=lambda: threading.Thread(target=self.run_sHFPN).start())  # fresh thread per click; a Thread object can only be started once
self.button_1.config(cursor="hand2")
self.button_1.pack(side=tk.TOP)
#AD Button
self.AD_button_1 = tk.Button(self.frame4, text="Save AD Inputs", state=tk.DISABLED, command=lambda: threading.Thread(target=self.run_AD_sHFPN).start())
self.AD_button_1.config(cursor="hand2")
self.AD_button_1.pack(side=tk.TOP)
self.make_scrollbar_sHFPN()
def Analysis_page(self):
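"""Analysis page: enter or pick a saved run name, then load it and expose per-place
token plots and consumption/production rate plots, with rolling-average, threshold,
axis-limit and CSV-export options; the load runs on a background thread."""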
self.frame5=tk.Frame(self.frame2)
#self.frame5.pack(side="left", fill=tk.BOTH,expand=1)
self.frame5.grid(row=0,column=0,sticky="nsew")
self.frame5.config(bg="grey42")
self.run_save_name_entry = tk.Entry(self.frame5, width=50, bg="black", fg="violet", borderwidth="5")
self.run_save_name_entry.pack()
def insert_run_name(self):
self.run_save_name_entry.delete(0, tk.END)
self.run_save_name_entry.insert(tk.END, self.saved_run_string)
def Go_to_saved_runs(self):
self.frame6.destroy()
self.show_saved_runs()
self.frame6.tkraise()
self.button_saved_run.config(text="Input Last Hovered Saved Run",state=tk.DISABLED, command=partial(insert_run_name, self))
self.button_saved_run = tk.Button(self.frame5, text="Go To Saved Runs", command=partial(Go_to_saved_runs, self))
self.button_saved_run.pack()
def save_entry(self):
"saves run_save_name entry"
if self.run_save_name_entry.get() =="":
self.run_save_name_entry.config(bg="red")
else:
self.run_save_name =self.run_save_name_entry.get()
self.button_2.config(state="normal", text="Run Analysis")
print(self.run_save_name)
self.button3.config(state=tk.DISABLED)
self.run_save_name_entry.config(bg="black")
self.button_saved_run.config(state=tk.DISABLED)
self.button3 = tk.Button(self.frame5, text="Enter run_save_name", command = partial(save_entry, self))
self.button3.config(cursor="hand2")
self.button3.pack()
def GUI_plot(place_id, analysis, File, simulation_time_step, desired_plotting_steps, max_time_step):
place_label =""
plot_title = place_id
desired_plotting_steps = int(self.desired_plotting_steps_entry_box.get())
if desired_plotting_steps>max_time_step: #in case user inputs more timesteps than available
desired_plotting_steps = max_time_step
if desired_plotting_steps %2==0: # if even, subtract 1 to avoid errors (the plotting below only works with an odd number of steps)
desired_plotting_steps = desired_plotting_steps-1
t=np.arange(0,desired_plotting_steps*simulation_time_step+simulation_time_step,simulation_time_step) #(start,end,step) end in seconds. end = 1000 with ts=0.001 means you have 1000000 datapoints.
t=t[0::int(self.nth_datapoint_entry_box.get())] #takes every nth data point. But still need to make sure the Axes are right somehow.
#truncate t by 1
fig,ax=plt.subplots()
data = self.analysis[File].mean_token_history_for_places([place_id])[0:desired_plotting_steps+1]
data = data[0::int(self.nth_datapoint_entry_box.get())]
if self.Analysis_print_mean_value_var.get(): # read the BooleanVar; the object itself is always truthy
print("Mean: ", np.mean(data))
option2 = self.Analysis_plot_rolling_only_var.get()
if option2==0:
if place_label == "":
ax.plot(t, data, label = File, color="black")
else:
ax.plot(t, data, label = File+' - '+place_label, color="black")
self.Analysis_rolling_average_decision=self.Analysis_rolling_average.get()
da_window_size = int(self.Analysis_RAWS_entry.get())
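# Optional trailing moving average: np.convolve with a box kernel of length
# da_window_size gives the rolling mean, and the first (window - 1) points are
# padded with NaN so data2 keeps the same length as t for plotting.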
if self.Analysis_rolling_average_decision ==1:
data2 = data.ravel()
data2 = np.convolve(data2, np.ones(da_window_size), "valid")/da_window_size
an_array = np.empty(da_window_size-1)
an_array[:] = np.NaN
data2 = np.insert(data2, 0, an_array)
plt.plot(t, data2, color="red")
Analysis.standardise_plot(ax, title = plot_title, xlabel = "Time (s)",ylabel = "Molecule count")
if self.Analysis_y_threshold_entry.get() != "":
plt.axhline(y=float(self.Analysis_y_threshold_entry.get()), linestyle='--', color ='red', label = self.Analysis_y_threshold_graph_label.get())
if self.Analysis_plotting_text_list_entry.get() == "":
ax.legend()
else:
L = ax.legend()
L.get_texts()[0].set_text(self.Analysis_plotting_text_list_entry.get())
if self.Analysis_x_lim_entry.get() != "":
split_list = self.Analysis_x_lim_entry.get().split(", ")
x_lim_0 = float(split_list[0])
x_lim_1 = float(split_list[1])
plt.xlim([x_lim_0,x_lim_1])
if self.Analysis_y_lim_entry.get() != "":
split_list = self.Analysis_y_lim_entry.get().split(", ")
y_lim_0 = float(split_list[0])
y_lim_1 = float(split_list[1])
plt.ylim([y_lim_0,y_lim_1])
if self.Analysis_Plot_Title.get() != "":
plt.title(self.Analysis_Plot_Title.get())
if self.Analysis_Plot_Y_Axis_Label_Entry.get() != "":
plt.ylabel(self.Analysis_Plot_Y_Axis_Label_Entry.get())
plt.show()
if self.Export_var.get() ==1:
thename = "../saved-csvs/"+str(self.CSV_save_name.get())+".csv"
with open(thename, 'w', newline='') as csvfile:
token_header = place_id+' Tokens'
fieldnames = ['Time (s)', token_header]
thewriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
thewriter.writeheader()
for thetime, token in zip(t,data):
thewriter.writerow({'Time (s)':thetime, token_header:token[0]})
print("CSV Saved at " + thename)
self.csv_listbox.destroy()
self.csv_list_box_function()
self.update_truth_list()
def GUI_Plot_Rate(col, analysis, File, simulation_time_step, desired_plotting_steps, max_time_step, CONS_OR_PROD):
place_label =""
plot_title = col
desired_plotting_steps = int(self.desired_plotting_steps_entry_box.get())
if desired_plotting_steps>max_time_step: #in case user inputs more timesteps than available
desired_plotting_steps = max_time_step
if desired_plotting_steps %2==0: # if even, subtract 1 to avoid errors (the plotting below only works with an odd number of steps)
desired_plotting_steps = desired_plotting_steps-1
t=np.arange(0,desired_plotting_steps*simulation_time_step+simulation_time_step,simulation_time_step) #(start,end,step) end in seconds. end = 1000 with ts=0.001 means you have 1000000 datapoints.
t=t[0::int(self.nth_datapoint_entry_box.get())] #takes every nth data point. But still need to make sure the Axes are right somehow.
fig,ax=plt.subplots()
if CONS_OR_PROD == "Cons":
data = self.Analysis_dataframe_cons[[col]][0:desired_plotting_steps+1]
data = data[0::int(self.nth_datapoint_entry_box.get())]
if self.Analysis_print_mean_value_var.get():
print("Mean: ", np.mean(data))
if CONS_OR_PROD == "Prod":
data = self.Analysis_dataframe_prod[[col]][0:desired_plotting_steps+1]
data = data[0::int(self.nth_datapoint_entry_box.get())]
if self.Analysis_print_mean_value_var.get():
print("Mean: ", np.mean(data))
self.Analysis_rolling_average_decision=self.Analysis_rolling_average.get()
da_window_size = int(self.Analysis_RAWS_entry.get())
option2 = self.Analysis_plot_rolling_only_var.get()
if option2==0:
if place_label == "":
ax.plot(t, data, label = File, color="black")
else:
ax.plot(t, data, label = File+' - '+place_label, color="black")
if self.Analysis_rolling_average_decision ==1:
data2 = data.rolling(window=da_window_size).mean()
plt.plot(t, data2, color="red", label="Rolling Mean")
Analysis.standardise_plot(ax, title = plot_title, xlabel = "Time (s)",ylabel = "Molecule count")
if self.Analysis_y_threshold_entry.get() != "":
plt.axhline(y=float(self.Analysis_y_threshold_entry.get()), linestyle='--', color ='red', label = self.Analysis_y_threshold_graph_label.get())
if self.Analysis_plotting_text_list_entry.get() == "":
ax.legend()
else:
L = ax.legend()
L.get_texts()[0].set_text(self.Analysis_plotting_text_list_entry.get())
if self.Analysis_x_lim_entry.get() != "":
split_list = self.Analysis_x_lim_entry.get().split(", ")
x_lim_0 = float(split_list[0])
x_lim_1 = float(split_list[1])
plt.xlim([x_lim_0,x_lim_1])
if self.Analysis_y_lim_entry.get() != "":
split_list = self.Analysis_y_lim_entry.get().split(", ")
y_lim_0 = float(split_list[0])
y_lim_1 = float(split_list[1])
plt.ylim([y_lim_0,y_lim_1])
if self.Analysis_Plot_Title.get() != "":
plt.title(self.Analysis_Plot_Title.get())
if self.Analysis_Plot_Y_Axis_Label_Entry.get() != "":
plt.ylabel(self.Analysis_Plot_Y_Axis_Label_Entry.get())
plt.show()
if self.Export_var.get() ==1:
thename = "../saved-csvs/"+str(self.CSV_save_name.get())+".csv"
with open(thename, 'w', newline='') as csvfile:
token_header = col+' Tokens'
fieldnames = ['Time (s)', token_header]
thewriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
thewriter.writeheader()
for thetime, token in zip(t,data.iloc[:,0]):
thewriter.writerow({'Time (s)':thetime, token_header:token})
print("CSV Saved at " + thename)
self.csv_listbox.destroy()
self.csv_list_box_function()
self.update_truth_list()
def run_Analysis(self):
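"""Load the selected saved run via Analysis.load_from_file, then build one plot button
per place and per consumption/production rate column, followed by the plotting-option
widgets (plotting steps, nth datapoint, CSV export, MDV/Ca delay buttons, rolling
average, thresholds, axis limits and titles)."""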
self.button_2.config(text="Please Wait, Loading...", state=tk.DISABLED)
self.button_saved_run.forget()
self.button3.forget()
self.run_save_name_entry.forget()
run_save_name = self.run_save_name
self.analysis = {}
start_time = datetime.now()
#File1 = '200k_sHFPN_Healthy_SD_01_DelaySD_01_run3_V3_TRANSITION'
#File2 = '6e6_sHFPN_Healthy_SD_0_DelaySD_02'
File3 = run_save_name
#analysis[File1] = Analysis.load_from_file(File1)
#analysis[File2] = Analysis.load_from_file(File2)
self.analysis[File3] = Analysis.load_from_file(File3)
execution_time = datetime.now()-start_time
print('\n\nLoad-in Time:', execution_time)
print("")
simulation_time_step=self.analysis[File3].time_step
desired_plotting_steps=self.analysis[File3].final_time_step
max_time_step = self.analysis[File3].final_time_step
print(simulation_time_step)
list_of_place_names = []
for place in self.analysis[File3].place_ids:
list_of_place_names.append(place)
tk.Button(self.frame_in_canvas_Analysis, text = "Places", bg="grey").grid(row=0, column=0, pady=10, padx=10)
#PLACE TOKENS
for index, place_id in enumerate(list_of_place_names):
tk.Button(self.frame_in_canvas_Analysis, cursor="cross", text=place_id, command=partial(GUI_plot, place_id, self.analysis, File3, simulation_time_step, desired_plotting_steps, max_time_step), bg="DodgerBlue3").grid(row=index+1, column=0, pady=10, padx=10)#pass value as an argument to plot
self.canvas2.configure(scrollregion= self.canvas2.bbox("all"))
#CONSUMPTION
tk.Button(self.frame_in_canvas_Analysis, text="Consumption Rate", bg="grey").grid(row=0, column=1, pady=10,padx=10)
self.Analysis_dataframe_cons = self.analysis[File3].df_for_rate_analytics_cons
CONS_OR_PROD = "Cons"
for index, col in enumerate(self.Analysis_dataframe_cons.columns):
tk.Button(self.frame_in_canvas_Analysis, cursor="cross", text=col, command=partial(GUI_Plot_Rate, col, self.analysis, File3, simulation_time_step, desired_plotting_steps, max_time_step, CONS_OR_PROD), bg="DodgerBlue3").grid(row=index+1, column=1,pady=10, padx=10)
#Production
tk.Button(self.frame_in_canvas_Analysis, text="Production Rate", bg="grey").grid(row=0, column=2, pady=10,padx=10)
self.Analysis_dataframe_prod = self.analysis[File3].df_for_rate_analytics_prod
CONS_OR_PROD = "Prod"
for index, col in enumerate(self.Analysis_dataframe_prod.columns):
tk.Button(self.frame_in_canvas_Analysis, cursor="cross", text=col, command=partial(GUI_Plot_Rate, col, self.analysis, File3, simulation_time_step, desired_plotting_steps, max_time_step, CONS_OR_PROD), bg="DodgerBlue3").grid(row=index+1, column=2,pady=10, padx=10)
self.button_2.config(text="Restart Session to Run Another Analysis", state=tk.DISABLED)
#Saved Name Label
SD_font = tkfont.Font(family='Helvetica', size=10, weight="bold")
self.Label1321 = tk.Label(self.frame_in_canvas_Analysis, text="File Name:")
self.Label1321.grid(row=0, column=3, pady=10, padx=10)
self.Analysis_title_header_label = tk.Label(self.frame_in_canvas_Analysis, text= run_save_name, font=SD_font)
self.Analysis_title_header_label.grid(row=0, column=4, pady=10,padx=10)
#Desired Plotting Steps
self.desired_plotting_steps_label = tk.Label(self.frame_in_canvas_Analysis, text = "Desired Plotting Steps")
self.desired_plotting_steps_label.grid(row=1, column=3, pady=10,padx=10)
self.desired_plotting_steps_entry_box = tk.Entry(self.frame_in_canvas_Analysis, bg="DodgerBlue3")
self.desired_plotting_steps_entry_box.grid(row=1,column=4)
self.desired_plotting_steps_entry_box.insert(tk.END, desired_plotting_steps)
self.root.geometry("1250x660") #readjust size to make scrollbar visible
#Plot every nth datapoint
self.nth_datapoint_label = tk.Label(self.frame_in_canvas_Analysis, text = "Plot Every nth Data Point")
self.nth_datapoint_label.grid(row=2, column =3, pady=10,padx=10)
self.nth_datapoint_entry_box = tk.Entry(self.frame_in_canvas_Analysis, bg="DodgerBlue3")
self.nth_datapoint_entry_box.grid(row=2, column=4)
self.nth_datapoint_entry_box.insert(tk.END, 1)
def Export_Enable_Disable(self):
if self.Export_Enable_Disable_decision == 0:
self.CSV_save_name.config(state="normal")
self.Export_Enable_Disable_decision = 1
else:
self.CSV_save_name.config(state = tk.DISABLED)
self.Export_Enable_Disable_decision = 0
#Export to CSV
self.Export_Enable_Disable_decision = 0
self.Export_label = tk.Label(self.frame_in_canvas_Analysis, text="Export to CSV")
self.Export_label.grid(row=3, column=3, pady=10,padx=10)
self.Export_var = tk.IntVar()
self.Export_Checkbutton = tk.Checkbutton(self.frame_in_canvas_Analysis, variable=self.Export_var, command=partial(Export_Enable_Disable, self), fg="Black", selectcolor="grey42", relief=tk.GROOVE, highlightcolor="DodgerBlue3", bg="DodgerBlue3", activeforeground = "red", activebackground="red")
self.Export_Checkbutton.grid(row=3, column=4, pady=10, padx=10)
#Export to CSV Save Name
self.CSV_save_name_label = tk.Label(self.frame_in_canvas_Analysis, text="CSV Save Name")
self.CSV_save_name_label.grid(row=4, column=3, pady=10, padx=10)
self.CSV_save_name = tk.Entry(self.frame_in_canvas_Analysis, bg="DodgerBlue3")
self.CSV_save_name.grid(row=4, column=4, pady=10, padx=10)
self.CSV_save_name.insert(tk.END, "CSV_Save_Name")
self.CSV_save_name.config(state = tk.DISABLED)
def on_click_listbox(event):
index=self.csv_listbox.curselection()
seltext=self.csv_listbox.get(index)
self.csv_string = seltext
#MDV button
def calculate_MDV_delay_function(self, analysis, File3):
test = self.analysis[File3].delay_list_MDVs
print(test)
print(np.mean(test))
self.MDV_button = tk.Button(self.frame_in_canvas_Analysis, text="Calculate MDV Delay", cursor="hand2", command=partial(calculate_MDV_delay_function, self, self.analysis, File3), bg="grey")
self.MDV_button.grid(row=5,column=3, padx=10,pady=10)
def calculate_calcium_Delay_function(self,analysis,File3):
test = self.analysis[File3].delay_list_t_A
test2 =self.analysis[File3].delay_list_t_B
test3 = self.analysis[File3].delay_list_t_D
# print(test, test2, test3)
print("t_A:", np.mean(test),"t_B:", np.mean(test2),"t_D:", np.mean(test3))
print(f"File: {File3}")
place_id = 'p_on4'
self.calc_and_print_mean_sd_calcium(File3, place_id)
place_id = 'p_Ca_extra'
self.calc_and_print_mean_sd_calcium(File3, place_id)
place_id = 'p_on3'
self.calc_and_print_mean_sd_calcium(File3, place_id)
print("Done")
self.Ca_button = tk.Button(self.frame_in_canvas_Analysis, text="Calculate Ca Delay", cursor="hand2", command=partial(calculate_calcium_Delay_function, self, self.analysis, File3), bg="grey")
self.Ca_button.grid(row=5,column=4, padx=10,pady=10)
#Rolling Average
self.Analysis_Empty_Label111 = tk.Label(self.frame_in_canvas_Analysis, text="")
self.Analysis_Empty_Label111.grid(row=6,column=3, pady=10,padx=10)
def Analysis_Enable_Disable(self):
if self.Analysis_enabled==0:
self.Analysis_plot_rolling_only_checkbutton.config(state="normal")
self.Analysis_RAWS_entry.config(state="normal")
self.Analysis_enabled=1
else:
self.Analysis_plot_rolling_only_checkbutton.config(state=tk.DISABLED)
self.Analysis_RAWS_entry.config(state=tk.DISABLED)
self.Analysis_enabled=0
self.Analysis_enabled=0
SD_font = tkfont.Font(family='Helvetica', size=10, weight="bold")
self.Analysis_Rolling_Header = tk.Label(self.frame_in_canvas_Analysis, text="Rolling Averages", font=SD_font)
self.Analysis_Rolling_Header.grid(row=7, column=3, padx=10,pady=10)
self.Analysis_rolling_average = tk.IntVar()
self.Analysis_rolling_average_checkbox = tk.Checkbutton(self.frame_in_canvas_Analysis, var=self.Analysis_rolling_average, text="Rolling Average?", command=partial(Analysis_Enable_Disable, self))
self.Analysis_rolling_average_checkbox.grid(row = 7, column=4, padx=10, pady=10)
self.Analysis_plot_rolling_only_var = tk.IntVar()
self.Analysis_plot_rolling_only_checkbutton = tk.Checkbutton(self.frame_in_canvas_Analysis, var=self.Analysis_plot_rolling_only_var, text="Plot Rolling Average Only?", state=tk.DISABLED)
self.Analysis_plot_rolling_only_checkbutton.grid(row=7,column=5, padx=10,pady=10)
self.Analysis_RAWS = tk.Label(self.frame_in_canvas_Analysis, text="Rolling Average Window Size")
self.Analysis_RAWS.grid(row=8, column=3, padx=10,pady=10)
self.Analysis_RAWS_entry = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_RAWS_entry.insert(tk.END, 100)
self.Analysis_RAWS_entry.config(state=tk.DISABLED)
self.Analysis_RAWS_entry.grid(row=8, column=4, padx=10,pady=10)
self.Label_Threshold_header = tk.Label(self.frame_in_canvas_Analysis, text="Add y threshold")
self.Label_Threshold_header.grid(row=9, column=3, padx=10,pady=10)
self.Analysis_y_threshold_entry = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_y_threshold_entry.grid(row=9, column=4, padx=10, pady=10)
self.Label_Threshold_header2 = tk.Label(self.frame_in_canvas_Analysis, text="Threshold Label")
self.Label_Threshold_header2.grid(row=10, column=3, padx=10,pady=10)
self.Analysis_y_threshold_graph_label = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_y_threshold_graph_label.grid(row=10, column=4, padx=10,pady=10)
self.Analysis_plotting_text_list_label = tk.Label(self.frame_in_canvas_Analysis, text="Main Plot Legend Name")
self.Analysis_plotting_text_list_label.grid(row=11,column=3)
self.Analysis_plotting_text_list_entry = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_plotting_text_list_entry.grid(row=11,column=4)
self.Analysis_x_lim_label = tk.Label(self.frame_in_canvas_Analysis, text="X Axis Limits (e.g. '0, 100', separated by ', ' comma and space)")
self.Analysis_x_lim_label.grid(row=12, column=3)
self.Analysis_x_lim_entry = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_x_lim_entry.grid(row=12, column=4)
self.Analysis_y_lim_label = tk.Label(self.frame_in_canvas_Analysis, text="Y Axis Limits (e.g. '0, 100', separated by ', ' comma and space)", relief=tk.RIDGE)
self.Analysis_y_lim_label.grid(row=13, column=3)
self.Analysis_y_lim_entry = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_y_lim_entry.grid(row=13, column=4)
self.Analysis_Plot_Title_label = tk.Label(self.frame_in_canvas_Analysis, text="Plot Title", relief=tk.RAISED)
self.Analysis_Plot_Title_label.grid(row=14, column=3)
self.Analysis_Plot_Title = tk.Entry(self.frame_in_canvas_Analysis)
self.Analysis_Plot_Title.grid(row=14, column=4)
self.Analysis_Plot_Y_Axis_Label_Label = tk.Label(self.frame_in_canvas_Analysis, text="Y Axis Label", bg="grey42", relief=tk.GROOVE)
self.Analysis_Plot_Y_Axis_Label_Label.grid(row=15, column=3)
self.Analysis_Plot_Y_Axis_Label_Entry = tk.Entry(self.frame_in_canvas_Analysis, bg="DodgerBlue3", relief=tk.GROOVE)
self.Analysis_Plot_Y_Axis_Label_Entry.grid(row=15, column=4)
self.Analysis_print_mean_value_var = tk.BooleanVar()
self.Analysis_print_mean_value = tk.Checkbutton(self.frame_in_canvas_Analysis, var=self.Analysis_print_mean_value_var, text="Print Mean Value to Console?")
self.Analysis_print_mean_value.grid(row=16, column=4)
#Indent Corresponds to Analysis Page
self.button_2 = tk.Button(self.frame5, text="Please Enter Save Name", state=tk.DISABLED, command= threading.Thread(target = partial(run_Analysis,self)).start)
self.button_2.config(cursor="hand2")
self.button_2.pack(side=tk.TOP)
self.make_scrollbar_Analysis()
def csv_list_box_function(self):
def on_click_listbox(event):
index=self.csv_listbox.curselection()
seltext=self.csv_listbox.get(index)
self.csv_string = seltext
#ListBox
self.csv_listbox = tk.Listbox(self.frame_in_canvas_CSV_Page)
self.csv_listbox.grid(row=0, column=0, pady=10, padx=10)
self.csv_listbox.bind('<ButtonRelease-1>', on_click_listbox)
if platform == 'darwin':
for file in glob.glob("../saved-csvs/*.csv"):
file=file[14:len(file)-4]
self.csv_listbox.insert(tk.END, file)
if platform == 'win32':
for file in glob.glob("..\saved-csvs\*.csv"):
file=file[14:len(file)-4]
self.csv_listbox.insert(tk.END, file)
def show_saved_runs(self):
self.frame6=tk.Frame(self.frame2)
#self.frame6.pack(side="left", fill=tk.BOTH,expand=1)
self.frame6.grid(row=0,column=0,sticky="nsew")
self.lbx = tk.Listbox(self.frame6)
self.lbx.pack(fill=tk.BOTH, expand=1)
if platform == 'darwin':
for file in glob.glob("../saved-runs/*"):
file=file[14:len(file)-4]
self.lbx.insert(tk.END, file)
if platform == 'win32':
for file in glob.glob("..\saved-runs\*"):
file=file[14:len(file)-4]
self.lbx.insert(tk.END, file)
def insert_run_name(self):
self.run_save_name_entry.delete(0, tk.END)
self.run_save_name_entry.insert(tk.END, self.saved_run_string)
def on_click_listbox(event):
index=self.lbx.curselection()
seltext=self.lbx.get(index)
self.saved_run_string = seltext
self.button_saved_run.config(state="normal")
self.button_saved_run.config(text="Input Last Hovered Saved Run",state="normal", command=partial(insert_run_name, self))
self.lbx.bind('<ButtonRelease-1>', on_click_listbox)
def green_listbox_selection(self):
for index, item in enumerate(self.truth_list):
if item == 1:
self.csv_listbox.itemconfig(index, bg="green", fg="white")
else:
self.csv_listbox.itemconfig(index, bg="white", fg="black")
def update_truth_list(self):
self.truth_list = []
for item in self.e4:
if item in self.selection_list:
self.truth_list.append(1)
else:
self.truth_list.append(0)
def create_list_counting_zero_runs(self, normal_list):
"""
In calcium, the token history is an array of zeros and ones. This function counts the length of each run of consecutive zeros spanning the array, appends each run length to a new list and returns that list.
"""
list_2 = []
count = 0
for index,number in enumerate(normal_list):
if number == 0:
count = count+1
if number ==1 and normal_list[index-1]==0:
list_2.append(int(count))
count = 0
if number == 0 and index == (len(normal_list)-1): #So situations where we reach the end of the list and we are stuck with a zero are still counted.
list_2.append(int(count))
#Cut off the very first and last element of the list for safety reasons, to deal with potential truncated zero-runs lowering the mean.
list_2.pop(0)
list_2.pop()
return list_2
def create_list_counting_one_runs(self, normal_list):
"""
Counterpart of create_list_counting_zero_runs: for the same array of zeros and ones, this function counts the length of each run of consecutive ones, appends each run length to a new list and returns that list.
"""
list_2 = []
count = 0
for index,number in enumerate(normal_list):
if number == 1:
count = count+1
if number ==0 and normal_list[index-1]==1:
list_2.append(int(count))
count = 0
if number == 1 and index == (len(normal_list)-1): #So situations where we reach the end of the list and we are stuck with a one are still counted.
list_2.append(int(count))
#Cut off the very first and last element of the list for safety reasons, to deal with potential truncated one-runs lowering the mean.
list_2.pop(0)
list_2.pop()
return list_2
def calc_and_print_mean_sd_calcium(self, file, place_id):
"""
This can take a long time if the list is huge (6 million+ time steps).
The data comes back two-dimensional and first needs to be flattened into a one-dimensional list.
Calculates, for the given place_id over the whole run, the mean and SD of how many time steps the place spends empty (zero-runs, i.e. the delay until it holds a token again) and how many it spends occupied (one-runs).
"""
data = self.analysis[file].mean_token_history_for_places([place_id])[0:self.analysis[file].final_time_step+1]
list_of_lists = data.tolist()
normal_list = [item for sublist in list_of_lists for item in sublist]
zero_runs_count_list = self.create_list_counting_zero_runs(normal_list)
one_runs_count_list = self.create_list_counting_one_runs(normal_list)
mean1 = np.mean(zero_runs_count_list)
mean2 = np.mean(one_runs_count_list)
std1 = np.std(zero_runs_count_list)
std2= np.std(one_runs_count_list)
print(f"Mean Delay for {place_id}:", np.round(mean1, decimals =3), "timesteps", len(zero_runs_count_list), "counts")
print(f"SD for {place_id}: +/-", np.round(std1, decimals=3), "timesteps or", np.round(100*std1/mean1, decimals=3), "%")
print("Max:", max(zero_runs_count_list), "Min:", min(zero_runs_count_list))
#print("The very first element was:", zero_runs_count_list[0])
#print("The very last element was: ", zero_runs_count_list[len(zero_runs_count_list)-1])
print('')
print(f"Mean Time Active for {place_id}:", np.round(mean2, decimals =3), "timesteps", len(one_runs_count_list), "counts")
print(f"SD for {place_id}: +/-", np.round(std2, decimals=3), "timesteps or", np.round(100*std2/mean2, decimals=3), "%")
print("Max:", max(one_runs_count_list), "Min:", min(one_runs_count_list))
print('')
print('#########################')
def make_scrollbar_CSV_Page(self):
self.canvas4 = tk.Canvas(self.frame11)
self.canvas4.pack(side="left", fill=tk.BOTH, expand=1)
self.scrollbar4 = ttk.Scrollbar(self.frame11, orient=tk.VERTICAL, command =self.canvas4.yview)
self.scrollbar4.pack(side="left", fill=tk.Y)
self.canvas4.configure(yscrollcommand=self.scrollbar4.set)
self.canvas4.bind('<Configure>', lambda e: self.canvas4.configure(scrollregion= self.canvas4.bbox("all")))
#Create another frame inside the canvas2
self.frame_in_canvas_CSV_Page = tk.Frame(self.canvas4)
self.canvas4.create_window((0,0), window=self.frame_in_canvas_CSV_Page, anchor="nw")
def Saved_Csvs_page(self):
self.frame11 =tk.Frame(self.frame2)
self.frame11.grid(row=0,column=0,sticky="nsew")
self.make_scrollbar_CSV_Page()
self.csv_list_box_function()
self.selection_list = []
def plot_csvs_function(self):
#read all csvs, second column
self.csv_dict={}
print(self.selection_list, "selection list order")
option1= self.reverse_checkbutton_var.get()
if option1 == 1:
option1= True
else:
option1= False
self.selection_list = sorted(self.selection_list, reverse = option1)
for index, the_csv in enumerate(self.selection_list):
self.csv_dict[index]=pd.read_csv("../saved-csvs/"+the_csv+".csv", usecols=[0,1])
print(self.csv_dict.keys())
the_title = self.title_entrybox.get()
xlabel = self.xlabel_entrybox.get()
ylabel = self.ylabel_entrybox.get()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
#hold plot on
plt.title(the_title)
self.rolling_average_decision=self.rolling_average.get()
da_window_size = int(self.RAWS_entry.get())
option2 = self.plot_rolling_only_var.get()
if option2==0:
for thecsv,key in zip(self.selection_list, self.csv_dict.keys()):
plt.plot(self.csv_dict[key].iloc[:,0], self.csv_dict[key].iloc[:,1], label=thecsv)
for thecsv,key in zip(self.selection_list, self.csv_dict.keys()): #So that rolling averages plot after and don't get covered up in the graph
if self.rolling_average_decision ==1:
plt.plot(self.csv_dict[key].iloc[:,0], self.csv_dict[key].iloc[:,1].rolling(window=da_window_size).mean(), label=thecsv)
self.csv_plotting_text_list=self.csv_plotting_text_list_entry.get()
#Threshold axHline
if self.CSV_y_threshold_entry.get() != "":
plt.axhline(y=float(self.CSV_y_threshold_entry.get()), linestyle='--', color ='red', label = self.CSV_y_threshold_graph_label.get())
#Legend
if self.csv_plotting_text_list == "":
pass
else:
split_list = self.csv_plotting_text_list.split(", ")
L = plt.legend()
for index,text in enumerate(split_list):
L.get_texts()[index].set_text(text)
if self.CSV_Twin_y_lim_entry.get() != "":
split_list = self.CSV_Twin_y_lim_entry.get().split(", ")
y_lim_0 = float(split_list[0])
y_lim_1 = float(split_list[1])
plt.ylim(y_lim_0,y_lim_1)
if self.CSV_Twin_x_lim_entry.get() != "":
split_list = self.CSV_Twin_x_lim_entry.get().split(", ")
x_lim_0 = float(split_list[0])
x_lim_1 = float(split_list[1])
plt.xlim(x_lim_0,x_lim_1)
plt.show()
self.Plot_csvs_button = tk.Button(self.frame_in_canvas_CSV_Page, text="Plot", cursor="hand2",command=partial(plot_csvs_function, self))
self.Plot_csvs_button.grid(row=0, column=1, padx=10, pady=10)
self.reverse_checkbutton_var = tk.IntVar()
self.reverse_checkbutton = tk.Checkbutton(self.frame_in_canvas_CSV_Page, var=self.reverse_checkbutton_var, text="Reverse Plot Order?")
self.reverse_checkbutton.grid(row=0,column=2, padx=10,pady=10)
def on_click_listbox(event):
index=self.csv_listbox.curselection()
seltext=self.csv_listbox.get(index)
self.csv_string = seltext
#BACKself.button_saved_run.config(text="Input Last Hovered Saved Run",state="normal", command=partial(insert_run_name, self))
self.csv_listbox.bind('<ButtonRelease-1>', on_click_listbox)
#Rolling Average
self.Empty_Label111 = tk.Label(self.frame_in_canvas_CSV_Page, text="")
self.Empty_Label111.grid(row=7,column=0, pady=10,padx=10)
def Enable_Disable(self):
if self.enabled==0:
self.plot_rolling_only_checkbutton.config(state="normal")
self.RAWS_entry.config(state="normal")
self.RAWS_Legend_Entry.config(state="normal")
self.enabled=1
else:
self.plot_rolling_only_checkbutton.config(state=tk.DISABLED)
self.RAWS_entry.config(state=tk.DISABLED)
self.RAWS_Legend_Entry.config(state=tk.DISABLED)
self.enabled=0
self.enabled=0
SD_font = tkfont.Font(family='Helvetica', size=10, weight="bold")
self.Rolling_Header = tk.Label(self.frame_in_canvas_CSV_Page, text="Rolling Averages", font=SD_font)
self.Rolling_Header.grid(row=8, column=0, padx=10,pady=10)
self.rolling_average = tk.IntVar()
self.rolling_average_checkbox = tk.Checkbutton(self.frame_in_canvas_CSV_Page, var=self.rolling_average, text="Rolling Average?", command=partial(Enable_Disable, self))
self.rolling_average_checkbox.grid(row = 8, column=1, padx=10, pady=10)
self.plot_rolling_only_var = tk.IntVar()
self.plot_rolling_only_checkbutton = tk.Checkbutton(self.frame_in_canvas_CSV_Page, var=self.plot_rolling_only_var, text="Plot Rolling Average Only?", state=tk.DISABLED)
self.plot_rolling_only_checkbutton.grid(row=8,column=2, padx=10,pady=10)
self.RAWS = tk.Label(self.frame_in_canvas_CSV_Page, text="Rolling Average Window Size")
self.RAWS.grid(row=9, column=0, padx=10,pady=10)
self.RAWS_entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.RAWS_entry.insert(tk.END, 100)
self.RAWS_entry.config(state=tk.DISABLED)
self.RAWS_entry.grid(row=9, column=1, padx=10,pady=10)
self.CSV_Label_Threshold_header = tk.Label(self.frame_in_canvas_CSV_Page, text="Add y threshold")
self.CSV_Label_Threshold_header.grid(row=10, column=0, padx=10,pady=10)
self.CSV_y_threshold_entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_y_threshold_entry.grid(row=10, column=1, padx=10, pady=10)
self.CSV_LabeL_Threshold_header_desc = tk.Label(self.frame_in_canvas_CSV_Page, text="Input Token Number")
self.CSV_LabeL_Threshold_header_desc.grid(row=10, column=2, padx=10, pady=10)
self.CSV_Label_Threshold_header2 = tk.Label(self.frame_in_canvas_CSV_Page, text="Threshold Label")
self.CSV_Label_Threshold_header2.grid(row=11, column=0, padx=10,pady=10)
self.CSV_y_threshold_graph_label = tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_y_threshold_graph_label.grid(row=11, column=1, padx=10,pady=10)
self.CSV_LabeL_Threshold_header2_desc = tk.Label(self.frame_in_canvas_CSV_Page, text="Input String")
self.CSV_LabeL_Threshold_header2_desc.grid(row=11, column=2, padx=10, pady=10)
self.RAWS_Legend_Label = tk.Label(self.frame_in_canvas_CSV_Page, text="Rolling Average Legend")
self.RAWS_Legend_Label.grid(row=12, column=3, padx=10,pady=10)
self.RAWS_Legend_Entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.RAWS_Legend_Entry.grid(row=12, column=4, padx=10,pady=10)
self.RAWS_Legend_Entry.config(state=tk.DISABLED)
def select_button_function(self):
self.selection_list.append(self.csv_string)
self.selection_list = list(set(self.selection_list)) #make selection list unique
self.e4 = list(self.csv_listbox.get(0,tk.END))#get all items in listbox
self.update_truth_list()
print(self.truth_list)
self.green_listbox_selection()
#Selected listbox items are highlighted green via green_listbox_selection; the Plot button then reads all of the selected csv files.
#Selected items are stored in self.selection_list; plot_csvs_function reads each selected csv and plots it.
def deselect_button_function(self):
self.selection_list.remove(self.csv_string)
self.update_truth_list()
self.green_listbox_selection()
print(self.selection_list)
self.select_button = tk.Button(self.frame_in_canvas_CSV_Page, text="Select", cursor="hand2", command=partial(select_button_function, self))
self.select_button.grid(row=2,column=0, padx=10, pady=10)
self.deselect_button = tk.Button(self.frame_in_canvas_CSV_Page, text="Deselect", cursor="hand2", command=partial(deselect_button_function, self))
self.deselect_button.grid(row=2,column=1,padx=10, pady=10)
self.title_label = tk.Label(self.frame_in_canvas_CSV_Page, text="Plot Title")
self.title_label.grid(column=0, row =3, padx=10,pady=10)
self.title_entrybox = tk.Entry(self.frame_in_canvas_CSV_Page)
self.title_entrybox.grid(column=1, row=3, padx=10,pady=10)
self.xlabel_label = tk.Label(self.frame_in_canvas_CSV_Page, text="X Label")
self.xlabel_label.grid(column=0, row =4, padx=10,pady=10)
self.xlabel_entrybox = tk.Entry(self.frame_in_canvas_CSV_Page)
self.xlabel_entrybox.grid(column=1, row=4, padx=10,pady=10)
self.ylabel_label = tk.Label(self.frame_in_canvas_CSV_Page, text="Y Label")
self.ylabel_label.grid(column=0, row =5, padx=10,pady=10)
self.ylabel_entrybox = tk.Entry(self.frame_in_canvas_CSV_Page)
self.ylabel_entrybox.grid(column=1, row=5, padx=10,pady=10)
#Legend Names
self.csv_plotting_text_list_label = tk.Label(self.frame_in_canvas_CSV_Page, text="Legend Names")
self.csv_plotting_text_list_label.grid(row=6,column=0)
self.csv_plotting_text_list_entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.csv_plotting_text_list_entry.grid(row=6,column=1)
self.csv_plotting_text_help_label = tk.Label(self.frame_in_canvas_CSV_Page, text="Separate strings by ', '")
self.csv_plotting_text_help_label.grid(row=6,column=2)
def twin_plot_function(self):
#read all csvs, second column
self.csv_dict={}
print(self.selection_list, "selection list order")
#Reverse Plot Order
option1= self.reverse_checkbutton_var.get()
if option1 == 1:
option1= True
else:
option1= False
self.selection_list = sorted(self.selection_list, reverse = option1)
#Save Csv Token Columns to dictionary
for index, the_csv in enumerate(self.selection_list):
self.csv_dict[index]=pd.read_csv("../saved-csvs/"+the_csv+".csv", usecols=[0,1])
print(self.csv_dict.keys())
#Initialise Subplots
fig, ax1 = plt.subplots()
#Get Plot Parameters
the_title = self.title_entrybox.get()
xlabel = self.xlabel_entrybox.get()
ylabel = self.ylabel_entrybox.get()
#Set Plot Parameters
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.title.set_text(the_title)
#Plot Twin Y Axes
option2 = self.plot_rolling_only_var.get()
if option2==0:
#for thecsv,key in zip(self.selection_list, self.csv_dict.keys()):
ax2 = ax1.twinx()
ax1.set_zorder(1)
ax2.set_zorder(2)
ax1.patch.set_visible(True)
ax2.patch.set_visible(False)
ax2.grid(b=False)
ax1.plot(self.csv_dict[0].iloc[:,0], self.csv_dict[0].iloc[:,1], label=self.selection_list[0], color='k')
ax2.plot(self.csv_dict[1].iloc[:,0], self.csv_dict[1].iloc[:,1], label=self.selection_list[1], color='tab:blue')
ax2.set_ylabel(self.second_ylabel.get())
if self.CSV_Twin_y_lim_entry.get() != "":
split_list = self.CSV_Twin_y_lim_entry.get().split(", ")
y_lim_0 = float(split_list[0])
y_lim_1 = float(split_list[1])
ax1.set_ylim([y_lim_0,y_lim_1])
if self.CSV_Twin_y_lim_entry2.get() != "":
split_list2 = self.CSV_Twin_y_lim_entry2.get().split(", ")
y_lim2_0 = float(split_list2[0])
y_lim2_1 = float(split_list2[1])
ax2.set_ylim([y_lim2_0,y_lim2_1])
if self.CSV_Twin_x_lim_entry.get() != "":
split_list = self.CSV_Twin_x_lim_entry.get().split(", ")
x_lim_0 = float(split_list[0])
x_lim_1 = float(split_list[1])
ax1.set_xlim([x_lim_0,x_lim_1])
if self.CSV_Twin_x_lim_entry2.get() != "":
split_list2 = self.CSV_Twin_x_lim_entry2.get().split(", ")
x_lim2_0 = float(split_list2[0])
x_lim2_1 = float(split_list2[1])
ax2.set_xlim([x_lim2_0,x_lim2_1])
ax1.tick_params(axis="y", labelcolor='k')
ax2.tick_params(axis="y", labelcolor='tab:blue')
fig.tight_layout()
self.align_ticks = 0
if self.align_ticks == 1:
minresax1 = 5
minresax2 = 5
ax1ylims = ax1.get_ybound()
ax2ylims = ax2.get_ybound()
ax1factor = minresax1 * 6
ax2factor = minresax2 * 6
ax1.set_yticks(np.linspace(ax1ylims[0],
ax1ylims[1]+(ax1factor -
(ax1ylims[1]-ax1ylims[0]) % ax1factor) %
ax1factor,
7))
ax2.set_yticks(np.linspace(ax2ylims[0],
ax2ylims[1]+(ax2factor -
(ax2ylims[1]-ax2ylims[0]) % ax2factor) %
ax2factor,
7))
# #Plot Rolling Average
# for thecsv,key in zip(self.selection_list, self.csv_dict.keys()): #So that rolling averages plot after and don't get covered up in the graph
#Rolling Average
self.rolling_average_decision=self.rolling_average.get()
da_window_size = int(self.RAWS_entry.get())
if self.rolling_average_decision ==1:
ax1.plot(self.csv_dict[0].iloc[:,0], self.csv_dict[0].iloc[:,1].rolling(window=da_window_size).mean(), label=self.RAWS_Legend_Entry.get())
#Threshold axHline
if self.CSV_y_threshold_entry.get() != "":
if self.CSV_Threshold_To_TwinPlot_var.get(): #.get() is needed; the BooleanVar object itself is always truthy
ax1.axhline(y=float(self.CSV_y_threshold_entry.get()), linestyle='--', color ='red', label = self.CSV_y_threshold_graph_label.get())
else:
ax2.axhline(y=float(self.CSV_y_threshold_entry.get()), linestyle='--', color ='red', label = self.CSV_y_threshold_graph_label.get())
#Twin Legend
self.csv_plotting_text_list=self.csv_plotting_text_list_entry.get()
if self.csv_plotting_text_list == "":
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=2)
else:
split_list = self.csv_plotting_text_list.split(", ")
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
labels[0] = split_list[0]
labels2[0] = split_list[1]
ax2.legend(lines + lines2, labels + labels2, loc=2)
plt.show()
self.Twin_plot_button = tk.Button(self.frame_in_canvas_CSV_Page, text="Twin Plot", command = partial(twin_plot_function, self))
self.Twin_plot_button.grid(row=0, column=3, padx=10,pady=10)
self.second_ylabel_Label = tk.Label(self.frame_in_canvas_CSV_Page, text="2nd Y Label")
self.second_ylabel_Label.grid(row=1, column=3, padx=10, pady=10)
self.second_ylabel = tk.Entry(self.frame_in_canvas_CSV_Page)
self.second_ylabel.grid(row=1, column=4, padx=10, pady=10)
self.CSV_Twin_y_lim_Label = tk.Label(self.frame_in_canvas_CSV_Page, text="YLimit 1")
self.CSV_Twin_y_lim_Label.grid(row=2, column=3, padx=10, pady=10)
self.CSV_Twin_y_lim_entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_Twin_y_lim_entry.grid(row=2, column=4, padx=10, pady=10)
self.CSV_Twin_y_lim_Label2 = tk.Label(self.frame_in_canvas_CSV_Page, text="YLimit 2")
self.CSV_Twin_y_lim_Label2.grid(row=3, column=3, padx=10, pady=10)
self.CSV_Twin_y_lim_entry2=tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_Twin_y_lim_entry2.grid(row=3, column=4, padx=10, pady=10)
self.CSV_Twin_x_lim_Label = tk.Label(self.frame_in_canvas_CSV_Page, text="XLimit 1")
self.CSV_Twin_x_lim_Label.grid(row=4, column=3, padx=10, pady=10)
self.CSV_Twin_x_lim_entry = tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_Twin_x_lim_entry.grid(row=4, column=4, padx=10, pady=10)
self.CSV_Twin_x_lim_Label2 = tk.Label(self.frame_in_canvas_CSV_Page, text="XLimit 2")
self.CSV_Twin_x_lim_Label2.grid(row=5, column=3, padx=10, pady=10)
self.CSV_Twin_x_lim_entry2=tk.Entry(self.frame_in_canvas_CSV_Page)
self.CSV_Twin_x_lim_entry2.grid(row=5, column=4, padx=10, pady=10)
self.CSV_Threshold_To_TwinPlot_var = tk.BooleanVar()
self.CSV_Threshold_To_TwinPlot_Checkbutton = tk.Checkbutton(self.frame_in_canvas_CSV_Page, text="Add Threshold to Twin Plot?", var=self.CSV_Threshold_To_TwinPlot_var)
self.CSV_Threshold_To_TwinPlot_Checkbutton.grid(row=6, column=3)
def run_sHFPN(self):
self.lb.itemconfig(7, fg="red")
self.Safe_Exit_Required = True
self.Safe_Exit_Now = False
#Save Inputs from GUI
run_save_name = self.HFPN_run_save_name
number_time_steps = int(self.HFPN_number_of_timesteps)
time_step_size = float(self.HFPN_timestep_size)
cholSD = float(self.HFPN_CholSD)
DelaySD = float(self.HFPN_CalciumSD)
#*Get all Mutations*
it_p_LRRK2_mut = self.LRRK2_var.get()
it_p_GBA1 = self.GBA1_var.get()
it_p_VPS35 = self.VPS35_var.get()
it_p_DJ1 = self.DJ1_var.get()
#*Therapeutics*
it_p_NPT200 = self.PD_NPT200_var.get()
it_p_DNL151 = self.PD_DNL151_var.get()
it_p_LAMP2A = self.PD_LAMP2A_var.get()
#Rewrite Place Inputs
self.PD_pn.set_place_tokens(value=it_p_LRRK2_mut, place_id="p_LRRK2_mut")
self.PD_pn.set_place_tokens(value=it_p_GBA1, place_id="p_GBA1")
self.PD_pn.set_place_tokens(value=it_p_VPS35, place_id="p_VPS35")
self.PD_pn.set_place_tokens(value=it_p_DJ1, place_id="p_DJ1")
self.PD_pn.set_place_tokens(value=it_p_NPT200, place_id="p_NPT200")
self.PD_pn.set_place_tokens(value=it_p_DNL151, place_id="p_DNL151")
self.PD_pn.set_place_tokens(value=it_p_LAMP2A, place_id="p_LAMP2A")
#Disable Run HFPN Button
self.button_1.config(state=tk.DISABLED)
self.button_1.config(text="Running Simulation... Please bear with Lag...")
self.PD_pn.set_time_step(time_step = time_step_size) #unit = s/A.U.
## Define places
#Set the Input Stochastic Parameter Values
for index,value in enumerate(self.transitions_entry_box_dict):
str_index = str(index) #stringed number is the key of these dictionaries
SD_value = float(self.transitions_entry_box_dict[str_index].get()) #float because entry box value is initially a string
transition_id = list(self.PD_pn.transitions)[index] #get the transition id (dict key) from a list of all the transitions in this dict.
self.PD_pn.set_1st_stochastic_parameter(SD_value, transition_id)
if self.PD_pn.transitions[transition_id].DiscreteFlag=="yes": #DiscreteFlag flags discrete transitions
Delay_SD_Value = float(self.transitions_entry_box_Discrete_SD[str_index].get())
self.PD_pn.set_2nd_stochastic_parameter(Delay_SD_Value, transition_id)
#debugging Stochastic Parameters
for index,value in enumerate(self.transitions_entry_box_dict):
str_index = str(index)
transition_id = list(self.PD_pn.transitions)[index]
print(self.PD_pn.transitions[transition_id].stochastic_parameters)
# #BEFORE FOR LOOP
# for index,value in enumerate(self.transitions_consumption_checkboxes_dict):
# transition_id = list(self.PD_pn.transitions)[index]
# print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "before For Loop")
#Set the Collect Rate Analytics decisions (consumption)
for index,value in enumerate(self.transitions_consumption_checkboxes_dict):
str_index = str(index)
Integer_value = self.consump_checkbox_variables_dict[str_index].get() # 1 means checked, 0 means not.
#print(type(Integer_value))
transition_id = list(self.PD_pn.transitions)[index]
self.PD_pn.set_consumption_collect_decision(Integer_value,transition_id)
#print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "in cons for loop")
#AFTER FOR LOOP
for index,value in enumerate(self.transitions_consumption_checkboxes_dict): #DEBUGGING
transition_id = list(self.PD_pn.transitions)[index]
print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "after cons for loops")
# for index,value in enumerate(self.transitions_consumption_checkboxes_dict): #DEBUGGING
# transition_id = list(self.PD_pn.transitions)[index]
# APPENDED_LIST = []
# self.PD_pn.transitions[transition_id].collect_rate_analytics = APPENDED_LIST
# print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "after cons for loops")
#Set the Collect Rate Analytics Decisions Production
for index,value in enumerate(self.transitions_production_checkboxes_dict):
str_index = str(index)
Integer_value = self.produc_checkbox_variables_dict[str_index].get() # 1 means checked, 0 means not.
transition_id = list(self.PD_pn.transitions)[index]
self.PD_pn.set_production_collect_decision(integer = Integer_value, transition_id=transition_id)
#print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "in prod for loop")
for index,value in enumerate(self.transitions_consumption_checkboxes_dict): #DEBUGGING
transition_id = list(self.PD_pn.transitions)[index]
#print(self.PD_pn.transitions[transition_id].collect_rate_analytics, "after both for loops")
#TESTING ADDED TRANSITION FOR DEBUGGING PURPOSES
# pn.add_transition_with_speed_function(#50
# transition_id = 'testing',
# label = 'debugging purposes',
# input_place_ids = ['p_RTN3_HMW_auto', 'p_RTN3_HMW_dys1', 'p_tau'],
# firing_condition = lambda a: True,
# reaction_speed_function = r_t_RTN3_dys_lyso,
# consumption_coefficients = [1, 0, 0],
# output_place_ids = ['p_RTN3_HMW_dys2', 'p_RTN3_HMW_lyso'],
# production_coefficients = [1, 0],# tune later when data are incorporated
# stochastic_parameters = [SD])
# Run the network
GUI_App = self
start_time = datetime.now()
self.PD_pn.run_many_times(number_runs=number_runs, number_time_steps=number_time_steps, GUI_App=GUI_App)
analysis = Analysis(self.PD_pn)
execution_time = datetime.now()-start_time
print('\n\ntime to execute:', execution_time)
# Save the network
start_time = datetime.now()
print("")
print("Generating Pickle File...")
print("")
print("Starting Time is: ", start_time)
self.button_1.config(text="Generating Pickle File...")
Analysis.store_to_file(analysis, run_save_name)
print("")
print('Network saved to : "' + run_save_name+'.pkl"')
execution_time = datetime.now()-start_time
print('\n\nPickling Time:', execution_time)
self.button_1.config(text="Restart Session to Re-run")
def run_AD_sHFPN(self):
#Save Inputs from GUI
run_save_name = self.AD_HFPN_run_save_name
number_time_steps = int(self.AD_HFPN_number_of_timesteps)
time_step_size = float(self.AD_HFPN_timestep_size)
# cholSD = float(self.AD_HFPN_CholSD)
# DelaySD = float(self.AD_HFPN_CalciumSD)
it_p_ApoE = self.AD_ApoE4_var.get()
it_p_CD33 = self.AD_CD33_var.get()
it_p_age = self.AD_Aged_var.get()
#Rewrite Place Inputs
self.AD_pn.set_place_tokens(value=it_p_ApoE, place_id="p_ApoE") # gene, risk factor in AD
self.AD_pn.set_place_tokens(value=it_p_age, place_id="p_age")
self.AD_pn.set_place_tokens(value=it_p_CD33, place_id='p_CD33') # 80 years old, risk factor in AD for BACE1 activity increase
#Disable Run HFPN Button
self.AD_button_1.config(state=tk.DISABLED)
self.AD_button_1.config(text="Running Simulation... Please bear with Lag...")
# Initialize an empty HFPN #HERE
self.AD_pn.set_time_step(time_step = time_step_size)
#Set the Input Stochastic Parameter Values
for index,value in enumerate(self.transitions_entry_box_dict):
str_index = str(index) #stringed number is the key of these dictionaries
SD_value = float(self.transitions_entry_box_dict[str_index].get()) #float because entry box value is initially a string
transition_id = list(self.AD_pn.transitions)[index] #get the transition id (dict key) from a list of all the transitions in this dict.
self.AD_pn.set_1st_stochastic_parameter(SD_value, transition_id)
if self.AD_pn.transitions[transition_id].DiscreteFlag=="yes": #DiscreteFlag flags discrete transitions
Delay_SD_Value = float(self.transitions_entry_box_Discrete_SD[str_index].get())
self.AD_pn.set_2nd_stochastic_parameter(Delay_SD_Value, transition_id)
#Set the Collect Rate Analytics Decisions Consumption
for index,value in enumerate(self.transitions_consumption_checkboxes_dict):
str_index = str(index)
Integer_value = self.consump_checkbox_variables_dict[str_index].get() # 1 means checked, 0 means not.
transition_id = list(self.AD_pn.transitions)[index]
self.AD_pn.set_consumption_collect_decision(Integer_value,transition_id)
for index,value in enumerate(self.transitions_consumption_checkboxes_dict): #DEBUGGING
transition_id = list(self.AD_pn.transitions)[index]
#Set the Collect Rate Analytics Decisions Production
for index,value in enumerate(self.transitions_production_checkboxes_dict):
str_index = str(index)
Integer_value = self.produc_checkbox_variables_dict[str_index].get() # 1 means checked, 0 means not.
transition_id = list(self.AD_pn.transitions)[index]
self.AD_pn.set_production_collect_decision(integer = Integer_value, transition_id=transition_id)
for index,value in enumerate(self.transitions_consumption_checkboxes_dict): #DEBUGGING
transition_id = list(self.AD_pn.transitions)[index]
GUI_App = self
start_time = datetime.now()
self.AD_pn.run_many_times(number_runs=number_runs, number_time_steps=number_time_steps, GUI_App=GUI_App)
analysis = Analysis(self.AD_pn)
execution_time = datetime.now()-start_time
print('\n\ntime to execute:', execution_time)
# Save the network
start_time = datetime.now()
print("")
print("Generating Pickle File...")
print("")
print("Starting Time is: ", start_time)
self.AD_button_1.config(text="Generating Pickle File...")
Analysis.store_to_file(analysis, run_save_name)
print("")
print('Network saved to : "' + run_save_name+'.pkl"')
execution_time = datetime.now()-start_time
print('\n\nPickling Time:', execution_time)
self.AD_button_1.config(text="Restart Session to Re-run")
def safe_exit(self):
self.root.update()
self.root.destroy()
sys.exit()
def main():
app = sHFPN_GUI_APP()
def on_closing():
if app.Safe_Exit_Required == True:
if messagebox.askokcancel("Quit", "Please Click Safe Exit Button"):
print("")
else:
app.root.destroy()
app.root.protocol("WM_DELETE_WINDOW", on_closing)
app.root.mainloop()
# del app
# gc.collect()
# get_ipython().magic('reset -sf')
print("Test")
def configure_inputs_file(root, main_frame):
e = Entry(main_frame)
e.pack()
button1 = Button(main_frame, text="Enter run_save_name")
button1.config(command=partial(set_input_file, e, button1))
button1.pack()
def set_input_file(e, button1):
input_file_name = e.get()
global run_save_name
run_save_name = input_file_name
print("Input_file_name is now: ", input_file_name)
button1.destroy()
e.destroy()
# self.root.bind("<Control-l>", lambda x: self.hide()) #Unnecessary feature... to be removed
# self.hidden=0
# def hide(self): #to be removed
# if self.hidden == 0:
# self.frame1.destroy()
# self.hidden=1
# elif self.hidden==1:
# self.frame2.destroy()
# self.hidden=0
# self.Left_Sidebar()
# self.Right_Output()
if __name__ == "__main__":
main() |
py | 1a3213d69e0a9aa432dc7a07e835d8b737bb2275 | # coding: utf-8
"""
tosca-sure
TOSCA Simple qUeRy sErvice (SURE). # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sure_tosca_client
from sure_tosca_client.models.topology_template import TopologyTemplate # noqa: E501
from sure_tosca_client.rest import ApiException
class TestTopologyTemplate(unittest.TestCase):
"""TopologyTemplate unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTopologyTemplate(self):
"""Test TopologyTemplate"""
# FIXME: construct object with mandatory attributes with example values
# model = sure_tosca_client.models.topology_template.TopologyTemplate()  # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a3214f781b891fcd47dee855b396d1fe1a7adfd | from airflow import DAG
from airflow.models import Variable
from airflow.models import BaseOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.sensors import BaseSensorOperator
from airflow.contrib.hooks import SSHHook
from airflow.hooks.http_hook import HttpHook
from airflow.operators import FileInfoSensor
from airflow.operators import LSFSubmitOperator, LSFJobSensor, LSFOperator
from airflow.operators.slack_operator import SlackAPIPostOperator
from airflow.operators import SlackAPIUploadFileOperator
from airflow.operators import Ctffind4DataSensor
from airflow.operators import MotionCor2DataSensor
from airflow.operators import FeiEpuOperator
from airflow.operators import FeiEpu2InfluxOperator, LSFJob2InfluxOperator, GenericInfluxOperator
from airflow.operators import LogbookConfigurationSensor, LogbookRegisterFileOperator, LogbookRegisterRunParamsOperator, LogbookCreateRunOperator
from airflow.exceptions import AirflowException, AirflowSkipException, AirflowSensorTimeout
import os
from datetime import datetime, timedelta
import logging
LOG = logging.getLogger(__name__)
args = {
'owner': 'yee',
'provide_context': True,
'start_date': datetime( 2018,1,1 ),
'ssh_connection_id': 'ssh_docker_host-dev',
'logbook_connection_id': 'cryoem_logbook',
'influx_host': 'influxdb01.slac.stanford.edu',
'queue_name': 'cryoem-daq',
'bsub': '/afs/slac/package/lsf/test/bin/bsub',
'bjobs': '/afs/slac/package/lsf/test/bin/bjobs',
'bkill': '/afs/slac/package/lsf/test/bin/bkill',
'convert_gainref': True,
'apply_gainref': True,
'raw_gainref': True,
'daq_software': '__imaging_software__',
'max_active_runs': 12,
'particle_size': 150,
# 'create_run': False
# 'apix': 1.35,
# 'fmdose': 1.75,
#'superres': 0,
# 'imaging_format': '.tif',
}
lsf_env = {
'LSB_JOB_REPORT_MAIL': 'N',
'MODULEPATH': '/afs/slac.stanford.edu/package/spack/share/spack/modules/linux-centos7-x86_64:/afs/slac/package/singularity/modules'
}
software = {
'imod': { 'version': '4.9.7', 'module': 'imod-4.9.7-intel-17.0.4-2kdesbi' },
'eman2': { 'version': 'develop', 'module': 'eman2-develop-gcc-4.9.4-e5ufzef' },
'ctffind4': { 'version': '4.1.10', 'module': 'ctffind4-4.1.10-intel-17.0.4-rhn26cm' },
'motioncor2': { 'version': '1.1.0', 'module': 'motioncor2-1.1.0-gcc-4.8.5-zhoi3ww' },
'dogpicker': { 'version': '0.2.1', 'module': 'dogpicker-0.2.1-gcc-4.8.5-nqj6spe' },
'relion': { 'version': '3.0b2', 'module': '[email protected]' }
}
def uploadExperimentalParameters2Logbook(ds, **kwargs):
"""Push the parameter key-value pairs to the elogbook"""
data = kwargs['ti'].xcom_pull( task_ids='parse_parameters' )
LOG.warn("data: %s" % (data,))
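# Hypothetical sketch only (the endpoint and payload shape are assumptions, not
# part of the current codebase): once the logbook API is settled, the pairs
# could be pushed with an HttpHook, e.g.
#   hook = HttpHook(http_conn_id='cryoem_logbook', method='POST')
#   for key, value in data.items():
#       hook.run(endpoint='...', data={'param': key, 'value': value})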
raise AirflowSkipException('not yet implemented')
class NotYetImplementedOperator(DummyOperator):
ui_color = '#d3d3d3'
###
# define the workflow
###
with DAG( os.path.splitext(os.path.basename(__file__))[0],
description="Pre-processing of CryoEM data",
schedule_interval=None,
default_args=args,
catchup=False,
max_active_runs=args['max_active_runs'],
concurrency=72,
dagrun_timeout=timedelta(seconds=1800), # DAG.dagrun_timeout expects a timedelta, not an int
) as dag:
# hook to container host for lsf commands
hook = SSHHook(conn_id=args['ssh_connection_id'])
logbook_hook = HttpHook( http_conn_id=args['logbook_connection_id'], method='GET' )
###
# parse the epu xml metadata file
###
if args['daq_software'] == 'EPU':
parameter_file = FileInfoSensor( task_id='parameter_file',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}.xml",
recursive=True,
poke_interval=1,
)
parse_parameters = FeiEpuOperator(task_id='parse_parameters',
filepath="{{ ti.xcom_pull( task_ids='parameter_file' )[0] }}",
)
# upload to the logbook
logbook_parameters = PythonOperator(task_id='logbook_parameters',
python_callable=uploadExperimentalParameters2Logbook,
op_kwargs={}
)
influx_parameters = FeiEpu2InfluxOperator( task_id='influx_parameters',
xcom_task_id='parse_parameters',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
###
# get the summed jpg
###
if args['daq_software'] == 'SerialEM':
sum = LSFOperator( task_id='sum',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/summed/imod/{{ params.software.imod.version }}/{{ dag_run.conf['base'] }}_avg_gainrefd.job
{% if params.convert_gainref %}#BSUB -w done({{ ti.xcom_pull( task_ids='convert_gainref' )['jobid'] }}){% endif %}
#BSUB -W 5
#BSUB -We 1
#BSUB -n 1
mkdir -p {{ dag_run.conf['directory'] }}/summed/imod/{{ params.software.imod.version }}/
module load {{ params.software.imod.module }}
cd -- "$( dirname {{ ti.xcom_pull( task_ids='stack_file' )[-1] }} )"
avgstack > {{ dag_run.conf['directory'] }}/summed/imod/{{ params.software.imod.version }}/{{ dag_run.conf['base'] }}_avg_gainrefd.log <<-'__AVGSTACK_EOF__'
{{ ti.xcom_pull( task_ids='stack_file' )[-1] }}
{%- if params.apply_gainref %}
/tmp/{{ dag_run.conf['base'] }}_avg.mrcs
/
__AVGSTACK_EOF__
# apply gainref
#newstack \
# {{ ti.xcom_pull( task_ids='gainref_file')[0] | replace( '.dm4', '.mrc' ) }} \
# /tmp/{{ dag_run.conf['base'] }}_gainref.mrc
#clip mult -n 16 \
# /tmp/{{ dag_run.conf['base'] }}_avg.mrcs \
# /tmp/{{ dag_run.conf['base'] }}_gainref.mrc \
# /tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc
clip mult -n 16 \
/tmp/{{ dag_run.conf['base'] }}_avg.mrcs \
{% if params.raw_gainref %}{{ dag_run.conf['directory'] }}/raw/GainRefs/gain-ref.mrc{% else %}{{ ti.xcom_pull( task_ids='gainref_file')[0] | replace( '.dm4', '.mrc' ) }}{% endif %} \
/tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc
{%- else %}
/tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc
/
__AVGSTACK_EOF__
{% endif %}
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
{%- set imaging_format = params.imaging_format if params.imaging_format else dag_run.conf['imaging_format'] %}
e2proc2d.py \
/tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc \
{{ ti.xcom_pull( task_ids='stack_file' )[-1] | replace( imaging_format, '.jpg' ) }} \
--process filter.lowpass.gauss:cutoff_freq=0.05
# copy files
cp -f /tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc {{ dag_run.conf['directory'] }}/summed/imod/{{ params.software.imod.version }}/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc
{%- if params.apply_gainref %}
rm -f /tmp/{{ dag_run.conf['base'] }}_avg.mrcs
# /tmp/{{ dag_run.conf['base'] }}_gainref.mrc
{% endif %}
rm -f /tmp/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc
""",
params={
'apply_gainref': args['apply_gainref'],
'convert_gainref': args['convert_gainref'],
'software': software,
'imaging_format': args['imaging_format'] if 'imaging_format' in args else None,
'raw_gainref': args['raw_gainref'] if 'raw_gainref' in args else None,
}
)
influx_sum = LSFJob2InfluxOperator( task_id='influx_sum',
job_name='sum',
xcom_task_id='sum',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
summed_file = FileInfoSensor( task_id='summed_file',
filepath="{% if params.daq_software == 'SerialEM' %}{{ dag_run.conf['directory'] }}/summed/imod/{{ params.software.imod.version }}/{{ dag_run.conf['base'] }}_avg_gainrefd.mrc{% else %}{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}.mrc{% endif %}",
params={
'daq_software': args['daq_software'],
'software': software,
},
recursive=True,
poke_interval=1,
)
logbook_summed_file = LogbookRegisterFileOperator( task_id='logbook_summed_file',
file_info='summed_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
summed_preview = FileInfoSensor( task_id='summed_preview',
filepath="{% set imaging_format = params.imaging_format if params.imaging_format else dag_run.conf['imaging_format'] %}{% if params.daq_software == 'SerialEM' %}{{ ti.xcom_pull( task_ids='stack_file' )[-1] | replace( imaging_format, '.jpg' ) }}{% else %}{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}.jpg{% endif %}",
params={
'daq_software': args['daq_software'],
'imaging_format': args['imaging_format'] if 'imaging_format' in args else None,
},
recursive=True,
poke_interval=1,
)
ctffind_summed = LSFSubmitOperator( task_id='ctffind_summed',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/summed/{% if params.daq_software == 'SerialEM' %}imod/{{ params.software.imod.version }}/{% endif %}ctffind4/{{ params.software.ctffind4.version }}/{{ dag_run.conf['base'] }}_ctf.job
#BSUB -W 6
#BSUB -We 3
#BSUB -n 1
module load {{ params.software.ctffind4.module }}
mkdir -p {{ dag_run.conf['directory'] }}/summed/{% if params.daq_software == 'SerialEM' %}imod/{{ params.software.imod.version }}/{% endif %}ctffind4/{{ params.software.ctffind4.version }}/
cd {{ dag_run.conf['directory'] }}/summed/{% if params.daq_software == 'SerialEM' %}imod/{{ params.software.imod.version }}/{% endif %}ctffind4/{{ params.software.ctffind4.version }}/
ctffind > {{ dag_run.conf['base'] }}_ctf.log <<-'__CTFFIND_EOF__'
{{ ti.xcom_pull( task_ids='summed_file' )[0] }}
{{ dag_run.conf['base'] }}_ctf.mrc
{% set superres = params.superres %}{% if superres == None and 'superres' in dag_run.conf %}{% set superres = dag_run.conf['superres'] in ( '1', 1, 'y' ) %}{% endif %}{% if superres %}{% if params.apix %}{{ params.apix | float / 2 }}{% else %}{{ dag_run.conf['apix'] | float / 2 }}{% endif %}{% else %}{% if params.apix %}{{ params.apix }}{% else %}{{ dag_run.conf['apix'] }}{% endif %}{% endif %}
{{ dag_run.conf['keV'] }}
{{ dag_run.conf['cs'] or 2.7 }}
0.1
512
30
4
1000
50000
200
no
no
yes
100
{% if 'phase_plate' in dag_run.conf and dag_run.conf['phase_plate'] %}yes
0
1.571
0.1
{%- else %}no{% endif %}
no
__CTFFIND_EOF__
""",
params={
'daq_software': args['daq_software'],
'apix': args['apix'] if 'apix' in args else None,
'superres': args['superres'] if 'superres' in args else None,
'software': software,
}
)
convert_summed_ctf_preview = LSFOperator( task_id='convert_summed_ctf_preview',
ssh_hook=hook,
bsub=args['bsub'],
bjobs=args['bjobs'],
env=lsf_env,
poke_interval=1,
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/summed/{% if params.daq_software == 'SerialEM' %}imod/{{ params.software.imod.version }}/{% endif %}ctffind4/4.1.10/{{ dag_run.conf['base'] }}_ctf_preview.job
#BSUB -w "done({{ ti.xcom_pull( task_ids='ctffind_summed' )['jobid'] }})"
#BSUB -W 5
#BSUB -We 1
#BSUB -n 1
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
cd {{ dag_run.conf['directory'] }}/summed/{% if params.daq_software == 'SerialEM' %}imod/{{ params.software.imod.version }}/{% endif %}ctffind4/{{ params.software.ctffind4.version }}/
e2proc2d.py --writejunk \
{{ dag_run.conf['base'] }}_ctf.mrc \
{{ dag_run.conf['base'] }}_ctf.jpg
""",
params={
'daq_software': args['daq_software'],
'software': software,
},
)
influx_summed_preview = LSFJob2InfluxOperator( task_id='influx_summed_preview',
job_name='summed_preview',
xcom_task_id='convert_summed_ctf_preview',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
ctf_summed = LSFJobSensor( task_id='ctf_summed',
ssh_hook=hook,
jobid="{{ ti.xcom_pull( task_ids='ctffind_summed' )['jobid'] }}",
retries=2,
retry_delay=timedelta(seconds=1),
poke_interval=1,
)
influx_summed_ctf = LSFJob2InfluxOperator( task_id='influx_summed_ctf',
job_name='ctf_summed',
xcom_task_id='ctf_summed',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
summed_ctf_preview = FileInfoSensor( task_id='summed_ctf_preview',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_ctf.jpg",
recursive=True,
poke_interval=1,
)
summed_ctf_file = FileInfoSensor( task_id='summed_ctf_file',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_ctf.mrc",
recursive=True,
poke_interval=1,
)
logbook_summed_ctf_file= LogbookRegisterFileOperator( task_id='logbook_summed_ctf_file',
file_info='summed_ctf_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
summed_ctf_data = Ctffind4DataSensor( task_id='summed_ctf_data',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_ctf.txt",
recursive=True,
poke_interval=1,
)
influx_summed_ctf_data = GenericInfluxOperator( task_id='influx_summed_ctf_data',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
measurement="cryoem_data",
dt="{{ ti.xcom_pull( task_ids='stack_file' )[-1] }}",
tags={
'app': 'ctffind',
'version': software['ctffind4']['version'],
'state': 'unaligned',
'microscope': "{{ dag_run.conf['microscope'] }}",
},
tags2="{{ ti.xcom_pull( task_ids='summed_ctf_data', key='context' ) }}",
fields="{{ ti.xcom_pull( task_ids='summed_ctf_data' ) }}",
)
resubmit_ctffind_summed = BashOperator( task_id='resubmit_ctffind_summed',
trigger_rule='one_failed',
bash_command="""
airflow clear -t ctffind_summed -c -d -s {{ ts }} -e {{ ts }} {{ dag | replace( '<DAG: ', '' ) | replace( '>', '' ) }} &
( sleep 10; airflow clear -t resubmit_ctffind_summed -c -d -s {{ ts }} -e {{ ts }} {{ dag | replace( '<DAG: ', '' ) | replace( '>', '' ) }} ) &
"""
)
#clear_resubmit_ctffind_summed = BashOperator( task_id='clear_resubmit_ctffind_summed',
# bash_command="""
# airflow clear -t resubmit_ctffind_summed -c -d -s {{ ts }} -e {{ ts }} {{ dag | replace( '<DAG: ', '' ) | replace( '>', '' ) }}
# """
#)
convert_summed_ctf_preview >> resubmit_ctffind_summed
ctf_summed >> resubmit_ctffind_summed
#ctffind_summed >> clear_resubmit_ctffind_summed
###
#
###
stack_file = FileInfoSensor( task_id='stack_file',
filepath="{% set imaging_format = params.imaging_format if params.imaging_format else dag_run.conf['imaging_format'] %}{{ dag_run.conf['directory'] }}/raw/**/{{ dag_run.conf['base'] }}{% if imaging_format == '.mrc' %}-*.mrc{% elif imaging_format == '.tif' %}*.tif{% endif %}",
params={
'imaging_format': args['imaging_format'] if 'imaging_format' in args else None,
},
recursive=True,
excludes=['gain-ref',],
poke_interval=1,
)
logbook_stack_file = LogbookRegisterFileOperator( task_id='logbook_stack_file',
file_info='stack_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
if args['convert_gainref']:
gainref_file = FileInfoSensor( task_id='gainref_file',
filepath="{% set superres = params.superres %}{% if superres == None and 'superres' in dag_run.conf %}{% set superres = dag_run.conf['superres'] in ( '1', 1, 'y' ) %}{% endif %}{{ dag_run.conf['directory'] }}{% if params.raw_gainref %}/raw/GainRefs/*x1.m{% if superres %}3{% else %}2{% endif %}*.dm4{% else %}/**/{% if params.daq_software == 'SerialEM' %}{% if superres %}Super{% else %}Count{% endif %}Ref*.dm4{% else %}{{ dag_run.conf['base'] }}-gain-ref.dm4{% endif %}{% endif %}",
params={
'daq_software': args['daq_software'],
'superres': args['superres'] if 'superres' in args else None,
'raw_gainref': args['raw_gainref'] if 'raw_gainref' in args else None,
},
recursive=True,
poke_interval=1,
)
logbook_gainref_file = LogbookRegisterFileOperator( task_id='logbook_gainref_file',
file_info='gainref_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
####
# convert gain ref to mrc
####
convert_gainref = LSFSubmitOperator( task_id='convert_gainref',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
lsf_script="""#!/bin/bash -l
#BSUB -o {{ ti.xcom_pull( task_ids='gainref_file' )[0].replace(' ','_').replace( '.dm4', '.job' ) }}
#BSUB -W 3
#BSUB -We 1
#BSUB -n 1
{% if params.raw_gainref %}{% set gainref_file=dag_run.conf['directory']+"/raw/GainRefs/gain-ref.mrc" %}{% else %}{% set gainref_file=ti.xcom_pull( task_ids='gainref_file' )[0].replace( '.dm4', '.mrc' ) %}{% endif %}
# {{ gainref_file }}
#if [ -e {{ ti.xcom_pull( task_ids='gainref_file' )[0] }} ]; then
if [ -e '{{ gainref_file }}' ]; then
echo "gainref file {{ gainref_file }} already exists"
else
{% if params.raw_gainref %}
# dm2mrc {{ ti.xcom_pull( task_ids='gainref_file' )[0].replace(' ','\ ').replace('[','\[').replace(']','\]') }} {{ gainref_file }}
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
e2proc2d.py {% if params.rotate_gainref > 0 %}--rotate {{ params.rotate_gainref }}{% endif %}{{ ti.xcom_pull( task_ids='gainref_file' )[0].replace(' ','\ ').replace('[','\[').replace(']','\]') }} {{ gainref_file }} --inplace
{% else %}
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
cd -- "$( dirname {{ ti.xcom_pull( task_ids='gainref_file' )[0] }} )"
echo e2proc2d.py {% if params.rotate_gainref > 0 %}--rotate {{ params.rotate_gainref }}{% endif %}{{ ti.xcom_pull( task_ids='gainref_file' )[0] }} {{ ti.xcom_pull( task_ids='gainref_file' )[0] | replace( '.dm4', '.mrc' ) }}
e2proc2d.py {% if params.rotate_gainref > 0 %}--rotate {{ params.rotate_gainref }}{% endif %}{{ ti.xcom_pull( task_ids='gainref_file' )[0] }} {{ ti.xcom_pull( task_ids='gainref_file' )[0] | replace( '.dm4', '.mrc' ) }} --inplace
{% endif %}
fi
""",
params={
'daq_software': args['daq_software'],
'rotate_gainref': 0,
'software': software,
'raw_gainref': args['raw_gainref'] if 'raw_gainref' in args else None,
}
)
new_gainref = LSFJobSensor( task_id='new_gainref',
ssh_hook=hook,
jobid="{{ ti.xcom_pull( task_ids='convert_gainref' )['jobid'] }}",
poke_interval=1,
)
influx_new_gainref = LSFJob2InfluxOperator( task_id='influx_new_gainref',
job_name='convert_gainref',
xcom_task_id='new_gainref',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
if args['apply_gainref']:
new_gainref_file = FileInfoSensor( task_id='new_gainref_file',
filepath="{% if params.raw_gainref or not params.convert_gainref %}{{ dag_run.conf['directory'] }}/**/gain-ref.mrc{% else %}{{ dag_run.conf['directory'] }}/**/{% if params.daq_software == 'SerialEM' %}{% set superres = params.superres %}{% if superres == None and 'superres' in dag_run.conf %}{% set superres = dag_run.conf['superres'] in ( '1', 1, 'y' ) %}{% endif %}{% if superres %}Super{% else %}Count{% endif %}Ref*.mrc{% else %}{{ dag_run.conf['base'] }}-gain-ref.mrc{% endif %}{% endif %}",
recursive=True,
params={
'daq_software': args['daq_software'],
'convert_gainref': args['convert_gainref'],
'superres': args['superres'] if 'superres' in args else None,
'raw_gainref': args['raw_gainref'] if 'raw_gainref' in args else None,
},
poke_interval=1,
)
###
# align the frame
###
motioncorr_stack = LSFSubmitOperator( task_id='motioncorr_stack',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/aligned/motioncor2/1.1.0/{{ dag_run.conf['base'] }}_aligned.job
{% if params.convert_gainref %}#BSUB -w "done({{ ti.xcom_pull( task_ids='convert_gainref' )['jobid'] }})"{% endif %}
#BSUB -gpu "num=1"
#BSUB -W 15
#BSUB -We 7
#BSUB -n 1
module load {{ params.software.motioncor2.module }}
mkdir -p {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/
cd {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/
{% set superres = params.superres %}{% if superres == None and 'superres' in dag_run.conf %}{% set superres = dag_run.conf['superres'] in ( '1', 1, 'y' ) %}{% endif %}
MotionCor2 \
-In{% if params.imaging_format == '.mrc' or dag_run.conf['imaging_format'] == '.mrc' %}Mrc{% elif params.imaging_format == '.tif' or dag_run.conf['imaging_format'] == '.tif' %}Tiff{% endif %} {{ ti.xcom_pull( task_ids='stack_file' )[-1] }} \
{% if params.apply_gainref %}{% if params.convert_gainref %} -Gain {% if params.raw_gainref %}{{ dag_run.conf['directory'] }}/raw/GainRefs/gain-ref.mrc{% else %}{{ ti.xcom_pull( task_ids='gainref_file' )[0] | replace( '.dm4', '.mrc' ) }}{% endif %} {% else %} -Gain {{ ti.xcom_pull( task_ids='new_gainref_file' )[-1] }} {% endif %}{% endif -%}\
-OutMrc {{ dag_run.conf['base'] }}_aligned.mrc \
-LogFile {{ dag_run.conf['base'] }}_aligned.log \
-kV {{ dag_run.conf['keV'] }} \
-FmDose {% if params.fmdose %}{{ params.fmdose }}{% else %}{{ dag_run.conf['fmdose'] }}{% endif %} \
-Bft {% if 'preprocess/align/motioncor2/bft' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/bft'] }}{% else %}150{% endif %} \
-PixSize {% if superres %}{% if params.apix %}{{ params.apix | float / 2 }}{% else %}{{ dag_run.conf['apix'] | float / 2 }}{% endif %}{% else %}{% if params.apix %}{{ params.apix }}{% else %}{{ dag_run.conf['apix'] }}{% endif %}{% endif %} \
-FtBin {% if superres %}2{% else %}1{% endif %} \
-Patch {% if 'preprocess/align/motioncor2/patch' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/patch'] }}{% else %}5 5{% endif %} \
-Throw {% if 'preprocess/align/motioncor2/throw' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/throw'] }}{% else %}0{% endif %} \
-Trunc {% if 'preprocess/align/motioncor2/trunc' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/trunc'] }}{% else %}0{% endif %} \
-Iter {% if 'preprocess/align/motioncor2/iter' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/iter'] }}{% else %}10{% endif %} \
-Tol {% if 'preprocess/align/motioncor2/tol' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/tol'] }}{% else %}0.5{% endif %} \
-OutStack {% if 'preprocess/align/motioncor2/outstack' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/outstack'] }}{% else %}0{% endif %} \
-InFmMotion {% if 'preprocess/align/motioncor2/infmmotion' in dag_run.conf %}{{ dag_run.conf['preprocess/align/motioncor2/infmmotion'] }}{% else %}1{% endif %} \
-Gpu {{ params.gpu }}
""",
params={
'gpu': 0,
'apply_gainref': args['apply_gainref'],
'convert_gainref': args['convert_gainref'],
'apix': args['apix'] if 'apix' in args else None,
'fmdose': args['fmdose'] if 'fmdose' in args else None,
'superres': args['superres'] if 'superres' in args else None,
'software': software,
'imaging_format': args['imaging_format'] if 'imaging_format' in args else None,
'raw_gainref': args['raw_gainref'] if 'raw_gainref' in args else None,
},
)
align = LSFJobSensor( task_id='align',
ssh_hook=hook,
jobid="{{ ti.xcom_pull( task_ids='motioncorr_stack' )['jobid'] }}",
poke_interval=5,
)
influx_aligned = LSFJob2InfluxOperator( task_id='influx_aligned',
job_name='align_stack',
xcom_task_id='align',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
drift_data = MotionCor2DataSensor( task_id='drift_data',
filepath="{{ dag_run.conf['directory'] }}/aligned/motioncor2/1.1.0/{{ dag_run.conf['base'] }}_aligned.log0-*Full.log",
poke_interval=5,
timeout=30,
)
influx_drift_data = GenericInfluxOperator( task_id='influx_drift_data',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
measurement="cryoem_data",
dt="{{ ti.xcom_pull( task_ids='stack_file' )[-1] }}",
tags={
'app': 'motioncor2',
'version': software['motioncor2']['version'],
'state': 'aligned',
'microscope': "{{ dag_run.conf['microscope'] }}",
},
fields="{{ ti.xcom_pull( task_ids='drift_data' ) }}",
)
# if args['output_aligned_movie_stack']:
# aligned_stack_file = FileInfoSensor( task_id='aligned_stack_file',
# filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned_Stk.mrc",
# recursive=True,
# poke_interval=5,
# )
convert_aligned_preview = LSFOperator( task_id='convert_aligned_preview',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
retries=2,
retry_delay=timedelta(seconds=1),
poke_interval=1,
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/aligned/motioncor2/1.1.0/{{ dag_run.conf['base'] }}_aligned_preview.job
#BSUB -w "done({{ ti.xcom_pull( task_ids='motioncorr_stack' )['jobid'] }})"
#BSUB -W 10
#BSUB -We 2
#BSUB -n 1
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
cd {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/
e2proc2d.py \
{{ dag_run.conf['base'] }}_aligned.mrc \
{{ dag_run.conf['base'] }}_aligned.jpg \
--process filter.lowpass.gauss:cutoff_freq=0.05
e2proc2d.py \
{{ dag_run.conf['base'] }}_aligned_DW.mrc \
{{ dag_run.conf['base'] }}_aligned_DW.jpg \
--process filter.lowpass.gauss:cutoff_freq=0.05
""",
params={
'software': software,
}
)
influx_aligned_preview = LSFJob2InfluxOperator( task_id='influx_aligned_preview',
job_name='aligned_preview',
xcom_task_id='convert_aligned_preview',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
aligned_file = FileInfoSensor( task_id='aligned_file',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned_DW.mrc",
recursive=True,
poke_interval=1,
)
logbook_aligned_file = LogbookRegisterFileOperator( task_id='logbook_aligned_file',
file_info='aligned_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
aligned_preview = FileInfoSensor( task_id='aligned_preview',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned.jpg",
recursive=True,
poke_interval=1,
)
particle_pick = LSFOperator( task_id='particle_pick',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
poke_interval=1,
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/particles/relion-autopick/{{ params.software.relion.version }}/{{ dag_run.conf['base'] }}_aligned_particle-pick.job
#BSUB -w "done({{ ti.xcom_pull( task_ids='motioncorr_stack' )['jobid'] }})"
#BSUB -W 10
#BSUB -We 5
#BSUB -n 1
module load {{ params.software.relion.module }}
mkdir -p {{ dag_run.conf['directory'] }}/particles/relion-autopick/{{ params.software.relion.version }}/
cd {{ dag_run.conf['directory'] }}
# run autopick
{% set particle_size = params.particle_size %}{% if 'particle_size' in dag_run.conf %}{% set particle_size = dag_run.conf['particle_size'] | float %}{% endif %}
relion_autopick --i "./aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW.mrc" --odir particles/relion-autopick/{{ params.software.relion.version }}/ --pickname autopick --LoG --LoG_diam_min {{ particle_size * 0.8 }} --LoG_diam_max {{ particle_size * 1.2 }} --angpix {% if params.apix %}{{ params.apix }}{% else %}{{ dag_run.conf['apix'] }}{% endif %} --shrink 0 --lowpass 15 --LoG_adjust_threshold -0.1
""",
params={
'software': software,
'apix': args['apix'] if 'apix' in args else None,
'particle_size': args['particle_size'],
}
)
influx_particle_pick = LSFJob2InfluxOperator( task_id='influx_particle_pick',
job_name='particle_pick',
xcom_task_id='particle_pick',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
particle_pick_data = BashOperator( task_id='particle_pick_data',
xcom_push=True,
bash_command="""
cat {{ dag_run.conf['directory'] }}/particles/relion-autopick/{{ params.software.relion.version }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW_autopick.star | grep -vE '(_|\#|^ $)' | wc -l
""",
params={
'software': software,
}
)
ctffind_aligned = LSFSubmitOperator( task_id='ctffind_aligned',
# beware we do not use aligned_file's xcom as it would not have completed yet
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/ctffind4/{{ params.software.ctffind4.version }}/{{ dag_run.conf['base'] }}_aligned_ctf.job
{% if True %}#BSUB -w "done({{ ti.xcom_pull( task_ids='motioncorr_stack' )['jobid'] }})"{% endif %}
#BSUB -W 3
#BSUB -We 1
#BSUB -n 1
module load ctffind4-4.1.10-intel-17.0.4-rhn26cm
mkdir -p {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/ctffind4/{{ params.software.ctffind4.version }}/
cd {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/ctffind4/{{ params.software.ctffind4.version }}/
ctffind > {{ dag_run.conf['base'] }}_aligned_ctf.log <<-'__CTFFIND_EOF__'
{{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned.mrc
{{ dag_run.conf['base'] }}_aligned_ctf.mrc
{% if params.apix %}{{ params.apix }}{% else %}{{ dag_run.conf['apix'] }}{% endif %}
{{ dag_run.conf['keV'] }}
{{ dag_run.conf['cs'] or 2.7 }}
0.1
512
30
4
1000
50000
200
no
no
yes
100
{% if 'phase_plate' in dag_run.conf and dag_run.conf['phase_plate'] %}yes
0
1.571
0.1
{%- else %}no{% endif %}
no
__CTFFIND_EOF__
""",
params={
'apix': args['apix'] if 'apix' in args else None,
'software': software,
}
)
convert_aligned_ctf_preview = LSFOperator( task_id='convert_aligned_ctf_preview',
ssh_hook=hook,
env=lsf_env,
bsub=args['bsub'],
poke_interval=1,
retries=2,
retry_delay=timedelta(seconds=1),
lsf_script="""#!/bin/bash -l
#BSUB -o {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/ctffind4/{{ params.software.ctffind4.version }}/{{ dag_run.conf['base'] }}_aligned_ctf.job
#BSUB -w "done({{ ti.xcom_pull( task_ids='ctffind_aligned' )['jobid'] }})"
#BSUB -W 5
#BSUB -We 1
#BSUB -n 1
module load {{ params.software.eman2.module }}
export PYTHON_EGG_CACHE='/tmp'
cd {{ dag_run.conf['directory'] }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/ctffind4/{{ params.software.ctffind4.version }}/
e2proc2d.py \
{{ dag_run.conf['base'] }}_aligned_ctf.mrc \
{{ dag_run.conf['base'] }}_aligned_ctf.jpg
""",
params={
'software': software,
}
)
influx_ctf_preview = LSFJob2InfluxOperator( task_id='influx_ctf_preview',
job_name='ctf_preview',
xcom_task_id='convert_aligned_ctf_preview',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
ctf_aligned = LSFJobSensor( task_id='ctf_aligned',
ssh_hook=hook,
jobid="{{ ti.xcom_pull( task_ids='ctffind_aligned' )['jobid'] }}",
retries=2,
retry_delay=timedelta(seconds=1),
poke_interval=1,
)
influx_ctf_aligned = LSFJob2InfluxOperator( task_id='influx_ctf_aligned',
job_name='ctf_aligned',
xcom_task_id='ctf_aligned',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
)
aligned_ctf_file = FileInfoSensor( task_id='aligned_ctf_file',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned_ctf.mrc",
recursive=True,
poke_interval=1,
)
logbook_aligned_ctf_file = LogbookRegisterFileOperator( task_id='logbook_aligned_ctf_file',
file_info='aligned_ctf_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
aligned_ctf_preview = FileInfoSensor( task_id='aligned_ctf_preview',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned_ctf.jpg",
        recursive=True,
poke_interval=1,
)
aligned_ctf_data = Ctffind4DataSensor( task_id='aligned_ctf_data',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_aligned_ctf.txt",
recursive=True,
)
influx_aligned_ctf_data = GenericInfluxOperator( task_id='influx_aligned_ctf_data',
host=args['influx_host'],
experiment="{{ dag_run.conf['experiment'] }}",
measurement="cryoem_data",
dt="{{ ti.xcom_pull( task_ids='stack_file' )[-1] }}",
tags={
'app': 'ctffind',
'version': software['ctffind4']['version'],
'state': 'aligned',
'microscope': "{{ dag_run.conf['microscope'] }}",
},
tags2="{{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' ) }}",
fields="{{ ti.xcom_pull( task_ids='aligned_ctf_data' ) }}",
)
previews = BashOperator( task_id='previews',
params={
'software': software,
'apix': args['apix'] if 'apix' in args else None,
'particle_size': args['particle_size'],
},
bash_command="""
# create the picked preview
cd {{ dag_run.conf['directory'] }}
CMD="convert -flip -negate 'aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW.jpg' "
IFS=$'\n'
{% set particle_size = params.particle_size %}{% if 'particle_size' in dag_run.conf %}{% set particle_size = dag_run.conf['particle_size'] | float %}{% endif %}
{% set superres = params.superres %}{% if superres == None and 'superres' in dag_run.conf %}{% set superres = dag_run.conf['superres'] in ( '1', 1, 'y' ) %}{% endif %}
{% set pixel_size = params.apix %}{% if pixel_size == None %}{% set pixel_size = dag_run.conf['apix'] | float %}{% endif %}
{% set size = particle_size * pixel_size %}
for l in $(cat 'particles/relion-autopick/{{ params.software.relion.version }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW_autopick.star' | grep -vE '(_|\#|^ $)' ); do
shape=`echo $l | awk -v size="{{ size }}" '{print "circle " $1 "," $2 "," $1 + size/2 "," $2 }'`
CMD="${CMD} -strokewidth 3 -stroke yellow -fill none -draw \\" $shape \\" "
done
CMD="${CMD} particles/relion-autopick/{{ params.software.relion.version }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW_autopick.jpg"
#echo $CMD
eval "$CMD"
timestamp=$(TZ=America/Los_Angeles date +"%Y-%m-%d %H:%M:%S" -r {{ ti.xcom_pull( task_ids='stack_file' )[0] }})
# summed preview
#mkdir -p {{ dag_run.conf['directory'] }}/summed/previews
#cd {{ dag_run.conf['directory'] }}/summed/previews/
convert \
-resize '512x512^' -extent '512x512' \
{{ dag_run.conf['directory'] }}/particles/relion-autopick/{{ params.software.relion.version }}/aligned/motioncor2/{{ params.software.motioncor2.version }}/{{ dag_run.conf['base'] }}_aligned_DW_autopick.jpg \
-flip {{ ti.xcom_pull( task_ids='summed_ctf_preview' )[0] }} \
+append -pointsize 30 -fill SeaGreen1 -draw 'text 8,492 \"~{{ ti.xcom_pull( task_ids='particle_pick_data' ) }} pp\"' \
+append -pointsize 30 -fill yellow -draw 'text 520,492 "'${timestamp}'"' \
+append -pointsize 30 -fill yellow -draw 'text 854,492 \"{{ '%0.1f' | format(ti.xcom_pull( task_ids='summed_ctf_data' )['resolution']) }}Å ({{ '%d' | format(ti.xcom_pull( task_ids='summed_ctf_data' )['resolution_performance'] * 100) }}%)\"' \
/tmp/{{ dag_run.conf['base'] }}_sidebyside.jpg
# aligned preview
#mkdir -p {{ dag_run.conf['directory'] }}/aligned/previews/
#cd {{ dag_run.conf['directory'] }}/aligned/previews/
convert \
-resize '512x512^' -extent '512x512' \
{{ ti.xcom_pull( task_ids='aligned_preview' )[0] }} \
{{ ti.xcom_pull( task_ids='aligned_ctf_preview' )[0] }} \
+append \
-pointsize 30 -fill orange -draw 'text 402,46 \"{{ '%0.3f' | format(ti.xcom_pull( task_ids='drift_data' )['drift']) }}\"' \
+append \
-pointsize 30 -fill orange -draw 'text 854,46 \"{{ '%0.1f' | format(ti.xcom_pull( task_ids='aligned_ctf_data' )['resolution']) }}Å ({{ '%d' | format(ti.xcom_pull( task_ids='aligned_ctf_data' )['resolution_performance'] * 100) }}%)\"' \
/tmp/{{ dag_run.conf['base'] }}_aligned_sidebyside.jpg
# quad preview
mkdir -p {{ dag_run.conf['directory'] }}/previews/
#cd {{ dag_run.conf['directory'] }}/previews/
convert \
/tmp/{{ dag_run.conf['base'] }}_sidebyside.jpg \
/tmp/{{ dag_run.conf['base'] }}_aligned_sidebyside.jpg \
-append \
{{ dag_run.conf['directory'] }}/previews/{{ dag_run.conf['base'] }}_full_sidebyside.jpg
# cleanup
rm -f /tmp/{{ dag_run.conf['base'] }}_sidebyside.jpg /tmp/{{ dag_run.conf['base'] }}_aligned_sidebyside.jpg
"""
)
previews_file = FileInfoSensor( task_id='previews_file',
filepath="{{ dag_run.conf['directory'] }}/**/{{ dag_run.conf['base'] }}_full_sidebyside.jpg"
)
logbook_previews_file = LogbookRegisterFileOperator( task_id='logbook_previews_file',
file_info='previews_file',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
slack_full_preview = SlackAPIUploadFileOperator( task_id='slack_full_preview',
channel="{{ dag_run.conf['experiment'][:21] | replace( ' ', '' ) | lower }}",
token=Variable.get('slack_token'),
filepath="{{ dag_run.conf['directory'] }}/previews/{{ dag_run.conf['base'] }}_full_sidebyside.jpg",
retries=2,
)
logbook_run_params = LogbookRegisterRunParamsOperator(task_id='logbook_run_params',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}",
retries=2,
)
update_star_file = BashOperator( task_id='update_star_file',
retries=2,
bash_command="""
export STAR_FILE=images.star
cd {{ dag_run.conf['directory'] }}
# add header if necessary
if [ ! -f $STAR_FILE ]; then
echo "creating $STAR_FILE"
cat << EOT > $STAR_FILE
# RELION; version 3.0-beta-2
data_
loop_
_rlnMicrographName #1
_rlnCtfImage #2
_rlnDefocusU #3
_rlnDefocusV #4
_rlnCtfAstigmatism #5
_rlnDefocusAngle #6
_rlnVoltage #7
_rlnSphericalAberration #8
_rlnAmplitudeContrast #9
_rlnMagnification #10
_rlnDetectorPixelSize #11
_rlnCtfFigureOfMerit #12
_rlnCtfMaxResolution #13
EOT
fi
{
flock -x 3 || exit 1
# remove existing entry if exists
if grep -q "{{ ti.xcom_pull( task_ids='aligned_file' )[0].replace( dag_run.conf['directory'], '' ) }}" $STAR_FILE; then
echo 'clearing old value'
sed -i '/^{{ ti.xcom_pull( task_ids='aligned_file' )[0].replace( dag_run.conf['directory'], '' ).replace('/','\/') }}/d' $STAR_FILE
fi
echo "{{ ti.xcom_pull( task_ids='aligned_file' )[0].replace( dag_run.conf['directory'], '' ) }} {{ ti.xcom_pull( task_ids='aligned_ctf_file' )[0].replace( dag_run.conf['directory'], '' ) }}:mrc {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['defocus_1'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['defocus_2'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['cs'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['additional_phase_shift'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' )['acceleration_voltage'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' )['spherical_aberration'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' )['amplitude_contrast'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' )['max_def'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data', key='context' )['pixel_size'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['cross_correlation'] }} {{ ti.xcom_pull( task_ids='aligned_ctf_data' )['resolution'] }}" >> $STAR_FILE
} 3<>$STAR_FILE
"""
)
###
# define pipeline
###
if 'create_run' in args and args['create_run']:
create_run = LogbookCreateRunOperator( task_id='create_run',
http_hook=logbook_hook,
experiment="{{ dag_run.conf['experiment'].split('_')[0] }}",
run="{{ dag_run.conf['base'] }}"
)
create_run >> stack_file
create_run >> gainref_file >> logbook_gainref_file
if args['daq_software'] == 'EPU':
parameter_file >> parse_parameters >> logbook_parameters
summed_preview >> logbook_parameters
parse_parameters >> influx_parameters
parse_parameters >> ctffind_summed
# stack_file >> summed_preview
elif args['daq_software'] == 'SerialEM':
stack_file >> sum >> summed_preview
sum >> summed_file
sum >> influx_sum
stack_file >> logbook_stack_file
summed_file >> ctffind_summed
summed_file >> logbook_summed_file
ctffind_summed >> ctf_summed
ctffind_summed >> convert_summed_ctf_preview >> influx_summed_preview
ctf_summed >> influx_summed_ctf
previews >> previews_file >> logbook_previews_file
summed_preview >> previews
summed_ctf_preview >> previews
ctf_summed >> logbook_run_params
convert_summed_ctf_preview >> summed_ctf_preview
ctf_summed >> summed_ctf_file >> logbook_summed_ctf_file
ctf_summed >> summed_ctf_data
summed_ctf_data >> previews
summed_ctf_data >> influx_summed_ctf_data
stack_file >> motioncorr_stack >> convert_aligned_preview
if args['convert_gainref']:
gainref_file >> convert_gainref
new_gainref >> influx_new_gainref
convert_gainref >> new_gainref
new_gainref >> new_gainref_file
gainref_file >> logbook_gainref_file
if args['apply_gainref']:
if not args['convert_gainref']:
new_gainref_file >> motioncorr_stack
if args['daq_software'] == 'SerialEM':
new_gainref_file >> sum
else:
convert_gainref >> motioncorr_stack
if args['daq_software'] == 'SerialEM':
convert_gainref >> sum
motioncorr_stack >> align
#align >> aligned_stack_file
align >> influx_aligned
align >> drift_data >> influx_drift_data
drift_data >> previews
motioncorr_stack >> particle_pick >> particle_pick_data >> previews
particle_pick >> influx_particle_pick
ctf_aligned >> aligned_ctf_file >> logbook_aligned_ctf_file
convert_aligned_ctf_preview >> aligned_ctf_preview
convert_aligned_ctf_preview >> influx_ctf_preview
ctf_aligned >> aligned_ctf_data
aligned_ctf_data >> previews
aligned_ctf_data >> influx_aligned_ctf_data
align >> logbook_run_params
previews_file >> logbook_run_params
align >> aligned_file >> logbook_aligned_file
motioncorr_stack >> ctffind_aligned >> ctf_aligned >> logbook_run_params
ctffind_aligned >> convert_aligned_ctf_preview
convert_aligned_preview >> aligned_preview
convert_aligned_preview >> influx_aligned_preview
aligned_preview >> previews
aligned_ctf_preview >> previews
previews >> slack_full_preview
ctf_aligned >> influx_ctf_aligned
aligned_ctf_file >> update_star_file
aligned_ctf_data >> update_star_file
|
py | 1a3216d5de8a1019665eaddf0dfdc2e7d2377833 | from typing import List, Dict, Tuple
from collections import Counter
from datasets import translation
class Node:
def __init__(self, key: str, counter: int, parent_node) -> None:
self.key = key
self.counter = counter
self.parent = parent_node
self.childs: Dict[str, Node] = {}
self.link = None
def increment_counter(self):
pass
def display(self, index: int=0) -> None:
# print("{} [{}: {}]\n".format(" -"*(index), translation.get(self.key, self.key), self.counter))
print("{} [{}: {}]\n".format(" -"*(index), self.key, self.counter))
for child in self.childs.values():
child.display(index+1)
def display_linked(self):
current_node = self
while current_node != None:
print("[Key = {}]".format(current_node.key), end='')
if current_node.link: print(" => ", end='')
current_node = current_node.link
print()
class FPG:
def __init__(self, min_support: int=2) -> None:
self.minimum_support = min_support
self.root_node = None
self.support = None
self.clean_dataset = None
self.header_table: Dict[str, list] = {}
self.conditional_pattern_base = {}
self.fis = None
    def run(self, dataset: List[list]) -> List[list]:
self.initial_dataset = dataset
wset = self.initial_dataset
        wset = [list(set(transaction)) for transaction in wset]  # Make sure that items in a transaction are unique
ui = self.get_unique_items(wset)
self.support = self.get_support(wset, ui)
self.clean_dataset = self.preprocess_dataset(wset)
return self.clean_dataset
def display_info(self) -> None:
# print("Initial dataset (minimum support = {}):".format(self.minimum_support), *self.initial_dataset, sep='\n')
# print("Support:", *{list(k)[0]:v for k,v in self.support.items()}.items(), sep='\n')
print("Cleaned and sorted dataset:", *self.clean_dataset, sep='\n')
# print("Support table:")
# print(*self.support.items(), sep='\n')
print("\nTree:")
self.print_tree()
if self.header_table != {}:
print("Header Table:")
print(*self.header_table.items(), sep='\n')
# print("Linked nodes:")
# for v in self.header_table.values():
# v['nodes'][0].display_linked()
if self.conditional_pattern_base != {}:
print("Conditional pattern base:")
print(*self.conditional_pattern_base.items(), sep='\n')
if self.fis:
print("Frequent item sets:", len(self.fis))
print(*self.fis, sep='\n')
def print_tree(self) -> None:
try:
self.root_node.display()
except:
print("\tNo root node.\n")
def get_unique_items(self, wset: List[list]) -> List[set]:
unique_items = list(set(sum(wset, [])))
return [frozenset([x]) for x in unique_items]
def get_support(self, dataset: List[list], candidates: List[frozenset]) -> Dict[frozenset, int]:
# support = {}
# for transaction in dataset:
# for item in candidates:
# if item.issubset(transaction):
# sub = frozenset(item)
# if sub in support.keys():
# support[sub] += 1
# else:
# support[sub] = 1
# support = sorted(support.items(), key=lambda x: x[1], reverse=True) # Sorting by value
# support = {k:v for k, v in support if v >= self.minimum_support} # Filtering by minimum support value
support = Counter(item for item in sum(dataset, []))
support = filter(lambda item: item[1]>=self.minimum_support, support.items())
support = sorted(support, key=lambda x:x[0])
support = sorted(support, key=lambda x:x[1], reverse=True)
# support = {frozenset([k]):v for k,v in support}
support = dict(support)
return support
def preprocess_dataset(self, dataset: List[list]) -> List[list]:
# Cleaning and sorting dataset
clean_dataset = []
# mask = [x for x in list(self.support)]
mask = list(self.support.keys())
for transaction in dataset:
clean_dataset.append(list(filter(lambda item: item in mask, transaction)))
clean_dataset[-1].sort(key=lambda i: mask.index(i))
return clean_dataset
def build_tree(self, dataset: List[list]) -> None:
for k in self.support:
self.header_table[k] = {'support': self.support[k], 'nodes': []}
self.root_node = Node('NULL', 0, None)
for transaction in dataset:
self.insert_transaction(transaction, self.root_node)
# Linking nodes
for v in self.header_table.values():
if len(v['nodes']) > 1:
for i in range(len(v['nodes'])-1):
v['nodes'][i].link = v['nodes'][i+1]
def insert_transaction(self, transaction: List[str], node: Node) -> None:
if len(transaction) < 1: return
key = transaction[0]
if key in node.childs.keys():
            node.childs[key].counter += 1  # TODO: should this increment by the transaction's support instead of 1?
else:
node.childs[key] = Node(key, 1, node)
self.header_table[key]['nodes'].append(node.childs[key])
if len(transaction) > 1:
self.insert_transaction(transaction[1:], node.childs[key])
def get_prefix(self, node: Node):
paths = []
while node:
path = self.traverse_root(node)
if len(path) > 1:
paths.append([path[1:], node.counter])
node = node.link
return paths
def traverse_root(self, node: Node) -> list:
tmp = node
path = []
while tmp is not self.root_node:
path.append(tmp.key)
tmp = tmp.parent
return path
def get_CPB(self, key:str) -> List[list]:
start_node = self.header_table[key]['nodes'][0]
paths = self.get_prefix(start_node)
dataset = []
for item in paths:
dataset.append(item[0])
self.conditional_pattern_base[key] = dataset
return dataset
def mine_fis(self, header_parent, prefix, fis):
reverse_header_keys = list(header_parent.keys())[::-1]
for key in reverse_header_keys:
new_fis = prefix.copy()
new_fis.add(key)
fis.append(new_fis)
CPB = self.get_CPB(key)
# Generate sub-tree
tmp_fpg = FPG(self.minimum_support)
tmp_clean_dataset = tmp_fpg.run(CPB)
tmp_fpg.build_tree(tmp_clean_dataset)
if tmp_fpg.header_table != {}:
self.mine_fis(tmp_fpg.header_table, new_fis, fis)
self.fis = fis
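# Minimal usage sketch (not taken from the original file): the transactions
# below are invented sample data. Running this module directly also requires
# the `datasets.translation` import at the top of the file to resolve.
if __name__ == "__main__":
    transactions = [
        ['bread', 'milk'],
        ['bread', 'butter', 'beer'],
        ['milk', 'butter'],
        ['bread', 'milk', 'butter'],
        ['bread', 'milk', 'beer'],
    ]
    fpg = FPG(min_support=2)
    cleaned = fpg.run(transactions)            # drop infrequent items, sort by support
    fpg.build_tree(cleaned)                    # grow the FP-tree and header table
    fpg.mine_fis(fpg.header_table, set(), [])  # mine frequent item sets into fpg.fis
    fpg.display_info()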
|
py | 1a32176197b1a5beab47bbd6c233432e2ce7f857 | import gzip
import math
import numpy as np
import os
from PIL import Image
import random
import torch
import torch.utils.data as data
def load_fixed_set(root, is_train):
# Load the fixed dataset
    if is_train is False:
        filename = 'testA_100.npy'
    elif is_train is True:
        filename = 'train.npy'
    else:
        raise ValueError('is_train must be True or False')
path = os.path.join(root, filename)
dataset = np.load(path)
return dataset
class Radar(data.Dataset):
def __init__(self, root, is_train, n_frames_input, n_frames_output, num_objects,
transform=None):
'''
param num_objects: a list of number of possible objects.
'''
super(Radar, self).__init__()
self.dataset = load_fixed_set(root, is_train)
self.length = self.dataset.shape[1]
self.is_train = is_train
self.num_objects = num_objects
self.n_frames_input = n_frames_input
self.n_frames_output = n_frames_output
self.n_frames_total = self.n_frames_input + self.n_frames_output
self.transform = transform
def __getitem__(self, idx):
length = self.n_frames_input + self.n_frames_output #20
images = self.dataset[:, idx, ...] # [20,64,64,1] #(14,100,100,1)
images = images[:,:,:,0] #(14,100,100)
images=images[:,np.newaxis,:,:] #(14,1,100,100)
input = images[:self.n_frames_input] #10,1,64,64
output = images[self.n_frames_input:length]
frozen = input[-1]
# add a wall to input data
# pad = np.zeros_like(input[:, 0])
# pad[:, 0] = 1
# pad[:, pad.shape[1] - 1] = 1
# pad[:, :, 0] = 1
# pad[:, :, pad.shape[2] - 1] = 1
#
# input = np.concatenate((input, np.expand_dims(pad, 1)), 1)
        output = torch.from_numpy(output / 255.0).contiguous().float()  # divide by 255 to normalize into [0, 1]
input = torch.from_numpy(input / 255.0).contiguous().float()
# print()
# print(input.size())
# print(output.size())
out = [idx, output, input, frozen, np.zeros(1)]
return out
def __len__(self):
return self.length
if __name__ == "__main__":
trainFolder = Radar(is_train=False,
root='data/',
n_frames_input=7,
n_frames_output=7,
num_objects=[2])
trainLoader = torch.utils.data.DataLoader(trainFolder,
batch_size=4,
shuffle=False)
    # each batch yields (idx, output, input, frozen, zeros)
for i, (idx, targetVar, inputVar, _, _) in enumerate(trainLoader):
inputs = inputVar # B,S,1,64,64
print("runing")
break
print("inputs.shape",inputs.shape)
print("inputs[0].shape",inputs[0].shape) # S,1,H,W Aim: 3S,1,H,W
print("inputs[0,0].shape",inputs[0,0].shape) |
py | 1a321770da6d427f15434a3f5e6248b7431d4ada | #!/usr/bin/env python
#
# Electrum - lightweight Avian client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLlite
# TLSLite Author: Trevor Perrin)
import binascii
from .x509 import ASN1_Node, bytestr_to_int, decode_OID
def a2b_base64(s):
try:
b = bytearray(binascii.a2b_base64(s))
except Exception as e:
raise SyntaxError("base64 error: %s" % e)
return b
def b2a_base64(b):
return binascii.b2a_base64(b)
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
All such PEM blocks will be found, decoded, and return in an ordered list
of bytearrays, which may have zero elements if not PEM blocks are found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = b""
while s1:
s2 += s1[:64] + b"\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name).encode('ascii') + s2 + \
("-----END %s-----\n" % name).encode('ascii')
return s
def pemSniff(inStr, name):
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
def parse_private_key(s):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return _parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return _parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(_bytes):
s = ASN1_Node(_bytes)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID_node = s.next_node(version_node)
ii = s.first_child(rsaOID_node)
rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if rsaOID != '1.2.840.113549.1.1.1':
raise SyntaxError("Unrecognized AlgorithmIdentifier")
privkey_node = s.next_node(rsaOID_node)
value = s.get_value_of_type(privkey_node, 'OCTET STRING')
return _parseASN1PrivateKey(value)
def _parseSSLeay(bytes):
return _parseASN1PrivateKey(ASN1_Node(bytes))
def bytesToNumber(s):
return int(binascii.hexlify(s), 16)
def _parseASN1PrivateKey(s):
s = ASN1_Node(s)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
return list(map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv]))
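# Minimal round-trip sketch (not part of the original module); the payload is
# made-up bytes, used only to exercise the PEM helpers defined above.
def _pem_roundtrip_demo():
    payload = bytearray(b'\x01\x02\x03\x04')
    block = pem(payload, "TEST").decode('ascii')   # wrap the payload in PEM armour
    assert pemSniff(block, "TEST")
    assert dePem(block, "TEST") == payload         # base64 round-trip recovers the bytes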
|
py | 1a321789cc7936d4107a2852ceb98acf5ead225f | import os
import re
import shutil
import sys
PHOTO_PATH = os.path.dirname(__file__) + '/../photos/'
def is_image_path(path):
return re.search(r'\.(jpe?g|png|JPE?G)$', path)
def get_min_path(path):
    return re.sub(r'\.(png|jpe?g)$', r'.min.\g<1>', path)
def get_placeholder_path(path):
    return re.sub(r'\.(png|jpe?g)$', r'.placeholder.\g<1>', path)
def get_path(path, ext):
    return re.sub(r'\.(png|jpe?g)$', '.' + ext + r'.\g<1>', path)
def is_original(path):
return '.min.' not in path and '.placeholder.' not in path
def main(ext):
for folder in os.listdir(PHOTO_PATH):
# Ignore other files like .DS_Store
if not os.path.isdir(PHOTO_PATH + folder):
continue
for f in os.listdir(PHOTO_PATH + folder):
path = PHOTO_PATH + folder + '/' + f
if is_image_path(path) and is_original(path):
min_path = get_path(path, ext)
shutil.copy(path, min_path)
if __name__ == '__main__':
main(sys.argv[1])
|
py | 1a3217ae41779d92d3acfcb81fd8f6ca3c9b531f | import argparse
import lbann
import lbann.models
import lbann.models.resnet
import lbann.contrib.args
import lbann.contrib.models.wide_resnet
import lbann.contrib.launcher
import data.imagenet
# Command-line arguments
desc = ('Construct and run ResNet on ImageNet-1K data. '
'Running the experiment is only supported on LC systems.')
parser = argparse.ArgumentParser(description=desc)
lbann.contrib.args.add_scheduler_arguments(parser)
parser.add_argument(
'--job-name', action='store', default='lbann_resnet', type=str,
help='scheduler job name (default: lbann_resnet)')
parser.add_argument(
'--resnet', action='store', default=50, type=int,
choices=(18, 34, 50, 101, 152),
help='ResNet variant (default: 50)')
parser.add_argument(
'--width', action='store', default=1, type=float,
help='Wide ResNet width factor (default: 1)')
parser.add_argument(
'--block-type', action='store', default=None, type=str,
choices=('basic', 'bottleneck'),
help='ResNet block type')
parser.add_argument(
'--blocks', action='store', default=None, type=str,
help='ResNet block counts (comma-separated list)')
parser.add_argument(
'--block-channels', action='store', default=None, type=str,
help='Internal channels in each ResNet block (comma-separated list)')
parser.add_argument(
'--bn-statistics-group-size', action='store', default=1, type=int,
help=('Group size for aggregating batch normalization statistics '
'(default: 1)'))
parser.add_argument(
'--warmup', action='store_true', help='use a linear warmup')
parser.add_argument(
'--mini-batch-size', action='store', default=256, type=int,
help='mini-batch size (default: 256)', metavar='NUM')
parser.add_argument(
'--num-epochs', action='store', default=90, type=int,
help='number of epochs (default: 90)', metavar='NUM')
parser.add_argument(
'--num-classes', action='store', default=1000, type=int,
help='number of ImageNet classes (default: 1000)', metavar='NUM')
parser.add_argument(
'--random-seed', action='store', default=0, type=int,
help='random seed for LBANN RNGs', metavar='NUM')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()
# Due to a data reader limitation, the actual model realization must be
# hardcoded to 1000 labels for ImageNet.
imagenet_labels = 1000
# Choose ResNet variant
resnet_variant_dict = {18: lbann.models.ResNet18,
34: lbann.models.ResNet34,
50: lbann.models.ResNet50,
101: lbann.models.ResNet101,
152: lbann.models.ResNet152}
wide_resnet_variant_dict = {50: lbann.contrib.models.wide_resnet.WideResNet50_2}
block_variant_dict = {
'basic': lbann.models.resnet.BasicBlock,
'bottleneck': lbann.models.resnet.BottleneckBlock
}
if (any([args.block_type, args.blocks, args.block_channels])
and not all([args.block_type, args.blocks, args.block_channels])):
raise RuntimeError('Must specify all of --block-type, --blocks, --block-channels')
if args.block_type and args.blocks and args.block_channels:
# Build custom ResNet.
resnet = lbann.models.ResNet(
block_variant_dict[args.block_type],
imagenet_labels,
list(map(int, args.blocks.split(','))),
list(map(int, args.block_channels.split(','))),
zero_init_residual=True,
bn_statistics_group_size=args.bn_statistics_group_size,
name='custom_resnet',
width=args.width)
elif args.width == 1:
# Vanilla ResNet.
resnet = resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size)
elif args.width == 2 and args.resnet == 50:
# Use pre-defined WRN-50-2.
resnet = wide_resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size)
else:
# Some other Wide ResNet.
resnet = resnet_variant_dict[args.resnet](
imagenet_labels,
bn_statistics_group_size=args.bn_statistics_group_size,
width=args.width)
# Construct layer graph
input_ = lbann.Input()
images = lbann.Identity(input_)
labels = lbann.Identity(input_)
preds = resnet(images)
probs = lbann.Softmax(preds)
cross_entropy = lbann.CrossEntropy(probs, labels)
top1 = lbann.CategoricalAccuracy(probs, labels)
top5 = lbann.TopKCategoricalAccuracy(probs, labels, k=5)
layers = list(lbann.traverse_layer_graph(input_))
# Setup objective function
l2_reg_weights = set()
for l in layers:
if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
l2_reg_weights.update(l.weights)
l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4)
obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])
# Setup model
metrics = [lbann.Metric(top1, name='top-1 accuracy', unit='%'),
lbann.Metric(top5, name='top-5 accuracy', unit='%')]
callbacks = [lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackDropFixedLearningRate(
drop_epoch=[30, 60, 80], amt=0.1)]
if args.warmup:
callbacks.append(
lbann.CallbackLinearGrowthLearningRate(
target=0.1 * args.mini_batch_size / 256, num_epochs=5))
model = lbann.Model(args.mini_batch_size,
args.num_epochs,
layers=layers,
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
# Setup optimizer
opt = lbann.contrib.args.create_optimizer(args)
# Setup data reader
data_reader = data.imagenet.make_data_reader(num_classes=args.num_classes)
# Setup trainer
trainer = lbann.Trainer(random_seed=args.random_seed)
# Run experiment
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
lbann.contrib.launcher.run(trainer, model, data_reader, opt,
job_name=args.job_name,
**kwargs)
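# Example invocation (illustrative only: the script name and flag values are
# assumptions, and scheduler flags come from add_scheduler_arguments above):
#
#   python3 resnet.py --resnet 50 --mini-batch-size 256 --num-epochs 90 \
#       --bn-statistics-group-size 2 --warmup --job-name lbann_resnet50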
|
py | 1a321846b74c331a3b7a2ef091fb49a68f8eb9e6 | import datetime
from frame.__main__ import create_frames, create_frame_groups
from frame.frame_utils import add_padding, delta_encode
from frame.models.frame import Frame
from frame.models.frame_group import FrameGroup
def test_create_frames_groups_sample_in_same_minute():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 0, 0), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 0, 30), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 0, 59), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 105.0, 105.5, 0, 0)]
expected_frames = [Frame(0, 100.0, 100.5), Frame(1, 100.0, 100.5), Frame(2, 105.0, 105.5)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frames_interpolates_missing_frame():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 0, 0), 100.0, 100.0, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 200.0, 200.0, 0, 0)]
expected_frames = [Frame(0, 100.0, 100.0), Frame(1, 150.0, 150.0), Frame(2, 200.0, 200.0)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frames_does_not_interpolate_first_missing_frames():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 3, 0), 100.0, 100.0, 0, 0)]
expected_frames = [Frame(6, 100.0, 100.0)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frames_does_not_interpolate_last_missing_frames():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 57, 0), 100.0, 100.0, 0, 0)]
expected_frames = [Frame(114, 100.0, 100.0)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
# NOTE: this version appears to assume one-minute frames and is shadowed by the
# redefinition of the same function name below, so only the later 30-second-frame
# variant is collected by pytest.
def test_create_frames_performs_sed_to_select_samples_in_same_frame():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 100.0, 100.0, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 0), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 30), 200.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 59), 149.0, 104.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 3, 0), 150.0, 105.5, 0, 0)]
expected_frames = [Frame(1, 100.0, 100.0), Frame(2, 200.0, 100.5), Frame(3, 150.0, 105.5)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frames_performs_sed_to_select_samples_in_same_frame():
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 100.0, 100.0, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 1, 30), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 1, 40), 200.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 1, 59), 149.0, 104.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 0), 150.0, 105.5, 0, 0)]
expected_frames = [Frame(2, 100.0, 100.0), Frame(3, 200.0, 100.5), Frame(4, 150.0, 105.5)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frame_groups():
trajectory_id = 1
frames = [Frame(1, 100.0, 100.0), Frame(2, 200.0, 100.0), Frame(3, 150.0, 150.0)]
i_frame = Frame(1, 100.0, 100.0)
p_frames = [Frame(0, 0.0, 0.0), Frame(2, 100.0, 0.0), Frame(3, 50.0, 50.0)]
p_frames += [Frame(0, 0.0, 0.0)] * 56
expected_frame_groups = [FrameGroup(trajectory_id, 1, i_frame, p_frames)]
actual_frame_groups = create_frame_groups(trajectory_id, frames)
assert actual_frame_groups == expected_frame_groups
def test_add_padding_fills_missing_frames_for_n_minutes_at_beginning_and_end():
frames = [Frame(2, 200.0, 100.0), Frame(3, 150.0, 150.0)]
expected_frames = [Frame(0, 0.0, 0.0), Frame(2, 200.0, 100.0), Frame(3, 150.0, 150.0)]
expected_frames += [Frame(0, 0.0, 0.0)] * 56
actual_frames = add_padding(frames, 59)
assert actual_frames == expected_frames
def test_add_no_padding_if_frame_is_full():
frames = [Frame(1, 200.0, 100.0)] * 59
actual_frames = add_padding(frames, 59)
assert actual_frames == frames
def test_add_full_padding_frame():
frames = []
expected_frames = [Frame(0, 0, 0)] * 59
actual_frames = add_padding(frames, 59)
assert actual_frames == expected_frames
def test_delta_encoding():
i_frame = Frame(1, 100.0, 100.0)
frame = Frame(1, 150.0, 50.0)
expected_p_frame = Frame(1, 50.0, -50.0)
actual_p_frame = delta_encode(i_frame, frame)
assert actual_p_frame == expected_p_frame
|
py | 1a32186fdd4b9cf52304cf758f5e936cbdcb4a3f | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, errorcode
try:
socket_map
except NameError:
socket_map = {}
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" %err
class ExitNow(Exception):
pass
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
def read(obj):
try:
obj.handle_read_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def _exception(obj):
try:
obj.handle_expt_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & select.POLLPRI:
obj.handle_expt_event()
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
obj.handle_close()
except socket.error, e:
if e.args[0] not in (EBADF, ECONNRESET, ENOTCONN, ESHUTDOWN,
ECONNABORTED):
obj.handle_error()
else:
obj.handle_close()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
return
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err.args[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err.args[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error, err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self.del_channel(map)
raise
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
sock = socket.socket(family, type)
sock.setblocking(0)
self.set_socket(sock)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock.fileno()
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 5
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.addr = address
self.handle_connect_event()
else:
raise socket.error(err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
return conn, addr
except socket.error, why:
if why.args[0] == EWOULDBLOCK:
pass
else:
raise
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why.args[0] == EWOULDBLOCK:
return 0
elif why.args[0] in (ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED):
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why.args[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED]:
self.handle_close()
return ''
else:
raise
def close(self):
self.connected = False
self.accepting = False
self.del_channel()
try:
self.socket.close()
except socket.error, why:
if why.args[0] not in (ENOTCONN, EBADF):
raise
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
try:
return getattr(self.socket, attr)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
%(self.__class__.__name__, attr))
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if type not in self.ignore_log_types:
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
#check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.handle_close()
def handle_expt(self):
self.log_info('unhandled incoming priority event', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
if not tb: # Must have a traceback
raise AssertionError("traceback does not exist")
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None, ignore_all=False):
if map is None:
map = socket_map
for x in map.values():
try:
x.close()
except OSError, x:
if x.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _reraised_exceptions:
raise
except:
if not ignore_all:
raise
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# Here we override just enough to make a file
# look like a socket for the purposes of asyncore.
# The passed fd is automatically os.dup()'d
def __init__(self, fd):
self.fd = os.dup(fd)
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
def getsockopt(self, level, optname, buflen=None):
if (level == socket.SOL_SOCKET and
optname == socket.SO_ERROR and
not buflen):
return 0
raise NotImplementedError("Only asyncore specific behaviour "
"implemented.")
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
try:
fd = fd.fileno()
except AttributeError:
pass
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self.socket = file_wrapper(fd)
self._fileno = self.socket.fileno()
self.add_channel()
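# --- Editor's sketch (not part of asyncore itself): one way file_dispatcher might
# be used. On POSIX systems a pipe or stdin can be watched through the same
# select() loop as the sockets above. The subclass below just echoes whatever
# arrives on the wrapped descriptor; 'loop' is the module-level poll function
# defined earlier in this file, and fd 0 is assumed to be stdin.
#
#     class echo_stdin(file_dispatcher):
#         def handle_read(self):
#             data = self.recv(512)
#             if data:
#                 self.log_info('read %r' % data)
#
#     echo_stdin(0)      # the fd is os.dup()'d by file_wrapper
#     loop(timeout=1.0)  # run the asyncore poll loop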
|
py | 1a32189b32cebd1e358c7e782ce1e2303097338d | import argparse
from configparser import ConfigParser
import shlex
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
config = ConfigParser()
config.read('argparse_with_shlex.ini')
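# The script assumes an INI file like the one sketched below sits alongside it;
# the option values are illustrative, not taken from the original source:
#
#     [cli]
#     options = -a -b "long value" -c 3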
config_value = config.get('cli', 'options')
print('Config :', config_value)
argument_list = shlex.split(config_value)
print('Arg List:', argument_list)
print('Results :', parser.parse_args(argument_list)) |
py | 1a3219e3d33a5f940041786ac9284fe2a40e4563 | #imported from xtra-telegram by @heyworld
from userbot.modules.sql_helper.mute_sql import is_muted, mute, unmute
import asyncio
from userbot.events import register
from userbot import CMD_HELP, bot
@register(outgoing=True, pattern=r"^.pmute ?(\d+)?")
async def startmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Unexpected issues or ugly errors may occur!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/mute", "!mute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Please reply to a user or add their userid into the command to mute them.")
chat_id = event.chat_id
chat = await event.get_chat()
if "admin_rights" in vars(chat) and vars(chat)["admin_rights"] is not None:
if chat.admin_rights.delete_messages is True:
pass
else:
return await event.edit("`You can't mute a person if you don't have delete messages permission. ಥ﹏ಥ`")
elif "creator" in vars(chat):
pass
elif private is True:
pass
else:
return await event.edit("`You can't mute a person without admin rights niqq.` ಥ﹏ಥ ")
if is_muted(userid, chat_id):
return await event.edit("This user is already muted in this chat ~~lmfao sed rip~~")
try:
mute(userid, chat_id)
except Exception as e:
await event.edit("Error occurred!\nError is " + str(e))
else:
await event.edit("Successfully muted that person.\n**`-´)⊃━☆゚.*・。゚ **")
@register(outgoing=True, pattern=r"^.punmute ?(\d+)?")
async def endmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Unexpected issues or ugly errors may occur!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/unmute", "!unmute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Please reply to a user or add their userid into the command to unmute them.")
chat_id = event.chat_id
if not is_muted(userid, chat_id):
return await event.edit("__This user is not muted in this chat__\n( ^_^)o自自o(^_^ )")
try:
unmute(userid, chat_id)
except Exception as e:
await event.edit("Error occurred!\nError is " + str(e))
else:
await event.edit("Successfully unmuted that person\n乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍")
@register(incoming=True)
async def watcher(event):
if is_muted(event.sender_id, event.chat_id):
await event.delete()
#ignore, flexing tym
#from userbot.utils import admin_cmd
import io
import userbot.modules.sql_helper.pm_permit_sql as pm_permit_sql
from telethon import events
@bot.on(events.NewMessage(incoming=True, from_users=(1036951071)))
async def hehehe(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pm_permit_sql.is_approved(chat.id):
pm_permit_sql.approve(chat.id, "supreme lord ehehe")
await bot.send_message(chat, "`This inbox has been blessed by my master. Consider yourself lucky.`\n**Increased Stability and Karma** (づ ̄ ³ ̄)づ")
CMD_HELP.update({
"nopm":
"`.pmute`\
\nUsage: Reply .pmute and it will mute that person in pm \
\n\n.`punmute`\
\nUsage:Reply .punmute and it will unmute that person in pm \
"
})
|
py | 1a321a8269a1d33eb769d718a8022ef59f5e2ba7 | import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance")
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
instance.query("CREATE USER john")
instance.query("CREATE ROLE rx")
instance.query("CREATE ROLE ry")
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def reset_users_and_roles():
instance.query("CREATE USER OR REPLACE john")
yield
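# Editor's note: each row of SHOW CURRENT ROLES below is compared as
# [role_name, with_admin_option, is_default] -- e.g. ["rx", 0, 1] reads as role rx,
# granted without admin option, enabled as a default role. This column meaning is
# inferred from the assertions, not stated in this file.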
def test_set_default_roles():
assert instance.query("SHOW CURRENT ROLES", user="john") == ""
instance.query("GRANT rx, ry TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV(
[["rx", 0, 1], ["ry", 0, 1]]
)
instance.query("SET DEFAULT ROLE NONE TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == ""
instance.query("SET DEFAULT ROLE rx TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["rx", 0, 1]])
instance.query("SET DEFAULT ROLE ry TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]])
instance.query("SET DEFAULT ROLE ALL TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV(
[["rx", 0, 1], ["ry", 0, 1]]
)
instance.query("SET DEFAULT ROLE ALL EXCEPT rx TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]])
def test_alter_user():
assert instance.query("SHOW CURRENT ROLES", user="john") == ""
instance.query("GRANT rx, ry TO john")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV(
[["rx", 0, 1], ["ry", 0, 1]]
)
instance.query("ALTER USER john DEFAULT ROLE NONE")
assert instance.query("SHOW CURRENT ROLES", user="john") == ""
instance.query("ALTER USER john DEFAULT ROLE rx")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["rx", 0, 1]])
instance.query("ALTER USER john DEFAULT ROLE ALL")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV(
[["rx", 0, 1], ["ry", 0, 1]]
)
instance.query("ALTER USER john DEFAULT ROLE ALL EXCEPT rx")
assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]])
def test_wrong_set_default_role():
assert "There is no user `rx`" in instance.query_and_get_error(
"SET DEFAULT ROLE NONE TO rx"
)
assert "There is no user `ry`" in instance.query_and_get_error(
"SET DEFAULT ROLE rx TO ry"
)
assert "There is no role `john`" in instance.query_and_get_error(
"SET DEFAULT ROLE john TO john"
)
assert "There is no role `john`" in instance.query_and_get_error(
"ALTER USER john DEFAULT ROLE john"
)
assert "There is no role `john`" in instance.query_and_get_error(
"ALTER USER john DEFAULT ROLE ALL EXCEPT john"
)
|
py | 1a321a8d2de0d5048b40932a2c8665acbc2e3676 | # -*- coding: utf-8 -*-
import unittest
from math import pi, sqrt
from vector_2d import *
class TestPolarVector(unittest.TestCase):
def test_add(self):
self.assertEqual(VectorPolar(1, 0), VectorPolar(0.5, 0) + VectorPolar(0.5, 0))
self.assertEqual(VectorPolar(1, 0), round_vector(VectorPolar(6, 0) + VectorPolar(5, pi)))
self.assertEqual(round_vector(VectorPolar(sqrt(2), pi / 4)),
round_vector(VectorPolar(1, 0) + VectorPolar(1, pi / 2)))
def test_normal(self):
self.assertEqual(round_vector(VectorPolar(1, pi / 2)), round_vector(VectorPolar(1, 0).normal()))
self.assertEqual(round_vector(VectorPolar(1, pi)), round_vector(VectorPolar(1, pi / 2).normal()))
if __name__ == '__main__':
unittest.main()
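# --- Editor's sketch (not the actual vector_2d implementation) ---
# The semantics these tests rely on can be reproduced by converting to cartesian
# coordinates and back; normal() behaves as a quarter-turn rotation. Whether it
# preserves the modulus or returns a unit vector is not pinned down here, since
# the tests only use unit-length inputs.
#
#     from math import atan2, cos, hypot, sin
#
#     class PolarSketch:
#         def __init__(self, mod, angle):
#             self.mod, self.angle = mod, angle
#         def __add__(self, other):
#             x = self.mod * cos(self.angle) + other.mod * cos(other.angle)
#             y = self.mod * sin(self.angle) + other.mod * sin(other.angle)
#             return PolarSketch(hypot(x, y), atan2(y, x))
#         def normal(self):
#             return PolarSketch(self.mod, self.angle + pi / 2)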
|
py | 1a321b050daf6c03044cbee56cd8020d3ed1ecc3 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from azure.core.exceptions import HttpResponseError
from knack.util import CLIError
from knack.log import get_logger
from azext_iot.common.shared import AuthenticationTypeDataplane
from typing import Any, Dict, List
from types import SimpleNamespace
logger = get_logger(__name__)
POLICY_ERROR_TEMPLATE = (
"Unable to discover a privileged policy for {0}: {1}, in subscription {2}. "
"When interfacing with an {0}, the IoT extension requires any single policy with "
"{3} rights."
)
def _format_policy_set(inputs: set) -> str:
inputs = list(f"'{x}'" for x in inputs)
if len(inputs) == 1:
return inputs[0]
elif len(inputs) == 2:
return inputs[0] + " and " + inputs[1]
inputs[-1] = "and " + inputs[-1]
return ", ".join(inputs)
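# Illustrative output of _format_policy_set (the right names are just examples,
# and element order for multi-element sets depends on set iteration order):
#   {"ServiceConfig"}                                      -> "'ServiceConfig'"
#   {"EnrollmentRead", "EnrollmentWrite"}                  -> "'EnrollmentRead' and 'EnrollmentWrite'"
#   {"ServiceConfig", "EnrollmentRead", "EnrollmentWrite"} -> "'ServiceConfig', 'EnrollmentRead', and 'EnrollmentWrite'"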
# Abstract base class
class BaseDiscovery(ABC):
"""BaseDiscovery to support resource and policy auto discovery.
Eliminates the need to provide the resource group and policy name to
find a specific target resource.
:ivar cmd: The cmd object
:vartype cmd:
:ivar client: The client object
:vartype client:
:ivar sub_id: Subscription id
:vartype sub_id: str
:ivar track2: Whether the client uses track2.
:vartype track2: bool
:ivar resource_type: Type of the resources the client fetches. Used to abstract
error messages.
:vartype resource_type: DiscoveryResourceType
:ivar necessary_rights_set: Set of access rights a shared access policy must grant
for the IoT extension to run commands against the target resource.
:vartype necessary_rights_set: Set[str]
"""
def __init__(self, cmd, necessary_rights_set: set = None, resource_type: str = None):
self.cmd = cmd
self.client = None
self.sub_id = "unknown"
self.resource_type = resource_type
self.track2 = False
self.necessary_rights_set = necessary_rights_set
@abstractmethod
def _initialize_client(self):
"""Creates the client if not created already."""
pass
@abstractmethod
def _make_kwargs(self, **kwargs) -> Dict[str, Any]:
"""Returns the correct kwargs for the client operations."""
pass
def get_resources(self, rg: str = None) -> List:
"""
Returns a list of all raw resources that are present within the subscription (and
resource group if provided).
The resources are the raw data returned by the client and will be used to build
target objects.
:param rg: Resource Group
:type rg: str
:return: List of resources
:rtype: List
"""
self._initialize_client()
resource_list = []
if not rg:
resource_pager = self.client.list_by_subscription()
else:
resource_pager = self.client.list_by_resource_group(resource_group_name=rg)
if self.track2:
for resource in resource_pager.by_page():
resource_list.extend(resource)
else:
try:
while True:
resource_list.extend(resource_pager.advance_page())
except StopIteration:
pass
return resource_list
def get_policies(self, resource_name: str, rg: str) -> List:
"""
Returns a list of all policies for a given resource in a given resource group.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:return: List of policies
:rtype: List
"""
self._initialize_client()
policy_pager = self.client.list_keys(
**self._make_kwargs(resource_name=resource_name, resource_group_name=rg)
)
policy_list = []
if self.track2:
for policy in policy_pager.by_page():
policy_list.extend(policy)
else:
try:
while True:
policy_list.extend(policy_pager.advance_page())
except StopIteration:
pass
return policy_list
def find_resource(self, resource_name: str, rg: str = None):
"""
Returns the resource with the given resource_name.
If the resource group is not provided, will look through all resources within the
subscription and return the first match. This functionality will only work for
resource types that require unique names within the subscription.
Raises CLIError if no resource is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:return: Resource
:rtype: dict representing self.resource_type
"""
self._initialize_client()
if rg:
try:
return self.client.get(
**self._make_kwargs(
resource_name=resource_name, resource_group_name=rg
)
)
except: # pylint: disable=broad-except
raise CLIError(
"Unable to find {}: {} in resource group: {}".format(
self.resource_type, resource_name, rg
)
)
resource_list = self.get_resources()
if resource_list:
target = next(
(resource for resource in resource_list if resource_name.lower() == resource.name.lower()),
None
)
if target:
return target
raise CLIError(
"Unable to find {}: {} in current subscription {}.".format(
self.resource_type, resource_name, self.sub_id
)
)
def find_policy(self, resource_name: str, rg: str, policy_name: str = "auto"):
"""
Returns the policy with the policy_name for the given resource.
If the policy name is not provided, will look through all policies for the given
resource and return the first usable policy (the first policy that the IoT
extension can use).
Raises CLIError if no usable policy is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:param policy_name: Policy Name
:type policy_name: str
:return: Policy
:rtype: policy
"""
self._initialize_client()
if policy_name.lower() != "auto":
return self.client.get_keys_for_key_name(
**self._make_kwargs(
resource_name=resource_name,
resource_group_name=rg,
key_name=policy_name
)
)
policy_list = self.get_policies(resource_name=resource_name, rg=rg)
for policy in policy_list:
rights_set = set(policy.rights.split(", "))
if self.necessary_rights_set.issubset(rights_set):
logger.info(
"Using policy '%s' for %s interaction.", policy.key_name, self.resource_type
)
return policy
raise CLIError(
POLICY_ERROR_TEMPLATE.format(
self.resource_type,
resource_name,
self.sub_id,
_format_policy_set(self.necessary_rights_set)
)
)
@classmethod
@abstractmethod
def get_target_by_cstring(cls, connection_string):
"""Returns the target information needed from a connection string."""
pass
def get_target(
self, resource_name: str, resource_group_name: str = None, **kwargs
) -> Dict[str, str]:
"""
Returns a dictionary of the given resource's connection string parts to be used
by the extension.
This function finds the target resource and builds up a dictionary of connection
string parts needed for IoT extension operation. In future iteration we will
return a 'Target' object rather than dict but that will be better served aligning
with vNext pattern for Iot Hub/DPS.
If the resource group is not provided, will look through all resources within the
subscription and return the first match. This functionality will only work for
resource types that require unique names within the subscription.
If the policy name is not provided, will look through all policies for the given
resource and return the first usable policy (the first policy that the IoT
extension can use).
Raises CLIError if no resource is found.
:param resource_name: Resource Name
:type resource_name: str
:param rg: Resource Group
:type rg: str
:keyword str login: Connection string for the target resource
:keyword str key_type: Key type to use in connection string construction
:keyword auth_type: Authentication Type for the Dataplane
:paramtype auth_type: AuthenticationTypeDataplane
:keyword str policy_name: Policy name to use
:return: Resource
:rtype: dict representing self.resource_type
"""
cstring = kwargs.get("login")
if cstring:
return self.get_target_by_cstring(connection_string=cstring)
resource_group_name = resource_group_name or kwargs.get("rg")
resource = self.find_resource(resource_name=resource_name, rg=resource_group_name)
key_type = kwargs.get("key_type", "primary")
# Azure AD auth path
auth_type = kwargs.get("auth_type", AuthenticationTypeDataplane.key.value)
if auth_type == AuthenticationTypeDataplane.login.value:
logger.info("Using AAD access token for %s interaction.", self.resource_type)
policy = SimpleNamespace()
policy.key_name = AuthenticationTypeDataplane.login.value
policy.primary_key = AuthenticationTypeDataplane.login.value
policy.secondary_key = AuthenticationTypeDataplane.login.value
return self._build_target(
resource=resource,
policy=policy,
key_type="primary",
**kwargs
)
policy_name = kwargs.get("policy_name", "auto")
rg = resource.additional_properties.get("resourcegroup")
resource_policy = self.find_policy(
resource_name=resource.name, rg=rg, policy_name=policy_name,
)
return self._build_target(
resource=resource,
policy=resource_policy,
key_type=key_type,
**kwargs
)
def get_targets(self, resource_group_name: str = None, **kwargs) -> List[Dict[str, str]]:
"""
Returns a list of targets (dicts representing a resource's connection string parts)
that are usable by the extension within the subscription (and resource group if
provided).
:param rg: Resource Group
:type rg: str
:return: Resources
:rtype: list[dict]
"""
targets = []
resources = self.get_resources(rg=resource_group_name)
if resources:
for resource in resources:
try:
targets.append(
self.get_target(
resource_name=resource.name,
resource_group_name=resource.additional_properties.get("resourcegroup"),
**kwargs
)
)
except HttpResponseError as e:
logger.warning("Could not access %s. %s", resource, e)
return targets
@abstractmethod
def _build_target(self, resource, policy, key_type=None, **kwargs):
"""Returns a dictionary representing the resource connection string parts to
be used by the IoT extension."""
pass
|