import logging
import ipdb
import IPython
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
SOS_ID = 0
EOS_ID = 0
class Attention(nn.Module):
def __init__(self, input_dim, source_dim=None, output_dim=None, bias=False):
super(Attention, self).__init__()
if source_dim is None:
source_dim = input_dim
if output_dim is None:
output_dim = input_dim
self.input_dim = input_dim
self.source_dim = source_dim
self.output_dim = output_dim
self.input_proj = nn.Linear(input_dim, source_dim, bias=bias)
self.output_proj = nn.Linear(input_dim + source_dim, output_dim, bias=bias)
self.mask = None
def set_mask(self, mask):
self.mask = mask
def forward(self, input, source_hids):
batch_size = input.size(0)
source_len = source_hids.size(1)
# (batch, tgt_len, input_dim) -> (batch, tgt_len, source_dim)
x = self.input_proj(input)
# (batch, tgt_len, source_dim) * (batch, src_len, source_dim) -> (batch, tgt_len, src_len)
attn = torch.bmm(x, source_hids.transpose(1, 2))
if self.mask is not None:
attn.data.masked_fill_(self.mask, -float('inf'))
attn = F.softmax(attn.view(-1, source_len), dim=1).view(batch_size, -1, source_len)
# (batch, tgt_len, src_len) * (batch, src_len, source_dim) -> (batch, tgt_len, source_dim)
mix = torch.bmm(attn, source_hids)
# concat -> (batch, tgt_len, source_dim + input_dim)
combined = torch.cat((mix, input), dim=2)
# output -> (batch, tgt_len, output_dim)
output = torch.tanh(self.output_proj(combined.view(-1, self.input_dim + self.source_dim))).view(batch_size, -1, self.output_dim)
return output, attn
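# --- Hedged example (editor's sketch, not part of the original module) ---
# A quick shape check of the Attention module above, assuming a query of shape
# (batch, tgt_len, input_dim) and encoder states of shape (batch, src_len, source_dim).
# Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    _attn = Attention(input_dim=36)
    _query = torch.randn(2, 1, 36)      # (batch, tgt_len, input_dim)
    _source = torch.randn(2, 20, 36)    # (batch, src_len, source_dim)
    _out, _scores = _attn(_query, _source)
    assert _out.shape == (2, 1, 36)     # (batch, tgt_len, output_dim)
    assert _scores.shape == (2, 1, 20)  # (batch, tgt_len, src_len)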
class DecoderDarts(nn.Module):
KEY_ATTN_SCORE = 'attention_score'
KEY_LENGTH = 'length'
KEY_SEQUENCE = 'sequence'
def __init__(self,
layers,
vocab_size,
hidden_size,
dropout,
length,
encoder_length,
args=None,
):
"""[summary]
Parameters
----------
layers : [type]
[description]
vocab_size : int
Vocabulary size, i.e. the number of distinct values in the sequence, usually used to separate the op and node notation.
For example, in the DARTS search space an arch is [node1, op1, node2, op2 ...],
where node ~ [0, ..., num_nodes + 2] and op ~ [0, max_op_id].
To distinguish ops from nodes, we map them as
node = node + 1 (i.e. into the vocab)
op = op + num_nodes + 2 (the 2 accounts for the two input nodes)
hidden_size : [type]
[description]
dropout : [type]
[description]
length : [type]
[description]
encoder_length : [type]
[description]
args : [type], optional
[description], by default None
"""
super(DecoderDarts, self).__init__()
self.args = args
self.layers = layers
self.hidden_size = hidden_size
self.length = length
self.encoder_length = encoder_length
self.vocab_size = vocab_size
self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, self.layers, batch_first=True, dropout=dropout)
self.sos_id = SOS_ID
self.eos_id = EOS_ID
self.init_input = None
self.embedding = nn.Embedding(self.vocab_size, self.hidden_size)
self.dropout = nn.Dropout(dropout)
self.attention = Attention(self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.vocab_size)
def forward_step(self, x, hidden, encoder_outputs):
"""Step function in the decoder.
Note that the output size = 1 in the NASBench case, because we only have one cell to predict.
Parameters
----------
x : Tensor
input to decoder, in NB example [BS, output_size]
hidden : tuple with shape [1, Bs, 36], [1, Bs, 36]
Hidden state
encoder_outputs : tensor
# TODO, QUESTION: should this be decoder outputs?
Output of the encoder? Size [Bs, Seq Length, 36]
Returns
-------
[type]
[description]
"""
batch_size = x.size(0)
output_size = x.size(1)
embedded = self.embedding(x) # BS, 1, 36
embedded = self.dropout(embedded)
output, hidden = self.rnn(embedded, hidden)
output, attn = self.attention(output, encoder_outputs)
# attn: BS, 1, Seq length.
# output: Bs, 1, 36 (Embedding size)
predicted_softmax = F.log_softmax(self.out(output.contiguous().view(-1, self.hidden_size)), dim=1)
predicted_softmax = predicted_softmax.view(batch_size, output_size, -1)
# bs, out_size, class
return predicted_softmax, hidden, attn
def forward(self, x, encoder_hidden=None, encoder_outputs=None):
"""
B = 72 here, but 72 is not defined anywhere...
TODO: find out why vocab size = num_node(5) + num_ops(5) + 2 = 12?
:param x: size: [B, controller_decoder_length]
:param encoder_hidden: tuple, size=2, each of them, [1, B, 96 = encoder_hidden_size]
:param encoder_outputs: [B, 20, 96]
:return:
"""
ret_dict = dict()
ret_dict[DecoderDarts.KEY_ATTN_SCORE] = list()
if x is None:
inference = True
else:
inference = False
x, batch_size, length = self._validate_args(x, encoder_hidden, encoder_outputs)
assert length == self.length, f'sanity check, decoder_length {self.length} must match input {length}.'
decoder_hidden = self._init_state(encoder_hidden)
decoder_outputs = []
sequence_symbols = []
lengths = np.array([length] * batch_size)
def decode(step, step_output, step_attn):
"""
:param step: di, which is matching the Arch, if 0, then it is a index. else it is op.
:param step_output:
:param step_attn:
:return:
"""
decoder_outputs.append(step_output)
ret_dict[DecoderDarts.KEY_ATTN_SCORE].append(step_attn)
symbols = self.decoder_outputs_to_symbols(step, decoder_outputs)
sequence_symbols.append(symbols)
eos_batches = symbols.data.eq(self.eos_id)
if eos_batches.dim() > 0:
eos_batches = eos_batches.cpu().view(-1).numpy()
update_idx = ((lengths > step) & eos_batches) != 0
lengths[update_idx] = len(sequence_symbols)
return symbols
decoder_input = x[:, 0].unsqueeze(1)
for di in range(length):
if not inference:
decoder_input = x[:, di].unsqueeze(1)
decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs)
# decoder_output yields [B,1, length], which is the vocabulary size!
# step_attn -> [B, 1, length]
step_output = decoder_output.squeeze(1) # [B, length]
symbols = decode(di, step_output, step_attn)
decoder_input = symbols
ret_dict[DecoderDarts.KEY_SEQUENCE] = sequence_symbols # length = 41 x [72, 1]
ret_dict[DecoderDarts.KEY_LENGTH] = lengths.tolist()
# IPython.embed(header='Checking forward of decoder...')
return decoder_outputs, decoder_hidden, ret_dict
def decoder_outputs_to_symbols(self, step, decoder_outputs):
""" node (4) + 2 + op (7) = 13, so vocab is 14 as the first is discarded for some unknown reason..."""
op_start_index = self.args.num_intermediate_nodes + 2 # 5 is number of node, 2 is previous input and input.
if step % 2 == 0: # sample index, should be in [1, index-1]
# index = tmp % (self.args.num_intermediate_nodes * 4) // 4 + 3
index = step % (self.args.num_intermediate_nodes * 4) // 4 + 3
# Core part: decoder_outputs[-1] has vocab_size logits; restrict the argmax to the valid node ids [1, index - 1], then shift back by 1.
symbols = decoder_outputs[-1][:, 1:index].topk(1)[1] + 1
else: # sample operation, should be in [7, 11]
symbols = decoder_outputs[-1][:, op_start_index:].topk(1)[1] + op_start_index
return symbols
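# Worked example (editor's note): with num_intermediate_nodes = 4,
# op_start_index = 4 + 2 = 6. Even steps sample a node id: at step 0,
# index = 0 % 16 // 4 + 3 = 3, so the argmax runs over ids [1, 2] (the two
# input nodes); at step 4, index = 4 % 16 // 4 + 3 = 4, so ids [1, 2, 3] are allowed.
# Odd steps sample an op id, restricted to ids >= op_start_index.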
def _init_state(self, encoder_hidden):
""" Initialize the encoder hidden state. """
if encoder_hidden is None:
return None
if isinstance(encoder_hidden, tuple):
encoder_hidden = tuple([h for h in encoder_hidden])
else:
encoder_hidden = encoder_hidden
return encoder_hidden
def _validate_args(self, x, encoder_hidden, encoder_outputs):
if encoder_outputs is None:
raise ValueError("Argument encoder_outputs cannot be None when attention is used.")
# inference batch size
if x is None and encoder_hidden is None:
batch_size = 1
else:
if x is not None:
batch_size = x.size(0)
else:
batch_size = encoder_hidden[0].size(1)
# set default input and max decoding length
if x is None:
x = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1).cuda()
max_length = self.length
else:
max_length = x.size(1)
return x, batch_size, max_length
def eval(self):
return
def infer(self, x, encoder_hidden=None, encoder_outputs=None):
decoder_outputs, decoder_hidden, _ = self(x, encoder_hidden, encoder_outputs)
return decoder_outputs, decoder_hidden
class Decoder_Nasbench_old(DecoderDarts):
def __init__(self,
layers,
vocab_size,
hidden_size,
dropout,
length,
encoder_length,
args,
):
# super(DecoderDarts, self).__init__()
# self.args = args
# basically compute this to verify.
self.num_ops = 3
self.child_nodes = args.num_intermediate_nodes
self.num_cells_to_search = 1 # only have one cell to search
# unlike DARTS, we do not have the -1/-2 previous cells as inputs, so there is only one input node
self.num_inputs = 1
verify_vocab_size = self.num_inputs + self.child_nodes + self.num_ops # 1 + 5 + 3 = 9
assert verify_vocab_size == vocab_size, f'Vocab size {vocab_size} should equal ' \
f'self.num_inputs + self.child_nodes + self.num_ops = {verify_vocab_size}'
assert length == 2 * self.child_nodes, f'length = {length} but should be 2 * {self.child_nodes}'
super().__init__(
layers,
vocab_size,
hidden_size,
dropout,
length,
encoder_length,
args)
def decoder_outputs_to_symbols(self, step, decoder_outputs):
op_start_index = self.child_nodes + self.num_inputs # number of child nodes plus 1 for the single input node.
if step % 2 == 0: # sample index, should be in [1, index-1]
# because they have a reduce cell and a normal cell, the //2 should be removed
# so, if length == 10, the valid inputs are just [2, 3, 4, 5, 6]
index = step // self.num_cells_to_search % 10 // 2 + self.num_inputs # TODO: this is heavily hard-coded, fix it.
# Core part: decoder_outputs[-1] has vocab_size logits; restrict the argmax to the valid node ids, then shift back by 1.
symbols = decoder_outputs[-1][:, 1:index + 1].topk(1)[1] + 1
else:
# sample operation, should be in [6,7,8] , op_start_index = 6,
symbols = decoder_outputs[-1][:, op_start_index:].topk(1)[1] + op_start_index
return symbols
class DecoderProxylessNAS(nn.Module):
def __init__(self,
layers,
vocab_size,
hidden_size,
dropout,
length,
):
super(DecoderProxylessNAS, self).__init__()
self.layers = layers
self.hidden_size = hidden_size
self.length = length
self.vocab_size = vocab_size
self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, self.layers, batch_first=True, dropout=dropout)
self.sos_id = SOS_ID
self.eos_id = EOS_ID
self.embedding = nn.Embedding(self.vocab_size, self.hidden_size)
self.dropout = dropout
self.attention = Attention(self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.vocab_size)
def forward(self, x, encoder_hidden=None, encoder_outputs=None):
decoder_hidden = self._init_state(encoder_hidden)
'''
Copyright (c) 2017 <NAME>
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
from __future__ import unicode_literals
from __future__ import print_function
from kaitaistruct import __version__ as ks_version, KaitaiStream, BytesIO
import apfs
import binascii
import collections
import logging
import lzfse
import struct
import tempfile
from writer import DataType
from common import *
import zlib
log = logging.getLogger('MAIN.HELPERS.APFS_READER')
class ApfsDbInfo:
'''
This class writes information about db version and volumes to the database.
It also checks if the db information corresponds to the currently loaded
image's volumes.
'''
def __init__(self, db):
self.db = db
self.version = 1 # This will change if db structure changes in future
self.ver_table_name = 'Version_Info'
self.vol_table_name = 'Volumes_Info'
self.version_info = collections.OrderedDict([('Version',DataType.INTEGER)])
self.volume_info = collections.OrderedDict([('Name',DataType.TEXT),('UUID',DataType.TEXT),
('Files',DataType.INTEGER),('Folders',DataType.INTEGER),
('Created',DataType.INTEGER),('Updated',DataType.INTEGER)])
def WriteVersionInfo(self):
self.db.CreateTable(self.version_info, self.ver_table_name)
data = [self.version]
self.db.WriteRow(data)
def WriteVolInfo(self, volumes):
'''Write volume info to separate table'''
self.db.CreateTable(self.volume_info, self.vol_table_name)
data = []
for vol in volumes:
data.append([vol.volume_name, vol.uuid, vol.num_files, vol.num_folders, vol.time_created, vol.time_updated])
self.db.WriteRows(data, self.vol_table_name)
def CheckVerInfo(self):
'''Returns true if info in db matches current version number'''
query = 'SELECT Version FROM {}'.format(self.ver_table_name)
success, cursor, error = self.db.RunQuery(query)
index = 0
if success:
for row in cursor:
db_version = row[0]
if db_version == self.version:
return True
else:
log.info('Db version is {} but current version is {}'.format(db_version, self.version))
return False
else:
log.error('Error querying volume info from db: ' + error)
return False
def CheckVolInfo(self, volumes):
'''Returns true if info in db matches volume objects'''
query = 'SELECT Name, UUID, Files, Folders, Created, Updated FROM {}'.format(self.vol_table_name)
success, cursor, error = self.db.RunQuery(query)
index = 0
data_is_unaltered = True
if success:
for row in cursor:
if row[0] != volumes[index].volume_name or \
row[1] != volumes[index].uuid or \
row[2] != volumes[index].num_files or \
row[3] != volumes[index].num_folders or \
row[4] != volumes[index].time_created or \
row[5] != volumes[index].time_updated :
data_is_unaltered = False
log.info('DB volume info does not match file info! Checked {}'.format(volumes[index].name))
break
index += 1
else:
log.error('Error querying volume info from db: ' + error)
return index == len(volumes) and data_is_unaltered
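# Hedged usage sketch (editor's note, not part of the original source): the
# typical flow is to call WriteVersionInfo() and WriteVolInfo(volumes) when a
# new database is created, and CheckVerInfo() / CheckVolInfo(volumes) when an
# existing database is reopened, to decide whether it can be reused as-is.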
class ApfsFileSystemParser:
'''
Reads and parses the file system, writes output to a database.
'''
def __init__(self, apfs_volume, db):
self.name = apfs_volume.name
self.volume = apfs_volume
self.container = apfs_volume.container
self.db = db
self.num_records_read_total = 0
self.num_records_read_batch = 0
self.hardlink_records = []
self.extent_records = []
self.thread_records = []
self.named_records = []
self.attr_records = []
self.hardlink_info = collections.OrderedDict([('CNID',DataType.INTEGER), ('Parent_CNID',DataType.INTEGER),
('Name',DataType.TEXT)])
self.extent_info = collections.OrderedDict([('CNID',DataType.INTEGER), ('Offset',DataType.INTEGER),
('Size',DataType.INTEGER), ('Block_Num',DataType.INTEGER)])
self.attr_info = collections.OrderedDict([('CNID',DataType.INTEGER), ('Name',DataType.TEXT),('Type',DataType.INTEGER),('Data',DataType.BLOB),
('Logical_uncompressed_size',DataType.INTEGER),('Extent_CNID',DataType.INTEGER)])
self.thread_info = collections.OrderedDict([('CNID',DataType.INTEGER), ('Parent_CNID',DataType.INTEGER),
('Extent_CNID',DataType.INTEGER), ('Name',DataType.TEXT), ('Created',DataType.INTEGER), ('Modified',DataType.INTEGER), ('Changed',DataType.INTEGER), ('Accessed',DataType.INTEGER), ('Flags',DataType.INTEGER), ('Links_or_Children',DataType.INTEGER), ('BSD_flags',DataType.INTEGER), ('UID',DataType.INTEGER), ('GID',DataType.INTEGER), ('Mode',DataType.INTEGER), ('Logical_Size',DataType.INTEGER), ('Physical_Size',DataType.INTEGER)])
self.named_info = collections.OrderedDict([('CNID',DataType.INTEGER), ('Parent_CNID',DataType.INTEGER),
('Timestamp',DataType.INTEGER),('ItemType',DataType.INTEGER),
('Name',DataType.TEXT)])
self.compressed_info = collections.OrderedDict([('CNID',DataType.INTEGER),('Data',DataType.BLOB),('Uncompressed_size',DataType.INTEGER),
('Extent_CNID',DataType.INTEGER),('fpmc_in_extent',DataType.INTEGER),('Extent_Logical_Size',DataType.INTEGER)])
#TODO: Remove fpmc_in_extent, this can be detected by checking Data == None
self.paths_info = collections.OrderedDict([('CNID',DataType.INTEGER),('Path',DataType.TEXT)])
## Optimization for search
self.blocks_read = set()
self.container_type_files = self.container.apfs.ContentType.files
self.container_type_location = self.container.apfs.ContentType.location
self.ptr_type = apfs.Apfs.PointerRecord
self.ext_type = self.container.apfs.EntryType.extent.value
self.name_type = self.container.apfs.EntryType.name.value
self.thrd_type = self.container.apfs.EntryType.thread.value
self.hard_type = self.container.apfs.EntryType.hardlink.value
self.attr_type = self.container.apfs.EntryType.extattr.value
## End optimization
def write_records(self):
if self.hardlink_records:
self.db.WriteRows(self.hardlink_records, self.name + '_Hardlinks')
if self.extent_records:
self.db.WriteRows(self.extent_records, self.name + '_Extents')
if self.thread_records:
self.db.WriteRows(self.thread_records, self.name + '_Threads')
if self.attr_records:
self.db.WriteRows(self.attr_records, self.name + '_Attributes')
if self.named_records:
self.db.WriteRows(self.named_records, self.name + '_IndexNodes')
def create_tables(self):
self.db.CreateTable(self.hardlink_info, self.name + '_Hardlinks')
self.db.CreateTable(self.extent_info, self.name + '_Extents')
self.db.CreateTable(self.attr_info, self.name + '_Attributes')
self.db.CreateTable(self.thread_info, self.name + '_Threads')
self.db.CreateTable(self.named_info, self.name + '_IndexNodes')
self.db.CreateTable(self.compressed_info, self.name + '_Compressed_Files')
self.db.CreateTable(self.paths_info, self.name + '_Paths')
def clear_records(self):
self.hardlink_records = []
self.extent_records = []
self.thread_records = []
self.named_records = []
self.attr_records = []
def create_indexes(self):
'''Create indexes on cnid and path in database'''
index_queries = ["CREATE INDEX {0}_attribute_cnid ON {0}_Attributes (CNID)".format(self.name),
"CREATE INDEX {0}_extent_cnid ON {0}_Extents (CNID)".format(self.name),
"CREATE INDEX {0}_index_cnid ON {0}_IndexNodes (CNID)".format(self.name),
"CREATE INDEX {0}_paths_path_cnid ON {0}_Paths (Path, CNID)".format(self.name),
"CREATE INDEX {0}_threads_cnid_parent_cnid ON {0}_Threads (CNID, Parent_CNID)".format(self.name),
"CREATE INDEX {0}_compressed_files_cnid ON {0}_Compressed_Files (CNID)".format(self.name)]
for query in index_queries:
success, cursor, error = self.db.RunQuery(query, writing=True)
if not success:
log.error('Error creating index: ' + error)
break
def run_query(self, query, writing=True):
'''Returns True/False on query execution'''
success, cursor, error = self.db.RunQuery(query, writing)
if not success:
log.error('Error executing query : Query was {}, Error was {}'.format(query, error))
return False
return True
def populate_compressed_files_table(self):
'''Pre-process all compressed file metadata and populate the compressed file table for quick retrieval later'''
# In APFS, for compressed files, sometimes the compressed header (fpmc) is in the database, at other times
# it is in an extent. The compressed data is also sometimes inline, at other times in an extent. This table
# makes the lookup easier as it consolidates the data, thus avoiding multiple queries when fetching
# info about a file. Also, we provide the uncompressed size of the file (logical size), so it's always
# available for listing, without having to go and read an extent.
#Copy all decmpfs-Type2 attributes to table, where no resource forks <-- Nothing to do, just copy
type2_no_rsrc_query = "INSERT INTO {0}_Compressed_Files select b.CNID, b.Data, "\
" b.logical_uncompressed_size, 0 as extent_cnid, 0 as fpmc_in_extent, 0 as Extent_Logical_Size"\
" from {0}_Attributes as b "\
" left join {0}_Attributes as a on (a.cnid = b.cnid and a.Name = 'com.apple.ResourceFork') "\
" where b.Name='com.apple.decmpfs' and b.Type=2 and a.cnid is null".format(self.name)
if not self.run_query(type2_no_rsrc_query, True):
return
#Add all decmpfs-Type2 attributes where resource forks exist, rsrc's extent_cnid is used
type2_rsrc_query = "INSERT INTO {0}_Compressed_Files "\
"SELECT b.CNID, b.Data, b.logical_uncompressed_size, a.extent_cnid as extent_cnid, 0 as fpmc_in_extent, "\
" a.logical_uncompressed_size as Extent_Logical_Size FROM {0}_Attributes as b "\
" left join {0}_Attributes as a on (a.cnid = b.cnid and a.Name = 'com.apple.ResourceFork')"\
" where b.Name='com.apple.decmpfs' and b.Type=2 and a.cnid is not null".format(self.name)
if not self.run_query(type2_rsrc_query, True):
return
#Process decmpfs-Type1 attributes. Go to extent, read fpmc header to get uncompressed size
# This query gets extents for decmpfs and rsrc but only the first one, this way there is only
# one row returned for every cnid, and we are also only interested in the first extent.
# 0 1 2
type1_query = "select b.CNID, b.extent_cnid as decmpfs_ext_cnid, b.logical_uncompressed_size, "\
"e.Block_Num as decmpfs_first_ext_Block_num, a.extent_cnid as rsrc_extent_cnid , er.Block_Num as rsrc_first_extent_Block_num, "\
" a.logical_uncompressed_size as Extent_Logical_Size from {0}_Attributes as b "\
" left join {0}_Attributes as a on (a.cnid = b.cnid and a.Name = 'com.apple.ResourceFork') "\
" left join {0}_Extents as e on e.cnid=b.extent_cnid "\
" left join {0}_Extents as er on er.cnid=a.extent_cnid "\
" where b.Name='com.apple.decmpfs' and b.Type=1"\
" and (e.offset=0 or e.offset is null) and (er.offset = 0 or er.offset is null)".format(self.name)
success, cursor, error = self.db.RunQuery(type1_query, writing=False)
if success:
block_size = self.container.apfs.block_size
to_write = []
for row in cursor:
# Go to decmpfs_extent block and read uncompressed size
logical_size = row[2]
#decmpfs_ext_cnid = row[1]
self.container.seek(block_size * row[3])
decmpfs = self.container.read(logical_size)
#magic, compression_type, uncompressed_size = struct.unpack('<IIQ', decmpfs[0:16])
uncompressed_size = struct.unpack('<Q', decmpfs[8:16])[0]
#TODO: check magic if magic =='fpmc'
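# Editor's note (assumed layout): the decmpfs attribute starts with a 16-byte header:
# magic (4 bytes, appears as 'fpmc' on disk), compression_type (uint32, little-endian),
# uncompressed_size (uint64, little-endian), optionally followed by inline compressed data.
# The single '<Q' unpack above reads only the uncompressed_size field.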
if row[4] is None:
# No resource fork , data must be in decmpfs_extent
if logical_size <= 32: # If <= 32 bytes, write data to db, else leave it in the extent
to_write.append([row[0], buffer(decmpfs), uncompressed_size, 0, 0, 0])
else:
to_write.append([row[0], None, uncompressed_size, row[1], 1, logical_size])
else:
# resource fork has data
to_write.append([row[0], buffer(decmpfs), uncompressed_size, row[4], 0, row[6]])
if to_write:
self.db.WriteRows(to_write, self.name + '_Compressed_Files')
else:
log.error('Error executing query : Query was {}, Error was {}'.format(type1_query, error))
return
def read_volume_records(self):
''' Get root btree node and parse all children, add
all information to a database.
'''
self.create_tables()
root_block = self.container.read_block(self.volume.root_block_num)
self.read_entries(self.volume.root_block_num, root_block)
# write remaining records to db
if self.num_records_read_batch > 0:
self.num_records_read_batch = 0
self.write_records()
self.clear_records() # Clear the data once written
self.create_other_tables_and_indexes()
def create_other_tables_and_indexes(self):
'''Populate paths table in db, create compressed_files table and create indexes for faster queries'''
insert_query = "INSERT INTO {0}_Paths SELECT * FROM " \
"( WITH RECURSIVE " \
" under_root(path,name,cnid) AS " \
" ( VALUES('','root',2) " \
" UNION ALL " \
" SELECT under_root.path || '/' || {0}_IndexNodes.name, " \
"{0}_IndexNodes.name, {0}_IndexNodes.cnid | |
import numpy as np
from math import sqrt
import itertools
import matplotlib.pyplot as plt
class MDP():
def __init__(self):
# Max time steps.
self.T = 15
#Origin at Top left
self.GRID_SIZE_COL = 6
self.GRID_SIZE_ROW = 5
self.reward = 0
'''State'''
# position of player
self.p = (0,0)
# position of minotaur
self.m = (4,4)
# dead state
self.dead = ((-100, -100), (-100, -100))
# win state for player
self.win = ((100, 100), (100, 100))
#states in form (x,y,x,y) correspond to killing position. all these states are modelled with state ((-100,-100),(-100,-100))
self.killing_states = [(position, position) for position in
itertools.product(range(self.GRID_SIZE_ROW), range(self.GRID_SIZE_COL)) if np.all(position != (4,4))] #state ((4,4)(4,4)) is included in winning set
# states in form (4,4,x,y) correspond to winning positions. all these states are modelled with state ((100,100),(100,100))
self.winning_states = [((4, 4), position) for position in
itertools.product(range(self.GRID_SIZE_ROW), range(self.GRID_SIZE_COL))]
# number of STATES
# remove states that belong to killing and winning set
self.NUM_STATES = (self.GRID_SIZE_COL * self.GRID_SIZE_ROW) ** 2 + 2 - (len(self.killing_states) + len(self.winning_states))# add WIN and DEAD state
'''Actions:
0: Left
1: Up
2: Right
3: Down
4: Stay
'''
self.actions_p = [0, 1, 2, 3, 4]
self.index_actions_p = ['left', 'up', 'right', 'down', 'stay']
self.actions_m = [0, 1, 2, 3, 4] #79.27%
#self.actions_m = [0, 1, 2, 3] #80.70%
self.index_actions_m = ['left', 'up', 'right', 'down', 'stay']
#self.index_actions_m = ['left', 'up', 'right', 'down']
self.win_mov = 10 #leads to win state
self.dead_mov = -10 #leads to dead state
# Number of possible actions for player and Minotaur
self.n_actions_p = len(self.actions_p)
self.n_actions_m = len(self.actions_m) # NOTE:THIS CAN BE SET TO 4/5 for part (c for eg.)
# List of acceptable actions that can be taken by the player (Change with every state)
self.acceptable_actions_player = []
# Container for walls
self.walls = np.zeros((self.GRID_SIZE_ROW, self.GRID_SIZE_COL, 4), dtype=int)
# State transition matrix: each cell (x,y) contains the probability of performing the action (x,y)
self.p_sHat = np.zeros((self.n_actions_p, self.n_actions_m))
'''Member Function'''
self.define_wall()
# simulate game (movement between player and minotaur)
self.game_grid = []
self.define_grid()
# State transition probabilities
self.all_states = []
self.states_mapping = []
self.state_values = []
self.policy = []
self.dynamic_programming()
# Forward iteration to simulate one/several game
self.forward_iteration()
def define_grid(self):
self.game_grid.append('###|######')
self.game_grid.append('# | #')
self.game_grid.append('# | |__#')
self.game_grid.append('# | | #')
self.game_grid.append('# ______ #')
self.game_grid.append('# | #')
self.game_grid.append('###|######')
def define_wall(self):
'''
Wall definition:
Every cell has 4 flags (0: free, 1: blocked by wall)
0: Left
1: Up
2: Right
3: Down
For example, self.walls[0,0] = [0,1,1,0] means that
for cell (0,0) the Up and Right sides are blocked by walls
'''
self.walls[0,0] = [1,1,0,0] # TOP LEFT
self.walls[self.GRID_SIZE_ROW-1,self.GRID_SIZE_COL-1] = [0,0,1,1] # BOTTOM RIGHT
self.walls[self.GRID_SIZE_ROW-1, 0] = [1,0,0,1]
self.walls[0,self.GRID_SIZE_COL-1] = [0,1,1,0]
# cells between TOP LEFT and TOP RIGHT
self.walls[0, 1:self.GRID_SIZE_COL-1] = [0,1,0,0]
# cells between BOTTOM LEFT and BOTTOM RIGHT
self.walls[self.GRID_SIZE_ROW-1, 1:self.GRID_SIZE_COL-1,] = [0,0,0,1]
# cells between TOP LEFT and BOTTOM LEFT
self.walls[1:self.GRID_SIZE_ROW-1,0] = [1,0,0,0]
# cells between TOP RIGHT and BOTTOM RIGHT
self.walls[1:self.GRID_SIZE_ROW-1, self.GRID_SIZE_COL-1] = [0,0,1,0]
'''Custom wall cells: depend on map
SET MANUALLY!!
'''
self.walls[0:3, 1, 2] = 1
self.walls[0:3, 2, 0] = 1
self.walls[self.GRID_SIZE_ROW-1, 1:5,1] = 1
self.walls[self.GRID_SIZE_ROW-2,1:5,3] = 1
self.walls[self.GRID_SIZE_ROW-1,3,2] = 1
self.walls[self.GRID_SIZE_ROW-1,4,0] = 1
self.walls[1:3, 3,2] = 1
self.walls[1:3, 4,0] = 1
self.walls[1,4:self.GRID_SIZE_COL,3] = 1
self.walls[2, 4:self.GRID_SIZE_COL,1] = 1
#self.walls = np.transpose(self.walls, (1,0,2))
def find_next_location(self, current_location, action, agent):
'''
returns location (x_hat, y_hat) after applying input action to current_location
'''
x = current_location[0]
y = current_location[1]
if action == 0: #LEFT
next_location = (x,y-1)
elif action == 1: #UP
next_location = (x-1,y)
elif action == 2: #RIGHT
next_location = (x,y+1)
elif action == 3: #DOWN
next_location = (x+1,y)
else: #STAY
next_location = (x,y)
if agent == 'minotaur':
if next_location[0] < 0: # it moved in -ve along ROW axis
next_location = (self.GRID_SIZE_ROW-1, next_location[1])
if next_location[0] >= self.GRID_SIZE_ROW: #It exceeded the Max ROW
next_location = (0, next_location[1])
if next_location[1] < 0: # agent moved -ve in COL axis
next_location = (next_location[0],self.GRID_SIZE_COL-1)
if next_location[1] >= self.GRID_SIZE_COL: #It exceeded Max COL
next_location = (next_location[0],0)
return next_location
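# Worked example (editor's note): only the minotaur wraps around the grid.
# E.g. find_next_location((0, 0), 1, 'minotaur') tries UP to (-1, 0) and wraps
# to (GRID_SIZE_ROW - 1, 0). The player's moves are constrained separately by
# the walls in check_wall_constraint().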
def check_wall_constraint(self, forbidden_actions):
# NOTE: walls[0,-1] actually means walls[0,5]!!!
if self.walls[self.p[0], self.p[1], 0] == 1: # player's LEFT side is blocked
forbidden_actions[0] = 1
if self.walls[self.p[0], self.p[1], 1] == 1: # player's TOP side is blocked
forbidden_actions[1] = 1
if self.walls[self.p[0], self.p[1], 2] == 1: # player's RIGHT side is blocked
forbidden_actions[2] = 1
if self.walls[self.p[0], self.p[1], 3] == 1: # player's BOTTOM side is blocked
forbidden_actions[3] = 1
return forbidden_actions
def calc_transition_prob(self):
'''
This function takes self.p, self.m as inputs
- it examines the state and calculates set of possible actions
- The set of possible actions lead to a state transition matrix
State transition matrix: p_sHat (5x5 because 5 actions possible for p and m each)
p_actions → 0 1 2 3 4
m_actions ↓
0 p_left,m_left p_up,m_left ...
1 p_left,m_up p_up,m_up .
2 p_left,m_right p_up,m_right p_right,m_right
3 p_left,m_down p_up,m_down . p_down,m_down
4 p_left,m_stay p_up,m_stay . p_stay,m_stay
so if p_sHat[2,1] = 1/25, this means probability that player moved up and minotaur moved right is 1/25
'''
#### CHECK only MOVABLE ACTIONS!
# Generate all next possible locations for player and minotaur -- save the player's movement too!
next_locations_player = [self.find_next_location(self.p, mov, 'player') for mov in (self.actions_p)]
next_locations_minotaur = [self.find_next_location(self.m, mov, 'minotaur') for mov in self.actions_m]
# Container for storing forbidden actions for player: 1 means forbidden, free otherwise
forbidden_actions_player = np.zeros((self.n_actions_p), dtype=int) # don't take into account dead and win state
# check wall restrictions from current state
forbidden_actions_player = self.check_wall_constraint(forbidden_actions_player)
self.p_sHat = np.zeros((self.n_actions_p, self.n_actions_m)) # initialize p_sHat
for a_p in self.actions_p: # iterate over player action indices, not the flag values
if forbidden_actions_player[a_p] == 0:
for a_m in self.actions_m:
if np.all(next_locations_player[a_p] == next_locations_minotaur[a_m]):
forbidden_actions_player[a_p] = 1
n_possible_actions_player = np.count_nonzero(
forbidden_actions_player == 0) # number of possible safe actions for player
probability_parameter = 1.0 / ( n_possible_actions_player * self.n_actions_m ) # since Minotaur can always have all actions
self.acceptable_actions_player = [value[0] for value in np.argwhere(forbidden_actions_player == 0).tolist()]
# update transition probability matrix for current state
for j in range(len(self.acceptable_actions_player)):
self.p_sHat[self.acceptable_actions_player[j], :] = probability_parameter # otherwise probability is zero
if (round(np.sum(self.p_sHat), 2) != 1):
print('p_sHat NOT summing to 1 !!!')
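# Worked example (editor's note): if 3 of the player's 5 actions remain allowed
# and the minotaur keeps all 5 of its actions, probability_parameter = 1 / (3 * 5) = 1/15.
# The 3 allowed rows of p_sHat are filled with 1/15 in each of the 5 columns,
# so the matrix sums to 15 * (1/15) = 1.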
def update_state(self, action):
'''
Update input state after input action
action: tuple representing (player_action, minotaur_action)
returns the next state (player_location, minotaur_location)
'''
player_action = action[0]
next_player_location = self.find_next_location(self.p, player_action, 'player')
minotaur_action = action[1]
next_minotaur_location = self.find_next_location(self.m, minotaur_action, 'minotaur')
#if player is in (4,4) move to winning state!
if np.all(next_player_location == np.array([4,4]) ):
return self.win
# if player is in the same position as the minotaur then move to dead state!
if np.all(next_player_location == next_minotaur_location):
return self.dead
return (next_player_location, next_minotaur_location)
def dynamic_programming(self):
self.define_grid()
# Generate all possible states (player_location, minotaur_location)
all_positions = [positions_pair for positions_pair in itertools.product(range(self.GRID_SIZE_ROW), range(self.GRID_SIZE_COL))]
self.all_states = [states_pair for states_pair in itertools.product(all_positions, all_positions) if states_pair not in self.killing_states and states_pair not in self.winning_states] # all positions for player/minotaur]
self.all_states.append(self.dead) # append dead state
self.all_states.append(self.win) # win state
# assign an index to every state - useful for accessing the state value of other states
self.states_mapping = dict(zip(list(self.all_states), range(0, self.NUM_STATES)))
## Change all_positions and self.all_states to numpy array
self.all_states = (np.array(self.all_states)).reshape((self.NUM_STATES, 4))
# all_actions = np.array(all_actions)
self.state_values = np.zeros((self.NUM_STATES, self.T)) # keep the best state values computed in each iteration
self.policy = np.zeros((self.NUM_STATES, self.T)) # store the best sequence of actions for the player
#####base case of dynamic programming - compute state value at timestep T
# WIN state
win_state_indx = self.states_mapping[self.win]
self.state_values[win_state_indx , self.T-1] = 1
# in all other states, terminal reward is zero
for t in range(self.T - 2, -1, -1):
print('----------------------------------')
print('Time: ' + str(t + 1))
for i in range(self.NUM_STATES):
# print('State: ' + str(self.all_states[i,:]))
# change current state
self.p = self.all_states[i, 0:2]
self.m = self.all_states[i, 2:4]
if np.all(((self.p[0], self.p[1]), (self.m[0], self.m[1])) == self.win):
self.state_values[i, t] = 100 # we don't care about the exact value since it's a terminal state?
self.policy[i,t] = self.win_mov
elif np.all(((self.p[0], self.p[1]), (self.m[0], self.m[1]))
x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at a time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
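# Hedged example (editor's sketch): conjoin() takes a list of argument-less
# generator functions and yields the Cartesian product of their outputs. Note
# that the same `values` list object is re-yielded every time, so copy it if
# you want to keep the results.
if __name__ == "__main__":
    def bit():
        yield 0
        yield 1
    print([tuple(v) for v in conjoin([bit, bit])])  # [(0, 0), (0, 1), (1, 0), (1, 1)]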
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
# generates the possibilities for the columns in that row.
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
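# Hedged example (editor's sketch): count solutions on a small board; the
# 6-queens problem is known to have 4 distinct solutions.
if __name__ == "__main__":
    q = Queens(6)
    print(sum(1 for _ in q.solve()))  # expected: 4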
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 through m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 through m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max
LON_CTR,
'target_lat': LAT_CTR,
'nrows_blend': HALO_BLEND,
#
# Question:
# For an ESGgrid-type grid, what should stretch_fac be set to? This depends
# on how the FV3 code uses the stretch_fac parameter in the namelist file.
# Recall that for an ESGgrid, it gets set in the function set_gridparams_ESGgrid(.sh)
# to something like 0.9999, but is it ok to set it to that here in the
# FV3 namelist file?
#
'stretch_fac': STRETCH_FAC,
'npx': npx,
'npy': npy,
'layout': [LAYOUT_X, LAYOUT_Y],
'bc_update_interval': LBC_SPEC_INTVL_HRS
}
settings['gfs_physics_nml'] = {
'kice': kice or None,
'lsoil': lsoil or None,
'do_shum': DO_SHUM,
'do_sppt': DO_SPPT,
'do_skeb': DO_SKEB,
'do_spp': DO_SPP,
'n_var_spp': N_VAR_SPP,
'n_var_lndp': N_VAR_LNDP,
'lndp_type': LNDP_TYPE,
'lndp_each_step': LSM_SPP_EACH_STEP,
'fhcyc': FHCYC_LSM_SPP_OR_NOT
}
#
# Add to "settings" the values of those namelist variables that specify
# the paths to fixed files in the FIXam directory. As above, these namelist
# variables are physics-suite-independent.
#
# Note that the array FV3_NML_VARNAME_TO_FIXam_FILES_MAPPING contains
# the mapping between the namelist variables and the names of the files
# in the FIXam directory. Here, we loop through this array and process
# each element to construct each line of "settings".
#
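# Illustrative example (editor's note, values assumed): an entry of
# FV3_NML_VARNAME_TO_FIXam_FILES_MAPPING is expected to look like
# "FNGLAC | global_glacier.2x2.grd"; the regex below splits it into
# nml_var_name = "FNGLAC" and FIXam_fn = "global_glacier.2x2.grd".
#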
dummy_run_dir=os.path.join(EXPTDIR,"any_cyc")
if DO_ENSEMBLE:
dummy_run_dir=os.path.join(dummy_run_dir,"any_ensmem")
regex_search="^[ ]*([^| ]+)[ ]*[|][ ]*([^| ]+)[ ]*$"
num_nml_vars=len(FV3_NML_VARNAME_TO_FIXam_FILES_MAPPING)
namsfc_dict = {}
for i in range(num_nml_vars):
mapping=f"{FV3_NML_VARNAME_TO_FIXam_FILES_MAPPING[i]}"
tup = find_pattern_in_str(regex_search, mapping)
nml_var_name = tup[0]
FIXam_fn = tup[1]
fp="\"\""
if FIXam_fn:
fp=os.path.join(FIXam,FIXam_fn)
#
# If not in NCO mode, for portability and brevity, change fp so that it
# is a relative path (relative to any cycle directory immediately under
# the experiment directory).
#
if RUN_ENVIR != "nco":
fp = os.path.relpath(os.path.realpath(fp), start=dummy_run_dir)
#
# Add a line to the variable "settings" that specifies (in a yaml-compliant
# format) the name of the current namelist variable and the value it should
# be set to.
#
namsfc_dict[nml_var_name] = fp
#
# Add namsfc_dict to settings
#
settings['namsfc'] = namsfc_dict
#
# Use netCDF4 when running the North American 3-km domain due to file size.
#
if PREDEF_GRID_NAME == "RRFS_NA_3km":
settings['fms2_io_nml'] = {
'netcdf_default_format': 'netcdf4'
}
#
# Add the relevant tendency-based stochastic physics namelist variables to
# "settings" when running with SPPT, SHUM, or SKEB turned on. If running
# with SPP or LSM SPP, set the "new_lscale" variable. Otherwise only
# include an empty "nam_stochy" stanza.
#
nam_stochy_dict = {}
if DO_SPPT:
nam_stochy_dict.update({
'iseed_sppt': ISEED_SPPT,
'new_lscale': NEW_LSCALE,
'sppt': SPPT_MAG,
'sppt_logit': SPPT_LOGIT,
'sppt_lscale': SPPT_LSCALE,
'sppt_sfclimit': SPPT_SFCLIMIT,
'sppt_tau': SPPT_TSCALE,
'spptint': SPPT_INT,
'use_zmtnblck': USE_ZMTNBLCK
})
if DO_SHUM:
nam_stochy_dict.update({
'iseed_shum': ISEED_SHUM,
'new_lscale': NEW_LSCALE,
'shum': SHUM_MAG,
'shum_lscale': SHUM_LSCALE,
'shum_tau': SHUM_TSCALE,
'shumint': SHUM_INT
})
if DO_SKEB:
nam_stochy_dict.update({
'iseed_skeb': ISEED_SKEB,
'new_lscale': NEW_LSCALE,
'skeb': SKEB_MAG,
'skeb_lscale': SKEB_LSCALE,
'skebnorm': SKEBNORM,
'skeb_tau': SKEB_TSCALE,
'skebint': SKEB_INT,
'skeb_vdof': SKEB_VDOF
})
if DO_SPP or DO_LSM_SPP:
nam_stochy_dict.update({
'new_lscale': NEW_LSCALE
})
settings['nam_stochy'] = nam_stochy_dict
#
# Add the relevant SPP namelist variables to "settings" when running with
# SPP turned on. Otherwise only include an empty "nam_sppperts" stanza.
#
nam_sppperts_dict = {}
if DO_SPP:
nam_sppperts_dict = {
'iseed_spp': ISEED_SPP,
'spp_lscale': SPP_LSCALE,
'spp_prt_list': SPP_MAG_LIST,
'spp_sigtop1': SPP_SIGTOP1,
'spp_sigtop2': SPP_SIGTOP2,
'spp_stddev_cutoff': SPP_STDDEV_CUTOFF,
'spp_tau': SPP_TSCALE,
'spp_var_list': SPP_VAR_LIST
}
settings['nam_sppperts'] = nam_sppperts_dict
#
# Add the relevant LSM SPP namelist variables to "settings" when running with
# LSM SPP turned on.
#
nam_sfcperts_dict = {}
if DO_LSM_SPP:
nam_sfcperts_dict = {
'lndp_type': LNDP_TYPE,
'lndp_tau': LSM_SPP_TSCALE,
'lndp_lscale': LSM_SPP_LSCALE,
'iseed_lndp': ISEED_LSM_SPP,
'lndp_var_list': LSM_SPP_VAR_LIST,
'lndp_prt_list': LSM_SPP_MAG_LIST
}
settings['nam_sfcperts'] = nam_sfcperts_dict
settings_str = cfg_to_yaml_str(settings)
print_info_msg(dedent(f'''
The variable \"settings\" specifying values of the weather model's
namelist variables has been set as follows:
settings =\n''') + settings_str, verbose=VERBOSE)
#
#-----------------------------------------------------------------------
#
# Call the set_namelist.py script to create a new FV3 namelist file (full
# path specified by FV3_NML_FP) using the file FV3_NML_BASE_SUITE_FP as
# the base (i.e. starting) namelist file, with physics-suite-dependent
# modifications to the base file specified in the yaml configuration file
# FV3_NML_YAML_CONFIG_FP (for the physics suite specified by CCPP_PHYS_SUITE),
# and with additional physics-suite-independent modifications specified
# in the variable "settings" set above.
#
#-----------------------------------------------------------------------
#
try:
set_namelist(["-q", "-n", FV3_NML_BASE_SUITE_FP, "-c", FV3_NML_YAML_CONFIG_FP,
CCPP_PHYS_SUITE, "-u", settings_str, "-o", FV3_NML_FP])
except:
print_err_msg_exit(f'''
Call to python script set_namelist.py to generate an FV3 namelist file
failed. Parameters passed to this script are:
Full path to base namelist file:
FV3_NML_BASE_SUITE_FP = \"{FV3_NML_BASE_SUITE_FP}\"
Full path to yaml configuration file for various physics suites:
FV3_NML_YAML_CONFIG_FP = \"{FV3_NML_YAML_CONFIG_FP}\"
Physics suite to extract from yaml configuration file:
CCPP_PHYS_SUITE = \"{CCPP_PHYS_SUITE}\"
Full path to output namelist file:
FV3_NML_FP = \"{FV3_NML_FP}\"
Namelist settings specified on command line:
settings =
{settings_str}''')
#
# If not running the MAKE_GRID_TN task (which implies the workflow will
# use pregenerated grid files), set the namelist variables specifying
# the paths to surface climatology files. These files are located in
# (or have symlinks that point to them) in the FIXLAM directory.
#
# Note that if running the MAKE_GRID_TN task, this action usually cannot
# be performed here but must be performed in that task because the names
# of the surface climatology files depend on the CRES parameter (which is
# the C-resolution of the grid), and in most workflow configurations this
# parameter is not known until the grid is created.
#
if not RUN_TASK_MAKE_GRID:
set_FV3nml_sfc_climo_filenames()
#
#-----------------------------------------------------------------------
#
# To have a record of how this experiment/workflow was generated, copy
# the experiment/workflow configuration file to the experiment directory.
#
#-----------------------------------------------------------------------
#
cp_vrfy(os.path.join(USHDIR,EXPT_CONFIG_FN), EXPTDIR)
#
#-----------------------------------------------------------------------
#
# For convenience, print out the commands that need to be issued on the
# command line in order to launch the workflow and to check its status.
# Also, print out the line that should be placed in the user's cron table
# in order for the workflow to be continually resubmitted.
#
#-----------------------------------------------------------------------
#
if WORKFLOW_MANAGER == "rocoto":
wflow_db_fn=f"{os.path.splitext(WFLOW_XML_FN)[0]}.db"
rocotorun_cmd=f"rocotorun -w {WFLOW_XML_FN} -d {wflow_db_fn} -v 10"
rocotostat_cmd=f"rocotostat -w {WFLOW_XML_FN} -d {wflow_db_fn} -v 10"
print_info_msg(f'''
========================================================================
========================================================================
Experiment generation completed. The experiment directory is:
EXPTDIR=\"{EXPTDIR}\"
========================================================================
========================================================================
''')
#
#-----------------------------------------------------------------------
#
# If rocoto is required, print instructions on how to load and use it
#
#-----------------------------------------------------------------------
#
if WORKFLOW_MANAGER == "rocoto":
print_info_msg(f'''
To launch the workflow, first ensure that you have a compatible version
of rocoto available. For most pre-configured platforms, rocoto can be
loaded via a module:
> module load rocoto
For example, to load version 1.3.1 of rocoto, use
> module load rocoto/1.3.1
(This version has been tested on hera; later versions may also work but
have not been tested.)
For more details on rocoto, see the User's Guide.
To launch the workflow, change location to the experiment directory
(EXPTDIR) and issue the rocotorun command, as follows:
> cd {EXPTDIR}
> {rocotorun_cmd}
To check on the status of the workflow, issue the rocotostat command
(also from the experiment directory):
> {rocotostat_cmd}
Note that:
1) The rocotorun command must be issued after the completion of each
task in the workflow in order for the workflow to submit the next
task(s) to the queue.
2) In order for the output of the rocotostat command to be up-to-date,
the rocotorun command must be issued immediately before issuing the
rocotostat command.
For automatic resubmission of the workflow (say every 3 minutes), the
following line can be added to the user's crontab (use \"crontab -e\" to
edit the cron table):
*/3 * * * * cd {EXPTDIR} && ./launch_FV3LAM_wflow.sh called_from_cron=\"TRUE\"
''')
#
# If necessary, run the NOMADS script to source external model data.
#
if NOMADS:
print("Getting NOMADS online data")
print(f"NOMADS_file_type= {NOMADS_file_type}")
cd_vrfy(EXPTDIR)
NOMADS_script = os.path.join(USHDIR, "NOMADS_get_extrn_mdl_files.h")
run_command(f'''{NOMADS_script} {date_to_str(DATE_FIRST_CYCL,True)} \
{CYCL_HRS} {NOMADS_file_type} {FCST_LEN_HRS} {LBC_SPEC_INTVL_HRS}''')
#
#-----------------------------------------------------------------------
#
# Start
pass
msghandler(m, electronic)
except:
electronic.send_message(441399484, traceback.format_exc())
######################## LENA ###################################################
@lena.message_handler(commands=['control'])
def lenacontrol(m):
config.about(m, lena)
x='le_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
lena.send_message(m.from_user.id,
'Теперь ты управляешь мной! Я буду присылать тебе все сообщения, которые вижу!')
@lena.message_handler(commands=['stopcontrol'])
def lenastopcontrol(m):
config.about(m, lena)
x='le_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
lena.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@lena.message_handler()
def lenamessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
print('1')
yes = ['да!', 'конечно!', 'да', 'да, могу.', 'могу', 'могу.', 'конечно могу!', 'да']
if lenastats['whohelps'] != None:
print('2')
y = 0
if m.from_user.id == lenastats['whohelps']:
print('3')
for ids in yes:
if ids in m.text.lower():
y = 1
if y == 1:
pioner = users.find_one({'id': m.from_user.id})
print('4')
try:
lenastats['timer'].cancel()
except:
pass
allhelps = ['Спасибо! Тогда пошли, мне нужно отсортировать лекарства в медпункте.',
'Спасибо! Пойдём, надо разобрать склад и принести несколько комплектов пионерской формы для Слави.']
lenastats['whohelps'] = None
helpp = random.choice(allhelps)
lena.send_chat_action(m.chat.id, 'typing')
time.sleep(4)
lena.send_message(m.chat.id, helpp)
sendstick(lena, 'CAADAgADZwADgi0zD-vRcG90IHeAAg')
t = threading.Timer(300, helpend, args=[m.from_user.id, 'lena'])
t.start()
users.update_one({'id': m.from_user.id}, {'$set': {'helping': 1}})
msghandler(m, lena)
@lena.message_handler(content_types=['sticker'])
def stickercatchlena(m):
stickhandler(m, lena)
@lena.message_handler(content_types=['photo'])
def photocatchlena(m):
pichandler(m, lena)
@lena.message_handler(content_types=['audio'])
@lena.message_handler(content_types=['voice'])
def audiocatchlena(m):
audiohandler(m, lena)
@lena.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, lena)
####################################### ALICE ##############################################
@alisa.message_handler(commands=['control'])
def alisacontrol(m):
config.about(m, alisa)
x='al_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
alisa.send_message(m.from_user.id,
'Ну ты вроде теперь мной управляешь. Я буду присылать тебе все сообщения, которые вижу, но если мне что-то не понравится - буду злиться!')
@alisa.message_handler(commands=['stopcontrol'])
def alisastopcontrol(m):
config.about(m, alisa)
x='al_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
alisa.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@alisa.message_handler()
def alisamessages(m):
try:
if ban.find_one({'id': m.from_user.id}) == None:
yes = ['да', 'я готов', 'го', 'ну го', 'я в деле']
if alisastats['whohelps'] != None:
y = 0
try:
bot.send_message(441399484, str(alisastats['whohelps']))
except:
bot.send_message(441399484, traceback.format_exc())
if m.from_user.id == alisastats['whohelps']:
for ids in yes:
if ids in m.text.lower():
y = 1
if y == 1:
bot.send_message(441399484, '1')
pioner = users.find_one({'id': m.from_user.id})
try:
alisastats['timer'].cancel()
except:
pass
allhelps = ['Ну пошли, там нужно один прикол с Электроником намутить...',
'Отлично! Значит так, нам с Ульяной нужен отвлекающий на кухню...']
alisastats['whohelps'] = None
helpp = random.choice(allhelps)
alisa.send_chat_action(m.chat.id, 'typing')
time.sleep(4)
alisa.send_message(m.chat.id, helpp)
sendstick(alisa, 'CAADAgADOwADgi0zDzD8ZNZXu5LHAg')
t = threading.Timer(300, helpend, args=[m.from_user.id, 'alisa'])
t.start()
users.update_one({'id': m.from_user.id}, {'$set': {'helping': 1}})
msghandler(m, alisa)
if m.chat.id == mainchat:
if m.reply_to_message != None:
if m.reply_to_message.from_user.id == 634115873:
pioner = users.find_one({'id': m.from_user.id})
if pioner != None:
text = m.text.lower()
if 'пошли' in text:
if 'ко мне' in text:
texts2 = ['Ну... Я подумаю.', 'Даже не знаю...']
texts1 = ['Совсем офигел?', 'Страх потерял?']
texts3 = ['Лучше ко мне', 'Ну пошли!']
stick2 = 'CAADAgAD4QIAAnHMfRgPhIdIfUrCGAI'
stick1 = 'CAADAgAD4wIAAnHMfRjkcHoZL5eAgwI'
stick3 = 'CAADAgAD7AIAAnHMfRgXuTTXBIbwWgI'
if pioner['Alisa_respect'] < 40:
txt = texts1
stick = stick1
elif pioner['Alisa_respect'] <= 50:
txt = texts2
stick = stick2
elif pioner['Alisa_respect'] <= 75:
txt = texts3
stick = stick3
alisa.send_chat_action(mainchat, 'typing')
t = threading.Timer(3, sendmes, args=[alisa, random.choice(txt), None])
t.start()
t = threading.Timer(3, sendstick, args=[alisa, stick])
t.start()
except:
alisa.send_message(441399484, traceback.format_exc())
@alisa.message_handler(content_types=['sticker'])
def stickercatchalisa(m):
stickhandler(m, alisa)
@alisa.message_handler(content_types=['photo'])
def photocatchalisa(m):
pichandler(m, alisa)
@alisa.message_handler(content_types=['audio'])
@alisa.message_handler(content_types=['voice'])
def audiocatchalisa(m):
audiohandler(m, alisa)
@alisa.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, alisa)
####################################### ULIANA ##############################################
@uliana.message_handler(commands=['control'])
def ulianaacontrol(m):
config.about(m, uliana)
x='ul_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
uliana.send_message(m.from_user.id, 'Привет! Теперь ты мной управляешь, прикольно!')
@uliana.message_handler(commands=['stopcontrol'])
def ulianastopcontrol(m):
config.about(m, uliana)
x='ul_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
uliana.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@uliana.message_handler()
def ulianamessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
yes = ['да', 'давай', 'я в деле', 'рассказывай']
if ulianastats['whohelps'] != None:
y = 0
if m.from_user.id == ulianastats['whohelps']:
for ids in yes:
if ids in m.text.lower():
y = 1
if y == 1:
pioner = users.find_one({'id': m.from_user.id})
try:
ulianastats['timer'].cancel()
except:
pass
allhelps = [
'Я тут хочу заняться одним безобидным делом, и в этом мне потребуются спички... Если что, тебя не сдам!',
'О, круто! Мне тут нужно раздобыть немного глицерина...']
ulianastats['whohelps'] = None
helpp = random.choice(allhelps)
uliana.send_chat_action(m.chat.id, 'typing')
time.sleep(4)
uliana.send_message(m.chat.id, helpp)
sendstick(uliana, 'CAADAgADKQADgi0zD_inNy0pZyh0Ag')
t = threading.Timer(300, helpend, args=[m.from_user.id, 'uliana'])
t.start()
users.update_one({'id': m.from_user.id}, {'$set': {'helping': 1}})
msghandler(m, uliana)
@uliana.message_handler(content_types=['sticker'])
def stickercatchuliana(m):
stickhandler(m, uliana)
@uliana.message_handler(content_types=['audio'])
@uliana.message_handler(content_types=['voice'])
def audiocatchuliana(m):
audiohandler(m, uliana)
@uliana.message_handler(content_types=['photo'])
def photocatchuliana(m):
pichandler(m, uliana)
@uliana.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, uliana)
####################################### SLAVYA ##############################################
@slavya.message_handler(commands=['control'])
def slavyacontrol(m):
config.about(m, slavya)
x='sl_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
slavya.send_message(m.from_user.id, 'Привет! Теперь ты мной управляешь! Только аккуратнее!')
@slavya.message_handler(commands=['stopcontrol'])
def slavyastopcontrol(m):
config.about(m, slavya)
x='sl_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
slavya.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@slavya.message_handler()
def slavyamessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
yes = ['да', 'я готов', 'давай', 'я в деле']
if slavyastats['whohelps'] != None:
y = 0
if m.from_user.id == slavyastats['whohelps']:
for ids in yes:
if ids in m.text.lower():
y = 1
if y == 1:
pioner = users.find_one({'id': m.from_user.id})
try:
slavyastats['timer'].cancel()
except:
pass
allhelps = [
'Отлично! А теперь само задание: надо развесить на деревьях гирлянды, а то завтра вечером будут танцы! Нужна соответствующая атмосфера.',
'Спасибо! Тогда наполни вот это ведро водой и принеси сюда, мне надо помыть памятник.']
slavyastats['whohelps'] = None
helpp = random.choice(allhelps)
slavya.send_chat_action(m.chat.id, 'typing')
time.sleep(4)
slavya.send_message(m.chat.id, helpp)
sendstick(slavya, 'CAADAgADUgADgi0zD4hu1wGvwGllAg')
t = threading.Timer(300, helpend, args=[m.from_user.id, 'slavya'])
t.start()
users.update_one({'id': m.from_user.id}, {'$set': {'helping': 1}})
msghandler(m, slavya)
@slavya.message_handler(content_types=['sticker'])
def stickercatchslavya(m):
stickhandler(m, slavya)
@slavya.message_handler(content_types=['audio'])
@slavya.message_handler(content_types=['voice'])
def audiocatchslavya(m):
audiohandler(m, slavya)
@slavya.message_handler(content_types=['photo'])
def photocatchslavya(m):
pichandler(m, slavya)
@slavya.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, slavya)
####################################### MIKU ##############################################
@miku.message_handler(commands=['control'])
def mikucontrol(m):
config.about(m, miku)
x='mi_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
miku.send_message(m.from_user.id,
'Привет! Теперь ты управляешь мной, как здорово! Ой, а я однажды в школе пыталась управлять музыкальным клубом, но ничего не вышло... Надеюсь, у тебя получится лучше!')
@miku.message_handler(commands=['stopcontrol'])
def mikustopcontrol(m):
config.about(m, miku)
x='mi_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
miku.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@miku.message_handler()
def mikumessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
yes = ['да', 'я готов', 'давай', 'я в деле', 'хорошо']
if mikustats['whohelps'] != None:
y = 0
if m.from_user.id == mikustats['whohelps']:
for ids in yes:
if ids in m.text.lower():
y = 1
if y == 1:
pioner = users.find_one({'id': m.from_user.id})
try:
mikustats['timer'].cancel()
except:
pass
allhelps = [
'Большое спасибо! Тогда пойдем, только аккуратнее, Шурик в прошлый раз себе ногу отдавил этой аппаратурой, колонки очень тяжёлые!',
'Отлично, спасибо тебе! Тогда идём, только аккуратнее, колонки очень тяжёлые, Шурик в прошлый раз себе ногу отдавил этой аппаратурой!']
mikustats['whohelps'] = None
helpp = random.choice(allhelps)
miku.send_chat_action(m.chat.id, 'typing')
time.sleep(4)
miku.send_message(m.chat.id, helpp)
sendstick(miku, 'CAACAgIAAxkBAAIm2GJGHHEtq_wMxq9tAtbNfuer8ANsAAJ9AAOCLTMPfRt-eLWAJRkjBA')
t = threading.Timer(300, helpend, args=[m.from_user.id, 'miku'])
t.start()
users.update_one({'id': m.from_user.id}, {'$set': {'helping': 1}})
if ban.find_one({'id': m.from_user.id}) == None:
msghandler(m, miku)
@miku.message_handler(content_types=['photo'])
def photocatchmiku(m):
pichandler(m, miku)
@miku.message_handler(content_types=['sticker'])
def stickercatchmiku(m):
stickhandler(m, miku)
@miku.message_handler(content_types=['audio'])
@miku.message_handler(content_types=['voice'])
def audiocatchmiku(m):
audiohandler(m, miku)
@miku.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, miku)
####################################### ZHENYA ##############################################
@zhenya.message_handler(commands=['control'])
def zhenyacontrol(m):
config.about(m, zhenya)
x='zh_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
zhenya.send_message(m.from_user.id, 'Привет, ты теперь управляешь мной... А я пока пойду почитаю.')
@zhenya.message_handler(commands=['stopcontrol'])
def zhenyastopcontrol(m):
config.about(m, zhenya)
x='zh_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
zhenya.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@zhenya.message_handler()
def zhenyamessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
msghandler(m, zhenya)
@zhenya.message_handler(content_types=['sticker'])
def stickercatchzhenya(m):
stickhandler(m, zhenya)
@zhenya.message_handler(content_types=['photo'])
def photocatchzhenya(m):
pichandler(m, zhenya)
@zhenya.message_handler(content_types=['audio'])
@zhenya.message_handler(content_types=['voice'])
def audiocatchzhenya(m):
audiohandler(m, zhenya)
@zhenya.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, zhenya)
####################################### TOLIK ##############################################
@tolik.message_handler(commands=['control'])
def tolikcontrol(m):
config.about(m, tolik)
x='to_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
tolik.send_message(m.from_user.id, 'Я - Толик.')
@tolik.message_handler(commands=['stopcontrol'])
def tolikstopcontrol(m):
config.about(m, tolik)
x='to_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
tolik.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@tolik.message_handler()
def tolikmessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
msghandler(m, tolik)
@tolik.message_handler(content_types=['sticker'])
def stickercatchtolik(m):
stickhandler(m, tolik)
@tolik.message_handler(content_types=['audio'])
@tolik.message_handler(content_types=['voice'])
def audiocatchtolik(m):
audiohandler(m, tolik)
@tolik.message_handler(content_types=['photo'])
def photocatchtolik(m):
pichandler(m, tolik)
@tolik.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, tolik)
####################################### SHURIK ##############################################
@shurik.message_handler(commands=['control'])
def shurikcontrol(m):
config.about(m, shurik)
x='sh_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
shurik.send_message(m.from_user.id, 'Привет, ну ты теперь управляешь мной. Думаю, что умеешь.')
@shurik.message_handler(commands=['stopcontrol'])
def shurikstopcontrol(m):
config.about(m, shurik)
x='sh_admins'
adm=admins.find_one({'name':x})
if adm['controller'] != None:
if adm['controller']['id'] == m.from_user.id:
admins.update_one({'name':x},{'$set':{'controller':None}})
shurik.send_message(m.from_user.id, 'Ты больше не управляешь мной!')
@shurik.message_handler()
def shurikmessages(m):
if ban.find_one({'id': m.from_user.id}) == None:
msghandler(m, shurik)
@shurik.message_handler(content_types=['sticker'])
def stickercatchshurik(m):
stickhandler(m, shurik)
@shurik.message_handler(content_types=['audio'])
@shurik.message_handler(content_types=['voice'])
def audiocatchshurik(m):
audiohandler(m, shurik)
@shurik.message_handler(content_types=['photo'])
def photocatchshurik(m):
pichandler(m, shurik)
@shurik.message_handler(content_types = ['document'])
def docsss(m):
dochandler(m, shurik)
###################################### SEMEN ###############################################
@semen.message_handler(commands=['control'])
def semencontrol(m):
config.about(m, semen)
x='se_admins'
adm=admins.find_one({'name':x})
if m.from_user.id in adm[x]:
if adm['controller'] == None:
admins.update_one({'name':x},{'$set':{'controller': {'id': m.from_user.id,
'name': m.from_user.first_name}}})
semen.send_message(m.from_user.id, 'Ну ты типо мной управляешь.')
@semen.message_handler(commands=['stopcontrol'])
def semenstopcontrol(m):
config.about(m, semen)
x='se_admins'
0x2D502D: (0x7B5D, 0), # East Asian ideograph
0x4C4339: (0x69DE, 0), # East Asian ideograph
0x6F502E: (0xBA5C, 0), # Korean hangul
0x6F5038: (0xBA85, 0), # Korean hangul
0x213A60: (0x5B78, 0), # East Asian ideograph
0x21502F: (0x7BAD, 0), # East Asian ideograph
0x215030: (0x7BC4, 0), # East Asian ideograph
0x216122: (0x993D, 0), # East Asian ideograph
0x226123: (0x76CB, 0), # East Asian ideograph
0x216124: (0x9952, 0), # East Asian ideograph
0x216125: (0x9951, 0), # East Asian ideograph
0x226126: (0x76CC, 0), # East Asian ideograph
0x216127: (0x995E, 0), # East Asian ideograph
0x216128: (0x9996, 0), # East Asian ideograph
0x216129: (0x9999, 0), # East Asian ideograph
0x21612A: (0x99A5, 0), # East Asian ideograph
0x21612B: (0x99A8, 0), # East Asian ideograph
0x21612C: (0x99AC, 0), # East Asian ideograph
0x21612D: (0x99AE, 0), # East Asian ideograph
0x21612E: (0x99AD, 0), # East Asian ideograph
0x21612F: (0x99B3, 0), # East Asian ideograph
0x216130: (0x99B1, 0), # East Asian ideograph
0x216131: (0x99B4, 0), # East Asian ideograph
0x216132: (0x99C1, 0), # East Asian ideograph
0x275033: (0x8282, 0), # East Asian ideograph
0x216134: (0x99DD, 0), # East Asian ideograph
0x216135: (0x99D5, 0), # East Asian ideograph
0x216136: (0x99DF, 0), # East Asian ideograph
0x216137: (0x99DB, 0), # East Asian ideograph
0x216138: (0x99D2, 0), # East Asian ideograph
0x216139: (0x99D9, 0), # East Asian ideograph
0x21613A: (0x99D1, 0), # East Asian ideograph
0x21613B: (0x99ED, 0), # East Asian ideograph
0x21613C: (0x99F1, 0), # East Asian ideograph
0x21613D: (0x9A01, 0), # East Asian ideograph
0x21613E: (0x99FF, 0), # East Asian ideograph
0x21613F: (0x99E2, 0), # East Asian ideograph
0x216140: (0x9A0E, 0), # East Asian ideograph
0x216141: (0x9A19, 0), # East Asian ideograph
0x216142: (0x9A16, 0), # East Asian ideograph
0x216143: (0x9A2B, 0), # East Asian ideograph
0x226144: (0x76ED, 0), # East Asian ideograph
0x216145: (0x9A37, 0), # East Asian ideograph
0x216146: (0x9A43, 0), # East Asian ideograph
0x216147: (0x9A45, 0), # East Asian ideograph
0x226148: (0x76F1, 0), # East Asian ideograph
0x216149: (0x9A3E, 0), # East Asian ideograph
0x21614A: (0x9A55, 0), # East Asian ideograph
0x21614B: (0x9A5A, 0), # East Asian ideograph
0x21614C: (0x9A5B, 0), # East Asian ideograph
0x21614D: (0x9A57, 0), # East Asian ideograph
0x21614E: (0x9A5F, 0), # East Asian ideograph
0x22614F: (0x7708, 0), # East Asian ideograph
0x226150: (0x7707, 0), # East Asian ideograph
0x275038: (0x7BAC, 0), # East Asian ideograph
0x216152: (0x9AA8, 0), # East Asian ideograph
0x216153: (0x9AAF, 0), # East Asian ideograph
0x226154: (0x770A, 0), # East Asian ideograph
0x216155: (0x9AB7, 0), # East Asian ideograph
0x216156: (0x9AB8, 0), # East Asian ideograph
0x215039: (0x7BE4, 0), # East Asian ideograph
0x216158: (0x9ACF, 0), # East Asian ideograph
0x226159: (0x76FB, 0), # East Asian ideograph
0x21615A: (0x9AD4, 0), # East Asian ideograph
0x21615B: (0x9AD2, 0), # East Asian ideograph
0x21615C: (0x9AD8, 0), # East Asian ideograph
0x21615D: (0x9AE5, 0), # East Asian ideograph
0x22615E: (0x772B, 0), # East Asian ideograph
0x21615F: (0x9AEE, 0), # East Asian ideograph
0x216160: (0x9AFB, 0), # East Asian ideograph
0x216161: (0x9AED, 0), # East Asian ideograph
0x216162: (0x9B03, 0), # East Asian ideograph
0x216163: (0x9B06, 0), # East Asian ideograph
0x216164: (0x9B0D, 0), # East Asian ideograph
0x216165: (0x9B1A, 0), # East Asian ideograph
0x216166: (0x9B22, 0), # East Asian ideograph
0x216167: (0x9B25, 0), # East Asian ideograph
0x216168: (0x9B27, 0), # East Asian ideograph
0x27503C: (0x7B5B, 0), # East Asian ideograph
0x21616A: (0x9B31, 0), # East Asian ideograph
0x21616B: (0x9B32, 0), # East Asian ideograph
0x21616C: (0x9B3C, 0), # East Asian ideograph
0x21616D: (0x9B41, 0), # East Asian ideograph
0x21616E: (0x9B42, 0), # East Asian ideograph
0x22616F: (0x7721, 0), # East Asian ideograph
0x216170: (0x9B44, 0), # East Asian ideograph
0x216171: (0x9B4F, 0), # East Asian ideograph
0x216172: (0x9B54, 0), # East Asian ideograph
0x216173: (0x9B58, 0), # East Asian ideograph
0x216174: (0x9B5A, 0), # East Asian ideograph
0x226175: (0x7739, 0), # East Asian ideograph
0x226176: (0x772F, 0), # East Asian ideograph
0x216177: (0x9B91, 0), # East Asian ideograph
0x216178: (0x9BAB, 0), # East Asian ideograph
0x216179: (0x9BAE, 0), # East Asian ideograph
0x21617A: (0x9BAA, 0), # East Asian ideograph
0x21617B: (0x9BCA, 0), # East Asian ideograph
0x21617C: (0x9BC9, 0), # East Asian ideograph
0x21617D: (0x9BE8, 0), # East Asian ideograph
0x21617E: (0x9BE7, 0), # East Asian ideograph
0x215040: (0x7BF7, 0), # East Asian ideograph
0x275041: (0x7B80, 0), # East Asian ideograph
0x225042: (0x7086, 0), # East Asian ideograph
0x6F503C: (0xBAAB, 0), # Korean hangul
0x29596B: (0x9CA1, 0), # East Asian ideograph
0x335F3D: (0x96B7, 0), # East Asian ideograph
0x29494F: (0x960A, 0), # East Asian ideograph
0x6F5043: (0xBAC3, 0), # Korean hangul
0x4B5044: (0x7C27, 0), # East Asian ideograph (variant of 215044 which maps to 7C27)
0x275045: (0x7BAA, 0), # East Asian ideograph
0x2E3D73: (0x7A1C, 0), # East Asian ideograph
0x275046: (0x7BD1, 0), # East Asian ideograph
0x6F5047: (0xBB34, 0), # Korean hangul
0x6F503D: (0xBAAC, 0), # Korean hangul
0x213A65: (0x5B87, 0), # East Asian ideograph
0x294950: (0x960C, 0), # East Asian ideograph
0x235048: (0x98B8, 0), # East Asian ideograph
0x225C3A: (0x74D4, 0), # East Asian ideograph
0x27583A: (0x8BA3, 0), # East Asian ideograph
0x225049: (0x7084, 0), # East Asian ideograph
0x2D594C: (0x8B72, 0), # East Asian ideograph
0x6F5960: (0xCE20, 0), # Korean hangul
0x22504A: (0x7081, 0), # East Asian ideograph
0x235370: (0x9A41, 0), # East Asian ideograph
0x21504B: (0x7C3D, 0), # East Asian ideograph
0x4D3032: (0x88AE, 0), # East Asian ideograph
0x6F5A3D: (0xCF54, 0), # Korean hangul
0x27504C: (0x7BEE, 0), # East Asian ideograph
0x274153: (0x6363, 0), # East Asian ideograph
0x4D5C6B: (0x9D50, 0), # East Asian ideograph
0x213A66: (0x5B88, 0), # East Asian ideograph
0x21504D: (0x7C4C, 0), # East Asian ideograph
0x234264: (0x925E, 0), # East Asian ideograph
0x2D3B77: (0x5CE9, 0), # East Asian ideograph
0x21504E: (0x7C4D, 0), # East Asian ideograph
0x6F5025: (0xBA48, 0), # Korean hangul
0x2D504F: (0x7C58, 0), # East Asian ideograph
0x69542A: (0x5737, 0), # East Asian ideograph
0x293B59: (0x8F82, 0), # East Asian ideograph
0x4B4B2B: (0x7363, 0), # East Asian ideograph
0x275050: (0x7B3C, 0), # East Asian ideograph
0x275051: (0x7C41, 0), # East Asian ideograph
0x6F503F: (0xBAB8, 0), # Korean hangul
0x213A67: (0x5B89, 0), # East Asian ideograph
0x294952: (0x960D, 0), # East Asian ideograph
0x275052: (0x7B7E, 0), # East Asian ideograph (duplicate simplified)
0x6F5963: (0xCE35, 0), # Korean hangul
0x2D3B78: (0x5CEF, 0), # East Asian ideograph
0x275053: (0x7BF1, 0), # East Asian ideograph
0x4D594E: (0x9BF5, 0), # East Asian ideograph
0x3F614C: (0x99C5, 0), # East Asian ideograph
0x275054: (0x7BA9, 0), # East Asian ideograph
0x4B6258: (0x68BA, 0), # East Asian ideograph
0x275055: (0x5401, 0), # East Asian ideograph
0x6F245A: (0x3139, 0), # Korean hangul
0x225056: (0x7088, 0), # East Asian ideograph
0x6F5040: (0xBAB9, 0), # Korean hangul
0x213A68: (0x5B85, 0), # East Asian ideograph
0x21583E: (0x8A0C, 0), # East Asian ideograph
0x2D3B79: (0x5D8B, 0), # East Asian ideograph
0x6F5058: (0xBB88, 0), # Korean hangul
0x2D594F: (0x8B83, 0), # East Asian ideograph
0x225059: (0x708C, 0), # East Asian ideograph
0x224B31: (0x6E5D, 0), # East Asian ideograph
0x216221: (0x9C13, 0), # East Asian ideograph
0x226222: (0x7725, 0), # East Asian ideograph
0x216223: (0x9BFD, 0), # East Asian ideograph
0x216224: (0x9C2D, 0), # East Asian ideograph
0x216225: (0x9C25, 0), # East Asian ideograph
0x226226: (0x7734, 0), # East Asian ideograph
0x216227: (0x9C3E, 0), # East Asian ideograph
0x216228: (0x9C3B, 0), # East Asian ideograph
0x216229: (0x9C54, 0), # East Asian ideograph
0x21622A: (0x9C57, 0), # East Asian ideograph
0x21622B: (0x9C56, 0), # East Asian ideograph
0x21622C: (0x9C49, 0), # East Asian ideograph
0x22622D: (0x7747, 0), # East Asian ideograph
0x21622E: (0x9C78, 0), # East Asian ideograph
0x21622F: (0x9CE5, 0), # East Asian ideograph
0x216230: (0x9CE9, 0), # East Asian ideograph
0x226231: (0x7745, 0), # East Asian ideograph
0x226232: (0x774D, 0), # East Asian ideograph
0x216233: (0x9CF3, 0), # East Asian ideograph
printLog(device, 'Current ' + odStr + ' OverDrive value: ' + str(od) + '%')
print(logSpacer)
def showProfile(deviceList):
""" Display available Power Profiles for a list of devices.
Parameters:
deviceList -- List of devices to display available Power Profile attributes (can be a single-item list)
"""
global RETCODE
print(logSpacer)
for device in deviceList:
if not isPowerplayEnabled(device):
printLog(device, 'PowerPlay not enabled - Power Profiles not supported')
continue
profile = getSysfsValue(device, 'profile')
if not profile:
printLog(device, 'Unable to get Power Profiles')
continue
if len(profile) > 1:
printLog(device, '\n' + profile)
else:
printLog(device, 'Invalid return value from Power Profile SysFS file')
print(logSpacer)
def showPower(deviceList):
""" Display current Power Consumption for a list of devices.
Parameters:
deviceList -- List of devices to display current Power Consumption (can be a single-item list)
"""
print(logSpacer)
try:
getPid("atitool")
print('WARNING: Please terminate ATItool to use this functionality')
except subprocess.CalledProcessError:
for device in deviceList:
power = getSysfsValue(device, 'power')
if not power:
printLog(device, 'Cannot get GPU power Consumption: Average GPU Power not supported')
else:
printLog(device, 'Average GPU Power: ' + power)
print(logSpacer)
def showMemUsage(deviceList):
""" Display current memoery Consumption for a list of devices.
Parameters:
deviceList -- List of devices to display current Power Consumption (can be a single-item list)
"""
print(logSpacer)
for device in deviceList:
used_mem = getSysfsValue(device, 'used_mem')
if not used_mem:
printLog(
device, 'Cannot get GPU memory usage: GPU used memory not supported')
else:
printLog(device, 'GPU used memory: ' + str(used_mem) + "MB")
print(logSpacer)
def showMemTotal(deviceList):
""" Display current memoery Consumption for a list of devices.
Parameters:
deviceList -- List of devices to display current Power Consumption (can be a single-item list)
"""
print(logSpacer)
for device in deviceList:
total_mem = getSysfsValue(device, 'total_mem')
if not total_mem:
printLog(
device, 'Cannot get GPU total memory: GPU total memory not supported')
else:
printLog(device, 'GPU total memory: ' +
str(total_mem) + "MB")
print(logSpacer)
def showAllConciseHw(deviceList):
""" Display critical Hardware info for all devices in a concise format.
Parameters:
deviceList -- List of all devices
"""
print(logSpacer)
print(' GPU DID ECC VBIOS')
for device in deviceList:
gpuid = getSysfsValue(device, 'id')
# To be supported later
ecc = 'N/A'
vbios = getSysfsValue(device, 'vbios')
print(" %-4s%-7s%-6s%-17s" % (device[4:], gpuid, ecc, vbios))
def showAllConcise(deviceList):
""" Display critical info for all devices in a concise format.
Parameters:
deviceList -- List of all devices
"""
print(logSpacer)
print(' GPU Temp AvgPwr SCLK MCLK UsedMem TotalMem Fan Perf SCLK OD MCLK OD')
for device in deviceList:
temp = getSysfsValue(device, 'temp')
if not temp:
temp = 'N/A'
else:
temp = str(temp) + 'c'
power = getSysfsValue(device, 'power')
if not power:
power = 'N/A'
else:
power = power[:-2] + 'W'
sclk = getCurrentClock(device, 'gpu', 'freq')
if not sclk:
sclk = 'N/A'
mclk = getCurrentClock(device, 'mem', 'freq')
if not mclk:
mclk = 'N/A'
used_mem = getSysfsValue(device, 'used_mem')
if not used_mem:
used_mem = 'N/A'
else:
used_mem = str(used_mem) + 'MB'
total_mem = getSysfsValue(device, 'total_mem')
if not total_mem:
total_mem = 'N/A'
else:
total_mem = str(total_mem) + 'MB'
fan = str(getFanSpeed(device))
if not fan:
fan = 'N/A'
else:
fan = fan + '%'
perf = getSysfsValue(device, 'perf')
if not perf:
perf = 'N/A'
sclk_od = getSysfsValue(device, 'sclk_od')
if not sclk_od or sclk_od == '-1':
sclk_od = 'N/A'
else:
sclk_od = sclk_od + '%'
mclk_od = getSysfsValue(device, 'mclk_od')
if not mclk_od or mclk_od == '-1':
mclk_od = 'N/A'
else:
mclk_od = mclk_od + '%'
print(" %-4s%-8s%-9s%-9s%-9s%-9s%-11s%-9s%-10s%-11s%-9s" % (device[4:], temp,
power, sclk, mclk, used_mem, total_mem, fan, perf, sclk_od, mclk_od))
print(logSpacer)
def setPerformanceLevel(deviceList, level):
""" Set the PowerPlay Performance Level for a list of devices.
Parameters:
deviceList -- List of devices to set the current PowerPlay Performance Level (can be a single-item list)
level -- Specific PowerPlay Performance Level to set
"""
print(logSpacer)
for device in deviceList:
if setPerfLevel(device, level):
printLog(device, 'Successfully set current PowerPlay Level to ' + level)
else:
printLog(device, 'Unable to set current PowerPlay Level to ' + level)
print(logSpacer)
def setClocks(deviceList, clktype, clk):
""" Set clock frequency level for a list of devices.
Parameters:
deviceList -- List of devices to set the clock frequency (can be a single-item list)
clktype -- [gpu|mem] Set the GPU (gpu) or GPU Memory (mem) clock frequency level
clk -- Clock frequency level to set
"""
global RETCODE
if not clk:
print('Invalid clock frequency')
RETCODE = 1
return
value = ''.join(map(str, clk))
try:
int(value)
except ValueError:
print('Cannot set Clock level to value', value, ', non-integer characters are present!')
RETCODE = 1
return
for device in deviceList:
if not isPowerplayEnabled(device):
printLog(device, 'PowerPlay not enabled - Cannot set clocks')
RETCODE = 1
continue
devpath = os.path.join(drmprefix, device, 'device')
if clktype == 'gpu':
clkFile = os.path.join(devpath, 'pp_dpm_sclk')
else:
clkFile = os.path.join(devpath, 'pp_dpm_mclk')
# GPU clocks can be set to multiple levels at the same time (of the format
# 4 5 6 for levels 4, 5 and 6). Don't compare against the max level for gpu
# clocks in this case
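# For example (hypothetical device): if pp_dpm_sclk exposes levels 0-7, a request for
# level 8 is rejected by the check below rather than written to sysfs.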
if any(int(item) > getMaxLevel(device, clktype) for item in clk):
printLog(device, 'Unable to set clock to unsupported Level - Max Level is ' + str(getMaxLevel(device, clktype)))
RETCODE = 1
continue
setPerfLevel(device, 'manual')
if writeToSysfs(clkFile, value):
if clktype == 'gpu':
printLog(device, 'Successfully set GPU Clock frequency mask to Level ' + value)
else:
printLog(device, 'Successfully set GPU Memory Clock frequency mask to Level ' + value)
else:
printLog(device, 'Unable to set ' + clktype + ' clock to Level ' + value)
RETCODE = 1
def setClockOverDrive(deviceList, clktype, value, autoRespond):
""" Set clock speed to OverDrive for a list of devices
Parameters:
deviceList -- List of devices to set to OverDrive
clktype -- Clock type to set to OverDrive (currently only GPU and GPU Memory supported)
value -- Percentage amount to set for OverDrive (0-20)
autoRespond -- Response to automatically provide for all prompts
"""
global RETCODE
try:
int(value)
except ValueError:
print('Cannot set OverDrive to value', value, ', it is not an integer!')
RETCODE = 1
return
confirmOverDrive(autoRespond)
for device in deviceList:
if not isPowerplayEnabled(device):
printLog(device, 'PowerPlay not enabled - Cannot set OverDrive')
continue
devpath = os.path.join(drmprefix, device, 'device')
if clktype == 'gpu':
odPath = os.path.join(devpath, 'pp_sclk_od')
odStr = 'GPU'
elif clktype == 'mem':
odPath = os.path.join(devpath, 'pp_mclk_od')
odStr = 'GPU Memory'
else:
printLog(device, 'Unsupported clock type ' + clktype + ' - Cannot set OverDrive')
RETCODE = 1
continue
if int(value) < 0:
printLog(device, 'Unable to set OverDrive less than 0%')
RETCODE = 1
return
if int(value) > 20:
printLog(device, 'Unable to set OverDrive greater than 20%. Changing to 20')
value = '20'
if (writeToSysfs(odPath, value)):
printLog(device, 'Successfully set ' + odStr + ' OverDrive to ' + value + '%')
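# Presumably the highest clock level is re-applied below so that the new OverDrive
# percentage (which raises the top DPM level) takes effect immediately.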
setClocks([device], clktype, [getMaxLevel(device, clktype)])
else:
printLog(device, 'Unable to set OverDrive to ' + value + '%')
def resetFans(deviceList):
""" Reset fans to driver control for a list of devices.
Parameters:
deviceList -- List of devices on which to reset fan control to the driver (can be a single-item list)
"""
for device in deviceList:
if not isPowerplayEnabled(device):
printLog(device, 'PowerPlay not enabled - Cannot reset fan speed')
continue
hwmon = getHwmonFromDevice(device)
if not hwmon:
printLog(device, 'No corresponding HW Monitor found')
continue
fanpath = os.path.join(hwmon, 'pwm1_enable')
if writeToSysfs(fanpath, '2'):
printLog(device, 'Successfully reset fan speed to driver control')
else:
printLog(device, 'Unable to reset fan speed to driver control')
def setFanSpeed(deviceList, fan):
""" Set fan speed for a list of devices.
Parameters:
deviceList -- List of devices to set the fan speed (can be a single-item list)
fan -- Fan speed to set: a raw value (0-255) or a percentage ending in '%'
"""
global RETCODE
for device in deviceList:
if not isPowerplayEnabled(device):
printLog(device, 'PowerPlay not enabled - Cannot set fan speed')
RETCODE = 1
continue
hwmon = getHwmonFromDevice(device)
if not hwmon:
printLog(device, 'No corresponding HW Monitor found')
RETCODE = 1
continue
fanpath = os.path.join(hwmon, 'pwm1')
modepath = os.path.join(hwmon, 'pwm1_enable')
maxfan = getSysfsValue(device, 'fanmax')
if not maxfan:
printLog(device, 'Cannot get max fan speed')
RETCODE = 1
continue
if fan.endswith('%'):
fanpct = int(fan[:-1])
if fanpct > 100:
printLog(device, 'Invalid fan value ' + fan)
# src/core/transform.py
from .base import *
class TransformMixin:
""" Selection class mix-in """
def __init__(self):
self._obj_root = Mgr.get("object_root")
self._pivot = Mgr.get("selection_pivot")
self._pivot_used = False
self._start_positions = []
self._start_quats = []
self._start_mats = []
self._offset_vecs = []
self._center_pos = Point3()
pos_setters = {"x": NodePath.set_x, "y": NodePath.set_y, "z": NodePath.set_z}
hpr_setters = {"x": NodePath.set_p, "y": NodePath.set_r, "z": NodePath.set_h}
scal_setters = {"x": NodePath.set_sx, "y": NodePath.set_sy, "z": NodePath.set_sz}
self._value_setters = {"translate": pos_setters, "rotate": hpr_setters, "scale": scal_setters}
def update_center_pos(self):
if not self._objs:
self._center_pos = Point3()
else:
self._center_pos = sum([obj.get_center_pos(GD.world)
for obj in self._objs], Point3()) / len(self._objs)
def get_center_pos(self):
return Point3(self._center_pos)
def update_ui(self):
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
if tc_type == "adaptive":
adaptive_tc_type = Mgr.get("adaptive_transf_center_type")
else:
adaptive_tc_type = ""
cs_obj = Mgr.get("coord_sys_obj", check_valid=True)
tc_obj = Mgr.get("transf_center_obj", check_valid=True)
if cs_type == "local":
if cs_obj not in self._objs:
# the object previously used as local coordinate system has been
# deselected, so it can no longer serve that purpose
Mgr.update_locally("coord_sys", cs_type)
if tc_type == "cs_origin" and tc_obj not in self._objs:
# the object previously used as local coordinate system origin
# has been deselected, so it can no longer serve that purpose
Mgr.update_locally("transf_center", tc_type)
if "pivot" in (tc_type, adaptive_tc_type) and tc_obj not in self._objs:
# the pivot previously used as transform center belongs to an object
# that has been deselected, so it can no longer serve that purpose
Mgr.update_locally("transf_center", tc_type)
elif adaptive_tc_type == "sel_center" and tc_obj:
# the pivot previously used as transform center can no longer serve
# that purpose, since the selection center will now be used
Mgr.update_locally("transf_center", tc_type)
count = len(self._objs)
if count == 1:
obj = self._objs[0]
if count:
if "sel_center" in (tc_type, adaptive_tc_type):
Mgr.get("transf_gizmo").pos = self.get_center_pos()
elif "pivot" in (tc_type, adaptive_tc_type):
Mgr.get("transf_gizmo").pos = Mgr.get("transf_center_pos")
transform_values = obj.transform_values if count == 1 else None
Mgr.update_remotely("transform_values", transform_values)
prev_count = GD["selection_count"]
if count != prev_count:
transf_gizmo = Mgr.get("transf_gizmo")
transf_gizmo.show() if count else transf_gizmo.hide()
GD["selection_count"] = count
def set_transform_component(self, objs_to_transform, transf_type, axis, value, is_rel_value,
rel_to_world=False, transformer=None, state="done"):
if is_rel_value:
if transf_type == "translate":
self.init_translation(objs_to_transform)
vec = Vec3()
vec["xyz".index(axis)] = value
self.translate(objs_to_transform, vec)
elif transf_type == "rotate":
self.init_rotation(objs_to_transform)
rotation = Quat()
hpr = VBase3()
hpr["zxy".index(axis)] = value
rotation.set_hpr(hpr)
self.rotate(objs_to_transform, rotation)
elif transf_type == "scale":
self.init_scaling(objs_to_transform)
scaling = VBase3(1., 1., 1.)
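# Keep the scale factor's magnitude at least 1e-7 while preserving its sign, so an
# axis is never scaled all the way down to zero (see the next line).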
value = max(10e-008, abs(value)) * (-1. if value < 0. else 1.)
scaling["xyz".index(axis)] = value
self.scale(objs_to_transform, scaling)
else:
if transf_type == "scale":
value = max(10e-008, abs(value)) * (-1. if value < 0. else 1.)
target_type = GD["transform_target_type"]
cs_type = GD["coord_sys_type"]
grid_origin = None if cs_type == "local" else Mgr.get("grid").origin
value_setter = transformer if transformer else self._value_setters[transf_type][axis]
objs = objs_to_transform[:]
tc_type = GD["transf_center_type"]
use_transf_center = not (transf_type == "translate" or tc_type == "pivot"
or (cs_type == "local" and tc_type == "cs_origin"))
if use_transf_center:
self._pivot.set_pos(Mgr.get("transf_center_pos"))
while objs:
for obj in objs:
other_objs = objs[:]
other_objs.remove(obj)
ancestor_found = False
for other_obj in other_objs:
if other_obj in obj.ancestors:
ancestor_found = True
break
if not ancestor_found:
node = obj.origin if target_type == "geom" else obj.pivot
ref_node = GD.world if rel_to_world else (node
if grid_origin is None else grid_origin)
if use_transf_center:
quat = node.get_quat(GD.world)
scale = node.get_scale(GD.world)
self._pivot.set_quat_scale(quat, scale)
parent_node = node.parent
node.wrt_reparent_to(self._pivot)
value_setter(self._pivot, ref_node, value)
node.wrt_reparent_to(parent_node)
else:
value_setter(node, ref_node, value)
objs.remove(obj)
if use_transf_center:
self._pivot.clear_transform()
if target_type != "geom":
if Mgr.get("coord_sys_obj") in objs_to_transform:
Mgr.do("notify_coord_sys_transformed")
for obj in objs_to_transform:
Mgr.do("update_obj_transf_info", obj.id, [transf_type])
Mgr.do("update_obj_link_viz")
Mgr.do("reset_obj_transf_info")
def finalize_transform_component(self, objs_to_transform, transf_type, is_rel_value,
add_to_hist=True, state="done"):
if is_rel_value:
self.finalize_transform(objs_to_transform, add_to_hist=add_to_hist, state=state)
else:
self.update_center_pos()
Mgr.get("transf_gizmo").pos = Mgr.get("transf_center_pos")
target_type = GD["transform_target_type"]
if target_type != "geom":
if Mgr.get("coord_sys_obj") in objs_to_transform:
Mgr.do("update_coord_sys")
if len(self._objs) == 1:
Mgr.update_remotely("transform_values", objs_to_transform[0].transform_values)
for obj in objs_to_transform:
obj.update_group_bbox()
if target_type in ("geom", "links", "no_children"):
Mgr.do("update_group_bboxes", [obj.id])
if add_to_hist:
self.__add_history(objs_to_transform, transf_type)
def update_transform_values(self):
if len(self._objs) == 1:
Mgr.update_remotely("transform_values", self._objs[0].transform_values)
def init_translation(self, objs_to_transform):
target_type = GD["transform_target_type"]
grid_origin = Mgr.get("grid").origin
cs_obj = Mgr.get("coord_sys_obj")
if cs_obj in objs_to_transform and target_type != "geom":
Mgr.do("notify_coord_sys_transformed")
if GD["coord_sys_type"] == "local":
if target_type == "geom":
self._start_mats = [Mat4(obj.origin.get_mat())
for obj in objs_to_transform]
else:
self._start_mats = [obj.pivot.get_mat(grid_origin)
for obj in objs_to_transform]
else:
self._pivot_used = True
self._pivot.set_pos(grid_origin, 0., 0., 0.)
if target_type == "geom":
for obj in objs_to_transform:
obj.origin.wrt_reparent_to(self._pivot)
else:
for obj in objs_to_transform:
obj.pivot.wrt_reparent_to(self._pivot)
for obj in objs_to_transform:
Mgr.do("update_obj_transf_info", obj.id, ["translate"])
def translate(self, objs_to_transform, translation_vec):
grid_origin = Mgr.get("grid").origin
target_type = GD["transform_target_type"]
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
if tc_type == "adaptive":
adaptive_tc_type = Mgr.get("adaptive_transf_center_type")
else:
adaptive_tc_type = ""
if cs_type == "local":
cs_obj = Mgr.get("coord_sys_obj")
vec_local = cs_obj.pivot.get_relative_vector(grid_origin, translation_vec)
if target_type == "geom":
for obj, start_mat in zip(objs_to_transform, self._start_mats):
orig = obj.origin
pivot = obj.pivot
pivot_mat = pivot.get_mat(grid_origin)
mat = start_mat * Mat4.translate_mat(translation_vec) * pivot_mat
orig.set_mat(grid_origin, mat)
else:
for obj, start_mat in zip(objs_to_transform, self._start_mats):
obj.pivot.set_pos(grid_origin, start_mat.xform_point(vec_local))
else:
self._pivot.set_pos(grid_origin, Point3(translation_vec))
if GD["object_links_shown"] and target_type != "geom":
Mgr.do("update_obj_link_viz")
def init_rotation(self, objs_to_transform):
target_type = GD["transform_target_type"]
grid_origin = Mgr.get("grid").origin
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
tc_pos = Mgr.get("transf_center_pos")
cs_obj = Mgr.get("coord_sys_obj")
if tc_type == "adaptive":
adaptive_tc_type = Mgr.get("adaptive_transf_center_type")
else:
adaptive_tc_type = ""
if cs_obj in objs_to_transform and target_type != "geom":
Mgr.do("notify_coord_sys_transformed")
if tc_type == "pivot" or adaptive_tc_type == "pivot":
if target_type == "geom":
if cs_type == "local":
self._start_mats = [Mat4(obj.origin.get_mat())
for obj in objs_to_transform]
else:
self._start_mats = [Mat4(obj.origin.get_mat())
for obj in objs_to_transform]
else:
if cs_type == "local":
self._start_mats = [obj.pivot.get_mat(grid_origin)
for obj in objs_to_transform]
else:
self._start_quats = [obj.pivot.get_quat(grid_origin)
for obj in objs_to_transform]
elif cs_type == "local":
if target_type == "geom":
self._start_mats = [Mat4(obj.origin.get_mat()) for obj in objs_to_transform]
if tc_type != "cs_origin":
self._offset_vecs = [Point3() - obj.pivot.get_relative_point(GD.world, tc_pos)
for obj in objs_to_transform]
else:
self._start_mats = [obj.pivot.get_mat(grid_origin) for obj in objs_to_transform]
if tc_type != "cs_origin":
self._offset_vecs = [Point3() - obj.pivot.get_relative_point(GD.world, tc_pos)
for obj in objs_to_transform]
else:
self._pivot_used = True
self._pivot.set_pos(tc_pos)
self._pivot.set_hpr(grid_origin, 0., 0., 0.)
if target_type == "geom":
for obj in objs_to_transform:
obj.origin.wrt_reparent_to(self._pivot)
else:
for obj in objs_to_transform:
obj.pivot.wrt_reparent_to(self._pivot)
for obj in objs_to_transform:
Mgr.do("update_obj_transf_info", obj.id, ["rotate"])
def rotate(self, objs_to_transform, rotation):
grid_origin = Mgr.get("grid").origin
target_type = GD["transform_target_type"]
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
if tc_type == "adaptive":
adaptive_tc_type = Mgr.get("adaptive_transf_center_type")
else:
adaptive_tc_type = ""
if tc_type == "pivot" or adaptive_tc_type == "pivot":
if target_type == "geom":
if cs_type == "local":
for obj, start_mat in zip(objs_to_transform, self._start_mats):
orig = obj.origin
pivot = obj.pivot
pivot_mat = pivot.get_mat(grid_origin)
mat = start_mat * (rotation * pivot_mat)
orig.set_mat(grid_origin, mat)
else:
for obj, start_mat in zip(objs_to_transform, self._start_mats):
mat = Mat4()
rotation.extract_to_matrix(mat)
orig = obj.origin
pivot = obj.pivot
pivot_mat = pivot.get_mat(grid_origin)
pivot_mat.set_row(3, VBase3())
mat = start_mat * pivot_mat * mat * Mat4.translate_mat(pivot.get_pos(grid_origin))
orig.set_mat(grid_origin, mat)
else:
if cs_type == "local":
for obj, start_mat in zip(objs_to_transform, self._start_mats):
mat = rotation * start_mat
obj.pivot.set_mat(grid_origin, mat)
else:
for obj, start_quat in zip(objs_to_transform, self._start_quats):
quat = start_quat * rotation
obj.pivot.set_quat(grid_origin, quat)
elif cs_type == "local":
tc_pos = Mgr.get("transf_center_pos")
if target_type == "geom":
if tc_type == "cs_origin":
for obj, start_mat in zip(objs_to_transform, self._start_mats):
orig = obj.origin
pivot = obj.pivot
pivot_mat = pivot.get_mat(grid_origin)
mat = start_mat * (rotation * pivot_mat)
orig.set_mat(grid_origin, mat)
else:
for obj, start_mat, start_vec in zip(objs_to_transform, self._start_mats,
self._offset_vecs):
orig = obj.origin
pivot = obj.pivot
pivot_mat = pivot.get_mat(grid_origin)
mat = rotation * pivot_mat
vec = pivot_mat.xform_vec(rotation.xform(start_vec))
mat.set_row(3, grid_origin.get_relative_point(GD.world, tc_pos) + vec)
mat = start_mat * mat
orig.set_mat(grid_origin, mat)
else:
if tc_type == "cs_origin":
for obj, start_mat in zip(objs_to_transform, self._start_mats):
mat = rotation * start_mat
obj.pivot.set_mat(grid_origin, mat)
else:
for obj, start_mat, start_vec in zip(objs_to_transform, self._start_mats,
self._offset_vecs):
pivot = obj.pivot
mat = rotation * start_mat
pivot.set_mat(grid_origin, mat)
vec = GD.world.get_relative_vector(grid_origin,
start_mat.xform_vec(rotation.xform(start_vec)))
pivot.set_pos(GD.world, tc_pos + vec)
else:
self._pivot.set_quat(grid_origin, rotation)
if GD["object_links_shown"] and target_type != "geom":
Mgr.do("update_obj_link_viz")
def init_scaling(self, objs_to_transform):
grid_origin = Mgr.get("grid").origin
tc_type = GD["transf_center_type"]
tc_pos = Mgr.get("transf_center_pos")
cs_type = GD["coord_sys_type"]
cs_obj = Mgr.get("coord_sys_obj")
if tc_type == "adaptive":
adaptive_tc_type = Mgr.get("adaptive_transf_center_type")
else:
adaptive_tc_type = ""
if cs_obj in objs_to_transform:
Mgr.do("notify_coord_sys_transformed")
if tc_type == "pivot" or adaptive_tc_type == "pivot":
self._start_mats = [obj.pivot.get_mat(grid_origin) for obj in objs_to_transform]
if cs_type != "local":
self._start_positions = [obj.pivot.get_pos() for obj in objs_to_transform]
elif cs_type == "local":
self._start_mats = [obj.pivot.get_mat(grid_origin) for obj in objs_to_transform]
if tc_type != "cs_origin":
self._offset_vecs = [Point3() - obj.pivot.get_relative_point(GD.world, tc_pos)
for obj in objs_to_transform]
else:
import threading
import shlex
from guppyproxy.util import max_len_str, query_to_str, display_error_box, display_info_box, display_req_context, hostport, method_color, sc_color, DisableUpdates, host_color
from guppyproxy.proxy import HTTPRequest, RequestContext, InvalidQuery, SocketClosed, time_to_nsecs, ProxyThread
from guppyproxy.reqview import ReqViewWidget
from guppyproxy.reqtree import ReqTreeView
from PyQt5.QtWidgets import QWidget, QTableWidget, QTableWidgetItem, QGridLayout, QHeaderView, QAbstractItemView, QVBoxLayout, QHBoxLayout, QComboBox, QTabWidget, QPushButton, QLineEdit, QStackedLayout, QToolButton, QCheckBox, QLabel, QTableView, QMenu
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QVariant, Qt, QAbstractTableModel, QModelIndex, QItemSelection, QSortFilterProxyModel
from itertools import groupby, count
def get_field_entry():
dropdown = QComboBox()
dropdown.addItem("Anywhere", "all")
dropdown.addItem("Req. Body", "reqbody")
dropdown.addItem("Rsp. Body", "rspbody")
dropdown.addItem("Any Body", "body")
# dropdown.addItem("WSMessage", "wsmessage")
dropdown.addItem("Req. Header", "reqheader")
dropdown.addItem("Rsp. Header", "rspheader")
dropdown.addItem("Any Header", "header")
dropdown.addItem("Method", "method")
dropdown.addItem("Host", "host")
dropdown.addItem("Path", "path")
dropdown.addItem("URL", "url")
dropdown.addItem("Status", "statuscode")
dropdown.addItem("Tag", "tag")
dropdown.addItem("Any Param", "param")
dropdown.addItem("URL Param", "urlparam")
dropdown.addItem("Post Param", "postparam")
dropdown.addItem("Rsp. Cookie", "rspcookie")
dropdown.addItem("Req. Cookie", "reqcookie")
dropdown.addItem("Any Cookie", "cookie")
# dropdown.addItem("After", "")
# dropdown.addItem("Before", "")
# dropdown.addItem("TimeRange", "")
# dropdown.addItem("Id", "")
return dropdown
def get_string_cmp_entry():
dropdown = QComboBox()
dropdown.addItem("cnt.", "contains")
dropdown.addItem("cnt. (rgx)", "containsregexp")
dropdown.addItem("is", "is")
dropdown.addItem("len. >", "lengt")
dropdown.addItem("len. <", "lenlt")
dropdown.addItem("len. =", "leneq")
return dropdown
class StringCmpWidget(QWidget):
returnPressed = pyqtSignal()
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
layout = QHBoxLayout()
self.cmp_entry = get_string_cmp_entry()
self.text_entry = QLineEdit()
self.text_entry.returnPressed.connect(self.returnPressed)
layout.addWidget(self.cmp_entry)
layout.addWidget(self.text_entry)
self.setLayout(layout)
self.layout().setContentsMargins(0, 0, 0, 0)
def get_value(self):
str_cmp = self.cmp_entry.itemData(self.cmp_entry.currentIndex())
str_val = self.text_entry.text()
return [str_cmp, str_val]
def reset(self):
self.cmp_entry.setCurrentIndex(0)
self.text_entry.setText("")
def dt_sort_key(r):
if r.time_start:
return time_to_nsecs(r.time_start)
return 0
class StringKVWidget(QWidget):
returnPressed = pyqtSignal()
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.str2_shown = False
self.str1 = StringCmpWidget()
self.str2 = StringCmpWidget()
self.str1.returnPressed.connect(self.returnPressed)
self.str2.returnPressed.connect(self.returnPressed)
self.toggle_button = QToolButton()
self.toggle_button.setText("+")
self.toggle_button.clicked.connect(self._show_hide_str2)
layout = QHBoxLayout()
layout.addWidget(self.str1)
layout.addWidget(self.str2)
layout.addWidget(self.toggle_button)
self.str2.setVisible(self.str2_shown)
self.setLayout(layout)
self.layout().setContentsMargins(0, 0, 0, 0)
@pyqtSlot()
def _show_hide_str2(self):
if self.str2_shown:
self.toggle_button.setText("+")
self.str2_shown = False
else:
self.toggle_button.setText("-")
self.str2_shown = True
self.str2.setVisible(self.str2_shown)
def get_value(self):
retval = self.str1.get_value()
if self.str2_shown:
retval += self.str2.get_value()
return retval
def reset(self):
self.str1.reset()
self.str2.reset()
class DropdownFilterEntry(QWidget):
# a widget that lets you enter filters using ezpz dropdowns/text boxes
filterEntered = pyqtSignal(list)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
layout = QHBoxLayout()
confirm = QToolButton()
confirm.setText("OK")
confirm.setToolTip("Apply the entered filter")
self.field_entry = get_field_entry()
# stack containing widgets for string, k/v, date, daterange
self.str_cmp_entry = StringCmpWidget()
self.kv_cmp_entry = StringKVWidget()
self.inv_entry = QCheckBox("inv")
# date
# daterange
self.entry_layout = QStackedLayout()
self.entry_layout.setContentsMargins(0, 0, 0, 0)
self.current_entry = 0
self.entry_layout.addWidget(self.str_cmp_entry)
self.entry_layout.addWidget(self.kv_cmp_entry)
# add date # 2
# add daterange # 3
confirm.clicked.connect(self.confirm_entry)
self.str_cmp_entry.returnPressed.connect(self.confirm_entry)
self.kv_cmp_entry.returnPressed.connect(self.confirm_entry)
self.field_entry.currentIndexChanged.connect(self._display_value_widget)
layout.addWidget(confirm)
layout.addWidget(self.inv_entry)
layout.addWidget(self.field_entry)
layout.addLayout(self.entry_layout)
self.setLayout(layout)
self.setContentsMargins(0, 0, 0, 0)
self._display_value_widget()
@pyqtSlot()
def _display_value_widget(self):
# show the correct value widget in the value stack layout
field = self.field_entry.itemData(self.field_entry.currentIndex())
self.current_entry = 0
if field in ("all", "reqbody", "rspbody", "body", "wsmessage", "method",
"host", "path", "url", "statuscode", "tag"):
self.current_entry = 0
elif field in ("reqheader", "rspheader", "header", "param", "urlparam"
"postparam", "rspcookie", "reqcookie", "cookie"):
self.current_entry = 1
# elif for date
# elif for daterange
self.entry_layout.setCurrentIndex(self.current_entry)
def get_value(self):
val = []
if self.inv_entry.isChecked():
val.append("inv")
field = self.field_entry.itemData(self.field_entry.currentIndex())
val.append(field)
if self.current_entry == 0:
val += self.str_cmp_entry.get_value()
elif self.current_entry == 1:
val += self.kv_cmp_entry.get_value()
# elif for date
# elif for daterange
return [val] # no support for OR
@pyqtSlot()
def confirm_entry(self):
phrases = self.get_value()
self.filterEntered.emit(phrases)
self.str_cmp_entry.reset()
self.kv_cmp_entry.reset()
# reset date
# reset date range
class TextFilterEntry(QWidget):
# a text box that can be used to enter filters
filterEntered = pyqtSignal(list)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
layout = QHBoxLayout()
self.textEntry = QLineEdit()
self.textEntry.returnPressed.connect(self.confirm_entry)
self.textEntry.setToolTip("Enter the filter here and press return to apply it")
layout.addWidget(self.textEntry)
self.setLayout(layout)
self.layout().setContentsMargins(0, 0, 0, 0)
@pyqtSlot()
def confirm_entry(self):
args = shlex.split(self.textEntry.text())
phrases = [list(group) for k, group in groupby(args, lambda x: x == "OR") if not k]
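# Illustrative example (hypothetical filter text): "host is example.com OR path contains /api"
# is shlex-split into tokens and grouped on the "OR" separators, yielding
# [['host', 'is', 'example.com'], ['path', 'contains', '/api']].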
self.filterEntered.emit(phrases)
self.textEntry.setText("")
class FilterEntry(QWidget):
# a widget that lets you switch between filter entries
filterEntered = pyqtSignal(list)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.current_entry = 0
self.max_entries = 2
self.text_entry = TextFilterEntry()
dropdown_entry = DropdownFilterEntry()
self.text_entry.filterEntered.connect(self.filterEntered)
dropdown_entry.filterEntered.connect(self.filterEntered)
self.entry_layout = QStackedLayout()
self.entry_layout.addWidget(dropdown_entry)
self.entry_layout.addWidget(self.text_entry)
swap_button = QToolButton()
swap_button.setText(">")
swap_button.setToolTip("Switch between dropdown and text entry")
swap_button.clicked.connect(self.next_entry)
hlayout = QHBoxLayout()
hlayout.addWidget(swap_button)
hlayout.addLayout(self.entry_layout)
self.setLayout(hlayout)
self.layout().setContentsMargins(0, 0, 0, 0)
self.layout().setSpacing(0)
@pyqtSlot()
def next_entry(self):
self.current_entry += 1
self.current_entry = self.current_entry % self.max_entries
self.entry_layout.setCurrentIndex(self.current_entry)
def set_entry(self, entry):
self.current_entry = entry
self.current_entry = self.current_entry % self.max_entries
self.entry_layout.setCurrentIndex(self.current_entry)
class FilterListWidget(QTableWidget):
# list part of the filter tab
def __init__(self, *args, **kwargs):
self.client = kwargs.pop("client")
QTableWidget.__init__(self, *args, **kwargs)
self.context = RequestContext(self.client)
# Set up table
self.setColumnCount(1)
self.horizontalHeader().hide()
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.verticalHeader().hide()
self.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
#self.setSelectionMode(QAbstractItemView.NoSelection)
#self.setEditTriggers(QAbstractItemView.NoEditTriggers)
def append_fstr(self, fstr):
args = shlex.split(fstr)
phrase = [list(group) for k, group in groupby(args, lambda x: x == "OR") if not k]
self.context.apply_phrase(phrase)
self._append_fstr_row(fstr)
def set_query(self, query):
self.context.set_query(query)
self.redraw_table()
def pop_phrase(self):
self.context.pop_phrase()
self.redraw_table()
def clear_phrases(self):
self.context.set_query([])
self.redraw_table()
def _append_fstr_row(self, fstr):
row = self.rowCount()
self.insertRow(row)
self.setItem(row, 0, QTableWidgetItem(fstr))
def redraw_table(self):
self.setRowCount(0)
query = self.context.query
for p in query:
condstrs = [' '.join(l) for l in p]
fstr = ' OR '.join(condstrs)
self._append_fstr_row(fstr)
def get_query(self):
return self.context.query
class FilterEditor(QWidget):
# a widget containing a list of filters and the ability to edit the filters in the list
filtersEdited = pyqtSignal(list)
builtin_filters = (
('No Images', ['inv', 'path', 'containsregexp', r'(\.png$|\.jpg$|\.jpeg$|\.gif$|\.ico$|\.bmp$|\.svg$)']),
('No JavaScript/CSS/Fonts', ['inv', 'path', 'containsregexp', r'(\.js$|\.css$|\.woff$)']),
)
def __init__(self, *args, **kwargs):
self.client = kwargs.pop("client")
QWidget.__init__(self, *args, **kwargs)
layout = QVBoxLayout()
# Manage bar
manage_bar = QHBoxLayout()
pop_button = QPushButton("Pop")
pop_button.setToolTip("Remove the most recently applied filter")
clear_button = QPushButton("Clear")
clear_button.setToolTip("Remove all active filters")
scope_reset_button = QPushButton("Scope")
scope_reset_button.setToolTip("Set the active filters to the current scope")
scope_save_button = QPushButton("Save Scope")
scope_save_button.setToolTip("Set the scope to the current filters. Any messages that don't match the active filters will be ignored by the proxy.")
self.builtin_combo = QComboBox()
self.builtin_combo.addItem("Apply a built-in filter", None)
for desc, filt in FilterEditor.builtin_filters:
self.builtin_combo.addItem(desc, filt)
self.builtin_combo.currentIndexChanged.connect(self._apply_builtin_filter)
manage_bar.addWidget(clear_button)
manage_bar.addWidget(pop_button)
manage_bar.addWidget(scope_reset_button)
manage_bar.addWidget(scope_save_button)
manage_bar.addWidget(self.builtin_combo)
manage_bar.addStretch()
mbar_widget = QWidget()
mbar_widget.setLayout(manage_bar)
pop_button.clicked.connect(self.pop_phrase)
clear_button.clicked.connect(self.clear_phrases)
scope_reset_button.clicked.connect(self.reset_to_scope)
scope_save_button.clicked.connect(self.save_scope)
# Filter list
self.filter_list = FilterListWidget(client=self.client)
# Filter entry
self.entry = FilterEntry()
self.entry.setMaximumHeight(self.entry.sizeHint().height())
self.entry.filterEntered.connect(self.apply_phrase)
layout.addWidget(mbar_widget)
layout.addWidget(self.filter_list)
layout.addWidget(self.entry)
self.setLayout(layout)
self.layout().setSpacing(0)
self.layout().setContentsMargins(0, 0, 0, 0)
@pyqtSlot()
def save_scope(self):
query = self.filter_list.get_query()
self.client.set_scope(query)
display_info_box("Scope updated")
@pyqtSlot()
def reset_to_scope(self):
query = self.client.get_scope().filter
self.filter_list.set_query(query)
self.filtersEdited.emit(self.filter_list.get_query())
@pyqtSlot()
def clear_phrases(self):
self.filter_list.clear_phrases()
self.filtersEdited.emit(self.filter_list.get_query())
@pyqtSlot()
def pop_phrase(self):
self.filter_list.pop_phrase()
self.filtersEdited.emit(self.filter_list.get_query())
@pyqtSlot(list)
def apply_phrase(self, phrase):
fstr = query_to_str([phrase])
try:
self.filter_list.append_fstr(fstr)
except InvalidQuery as e:
display_error_box("Could not add filter:\n\n%s" % e)
return
self.filtersEdited.emit(self.filter_list.get_query())
@pyqtSlot(int)
def _apply_builtin_filter(self, ind):
phrase = self.builtin_combo.itemData(ind)
if phrase:
self.apply_phrase([phrase])
self.builtin_combo.setCurrentIndex(0)
def set_is_text(self, is_text):
if is_text:
self.entry.set_entry(1)
else:
self.entry.set_entry(0)
class ReqListModel(QAbstractTableModel):
requestsLoading = pyqtSignal()
requestsLoaded = pyqtSignal()
HD_ID = 0
HD_VERB = 1
HD_HOST = 2
HD_PATH = 3
HD_SCODE = 4
HD_REQLEN = 5
HD_RSPLEN = 6
HD_TIME = 7
HD_TAGS = 8
HD_MNGL = 9
def __init__(self, client, *args, **kwargs):
QAbstractTableModel.__init__(self, *args, **kwargs)
self.client = client
self.header_order = [
self.HD_ID,
self.HD_VERB,
self.HD_HOST,
self.HD_PATH,
self.HD_SCODE,
self.HD_REQLEN,
self.HD_RSPLEN,
self.HD_TIME,
self.HD_TAGS,
self.HD_MNGL,
]
self.table_headers = {
self.HD_ID: "ID",
self.HD_VERB: "Method",
self.HD_HOST: "Host",
self.HD_PATH: "Path",
self.HD_SCODE: "S-Code",
self.HD_REQLEN: "Req Len",
self.HD_RSPLEN: "Rsp Len",
self.HD_TIME: "Time",
self.HD_TAGS: "Tags",
self.HD_MNGL: "Mngl",
}
self.reqs = []
self.sort_enabled = False
self.header_count = len(self.header_order)
self.row_count = len(self.reqs)
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
hd = self.header_order[section]
return self.table_headers[hd]
return QVariant()
def rowCount(self, parent):
return self.row_count
def columnCount(self, parent):
return self.header_count
def _gen_req_row(self, req):
MAX_PATH_LEN = 60
MAX_TAG_LEN = 40
reqid = self.client.get_reqid(req)
method = req.method
host = hostport(req)
path = max_len_str(req.url.path, MAX_PATH_LEN)
reqlen = str(req.content_length)
tags = max_len_str(', '.join(sorted(req.tags)), MAX_TAG_LEN)
if req.response:
scode = str(req.response.status_code) + ' ' + req.response.reason
rsplen = str(req.response.content_length)
else:
scode = "--"
rsplen = "--"
if req.time_start and req.time_end:
time_delt = req.time_end - req.time_start
reqtime = ("%.2f" % time_delt.total_seconds())
else:
reqtime = "--"
if req.unmangled and req.response and req.response.unmangled:
manglestr = "q/s"
elif req.unmangled:
manglestr = "q"
elif req.response and req.response.unmangled:
manglestr = "s"
else:
manglestr = "N/A"
return (req, reqid, method, host, path, scode, reqlen, rsplen, reqtime, tags, manglestr)
def data(self, index, role):
if role == Qt.BackgroundColorRole:
req = self.reqs[index.row()][0]
if index.column() == 2:
return host_color(hostport(req))
elif index.column() == 4:
if req.response:
return sc_color(str(req.response.status_code))
elif index.column() == 1:
return method_color(req.method)
return QVariant()
elif role == Qt.DisplayRole:
rowdata = self.reqs[index.row()]
return rowdata[index.column()+1]
return QVariant()
def _sort_reqs(self):
def skey(rowdata):
return dt_sort_key(rowdata[0])
if self.sort_enabled:
self.reqs = | |
import copy, pickle, re
from shlex import split as shlexsplit
from . import config
from .helpers import *
from .errors import AssertionError, UnRecognisedCSVFormatError, UnrecognisedFileFormatError, ArgumentError
class _base_genelist:
def __init__(self):
"""
(Internal)
This is the base derived class for all genelists.
It contains methods available to all implementations of genelist.
"""
self.name = None
self.linearData = None
def __repr__(self):
return("<base genelist class>")
def __contains__(self, key):
"""
(Override)
Confer:
if "key" in genelist:
"""
return(key in list(self.keys()))
def __bool__(self):
"""
Fixes:
if genelist: # contains something
True
and fixes:
len(genelist) = 0
if genelist: # Would pass even if the genelist is empty
False
"""
return(len(self) > 0)
#def __copy__(self):
# raise Exception, "__copy__() is NOT supported for genelists, use gl.deepcopy() or gl.shallowcopy()"
def __shallowcopy__(self):
raise Exception("__shallowcopy__() is NOT supposrted for genelists, use gl.deepcopy() or gl.shallowcopy()")
def __deepcopy__(self, fake_arg):
raise Exception("__deepcopy__() is NOT supported for genelists, use gl.deepcopy() or gl.shallowcopy()")
def deepcopy(self):
"""
Confer copy to mean a deepcopy as opposed to a shallowcopy.
This is required as genelists are compound lists.
"""
return(pickle.loads(pickle.dumps(self, -1))) # This is 2-3x faster and presumably uses less memory
def shallowcopy(self):
"""
(New)
Some weird behaviour here, I know, this is so I can still get access to
the shallow copy mechanism even though 90% of the operations are copies.
"""
return(copy.copy(self)) # But doesn't this just call __copy__() anyway?
def __len__(self):
"""
(Override)
get the length of the list
"""
return(len(self.linearData))
def __int__(self):
"""
(Override)
get the length of the list
NOTE: It's possible this is a bug/feature.
I don't remove it at the moment as I'm not sure if it is used anywhere.
"""
return(len(self.linearData))
def __iter__(self):
"""
(Override)
make the geneList behave like a normal iterator (list)
"""
for n in self.linearData:
yield n
def __getitem__(self, index):
"""
(Override)
confers a = geneList[0] behaviour
This is a very slow way to access the data, and may be a little inconsistent in the things
it returns.
NOTE:
a = genelist[0] # returns a single dict
a = genelist[0:10] # returns a new 10 item normal python list.
a = genelist["name"] returns a python list containing a vertical slice of all of the "name" keys
"""
newl = False
if isinstance(index, int):
# this should return a single dictionary.
return(self.linearData[index])
elif isinstance(index, str):
# returns all labels with that item.
return(self._findAllLabelsByKey(index))
elif isinstance(index, slice):
# returns a new genelist corresponding to the slice.
newl = self.shallowcopy()
newl.linearData = utils.qdeepcopy(self.linearData[index]) # separate the data so it can be modified.
newl._optimiseData()
return(newl) # deep copy the slice.
def __setitem__(self, index, *args):
"""
(Override)
Block key editing.
"""
raise AssertionError("Cannot modify list in-place")
def __hash__(self):
"""
(Override)
compute a sensible hash value
"""
try:
return(hash(self.name + str(self[0]) + str(self[-1]) + str(len(self)))) # hash data for comparison.
except Exception:
try:
return(hash(self.name + str(self[0]) + str(self[-1]))) # len() probably not available (delayedlist?).
except Exception: # I bet the list is empty.
return(hash(self.name))
def __and__(self, gene_list):
"""
(Override)
confer and like behaviour: c = a & b
"""
if not self.__eq__(gene_list):
return(geneList()) # returns an empty list.
newl = self.shallowcopy()
newl.linearData = []
for item1 in self.linearData:
for item2 in gene_list.linearData:
if item1 == item2:
newl.linearData.append(copy.deepcopy(item1))
newl._optimiseData()
return(newl)
def __or__(self, gene_list):
"""
(Override)
confer append like behaviour: c = a | b
OR does not keep duplicates.
"""
if not self.__eq__(gene_list): return(geneList())
alist = self.linearData + gene_list.linearData
# remove conserved duplicates;
ulist = []
newl = self.shallowcopy()
newl.linearData = [] # start from an empty list so appending below does not mutate self.linearData
for item in alist:
if item not in ulist:
ulist.append(item)
newl.linearData.append(copy.deepcopy(item))
newl._optimiseData()
return(newl)
def __add__(self, gene_list):
"""
(Override)
confer append like behaviour: c = a + b
keeps duplicates (just concatenate's lists)
"""
mkeys = self._collectIdenticalKeys(gene_list)
if not mkeys: # unable to match.
config.log.warning("No matching keys, the resulting list would be meaningless")
return(False)
newl = self.deepcopy()
newl.linearData.extend(copy.deepcopy(gene_list.linearData))
newl._optimiseData()
return(newl)
def __sub__(self, gene_list):
"""
(Override)
confer c = a - b ability.
Actually xor?
"""
mkeys = self._collectIdenticalKeys(gene_list)
if not mkeys: # unable to match.
config.warning("Warning: No matching keys, unable to perform subtraction")
return(False)
newl = self.shallowcopy()
newl.linearData = []
dontAdd = False
for item in self.linearData: # do a map here...
for item2 in gene_list.linearData:
for k in mkeys:
if item[k] == item2[k]:
dontAdd = True
else:
dontAdd = False # all mkeys must match
if not dontAdd:
newl.linearData.append(copy.deepcopy(item))
dontAdd = False
newl._optimiseData()
return(newl)
def __eq__(self, gene_list):
"""
(Internal)
Are the lists equivalent?
Two lists are considered equivalent if they share at least one key.
Note: this only compares the keys, not the list contents.
"""
# check the hash's first to see if they are identical.
# This is disabled as it can be very slow.
#if self.__hash__() == gene_list.__hash__():
# return(True)
for key in self.linearData[0]:
if key in gene_list.linearData[0]:
return(True) # just one key in common required.
return(False)
def __ne__(self, gene_list):
"""
(Internal)
Are the lists equivalent?
ie do they have the same keys?
"""
return(not self.__eq__(gene_list))
def keys(self):
"""
return a list of all the valid keys for this geneList
"""
return([key for key in self.linearData[0]]) # Not exhaustive
def _guessDataType(self, value):
"""
(Internal)
Take a guess at the most reasonable datatype to store value as.
returns the resulting data type based on a list of logical coercions
(each coercion is tried in turn, falling through to the next on failure).
Used internally in _loadCSV()
I expect this will get larger and larger with new datatypes, so it's here
as a separate function.
Datatype coercion preference:
float > list > int > location > string
"""
try: # see if the element is a float()
if "." in value: # if no decimal point, prefer to save as a int.
return float(value)
elif 'e' in value: # See if we can coocere from scientific notation
return float(value)
else:
raise ValueError
except ValueError:
try:
# Potential error here if it is a list of strings?
if '[' in value and ']' in value and ',' in value and '.' in value: # Probably a Python list of floats
return [float(i) for i in value.strip(']').strip('[').split(',')]
elif '[' in value and ']' in value and ',' in value: # Probably a Python list of ints
return [int(i) for i in value.strip(']').strip('[').split(',')]
else:
raise ValueError
except ValueError:
try: # see if it's actually an int?
return int(value)
except ValueError:
try: # see if I can coerce it into a location:
return location(loc=value)
except (TypeError, IndexError, AttributeError, AssertionError, ValueError): # this is not working, just store it as a string
return str(value).strip()
return("") # return an empty datatype.
# I think it is possible to get here. If the exception at int() or float() returns something other than a
# ValueError (Unlikely, Impossible?)
def _processKey(self, format, column):
"""
(Internal)
the inner part of _loadCSV() to determine what to do with the key.
Better in here too for security.
"""
d = {}
for key in format:
if not (key in ignorekeys): # ignore these tags
#if not key in d:
# d[key] = {}
if '__ignore_empty_columns' in format and format['__ignore_empty_columns']:
# check the column exists, if not, pad in an empty value
try:
column[format[key]]
except IndexError:
d[key] = '' # Better than None for downstream compatibility
continue
if isinstance(format[key], dict) and "code" in format[key]:
# a code block insertion goes here - any valid lib and one line python code fragment
# store it as a dict with the key "code"
d[key] = eval(format[key]["code"])
elif isinstance(format[key], str) and "location" in format[key]:
# locations are very common, add support for them out of the box:
d[key] = eval(format[key])
else:
d[key] = self._guessDataType(column[format[key]])
elif key == "gtf_decorators": # special exceptions for gtf files
gtf = column[format["gtf_decorators"]].strip()
for item in gtf.split("; "):
if item:
item = item.strip()
ss = shlexsplit(item)
key = ss[0]
value = ss[1].strip('"')
d[key] = self._guessDataType(value)
return(d)
def save(self, filename=None, compressed=False):
"""
**Purpose**
Save the genelist as a binary representation.
This | |
0
while k < noutliers:
r = 0
while r < nrealizations:
y = util.getmeasurements(a, x, noisetype, var, outliers[k]) # get the measurements
# -------- ls solution
xhat = inv.leastsquares(y, a) # solving the problem with ls
error = np.linalg.norm(x - xhat)
averrorls[k] += error
# -------- m estimated scale solution
xpreliminary = xhat # we take the ls to estimate a preliminary scale
respreliminary = y - np.dot(a, xpreliminary)
estimatedscale = np.median(np.abs(respreliminary)) / .6745 # robust mad estimator for the scale
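# 0.6745 is the 0.75 quantile of the standard normal distribution, so
# MAD / 0.6745 is a consistent estimate of the noise standard deviation
# when the residuals are Gaussian.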
xhat = inv.mestimator(y, a, 'huber', clippinghuber, estimatedscale) # solving the problem with m
error = np.linalg.norm(x - xhat)
averrormes[k] += error
# -------- m real scale solution
xhat = inv.mestimator(y, a, 'huber', clippinghuber, realscale) # solving the problem with m
error = np.linalg.norm(x - xhat)
averrorm[k] += error
# -------- tau solution
# xhat, scale = inv.fasttau(y, a, 'optimal', clippingopt, ninitialsolutions) # solving the problem with tau
xhat, obj = ln.basictau(
a,
y,
'optimal',
clippingopt,
ninitialx=ninitialsolutions,
maxiter=100,
nbest=1,
lamb=0
)
error = np.linalg.norm(x - xhat)
averrortau[k] += error
r += 1 # update the number of realization
k += 1 # update the outlier proportion index
averrorls = averrorls / nrealizations # compute average
averrorm = averrorm / nrealizations
averrormes = averrormes / nrealizations
averrortau = averrortau / nrealizations
# store results
name_file = 'experiment_one.pkl'
fl = os.path.join(DATA_DIR, name_file)
f = open(fl, 'wb')
pickle.dump([averrorls, averrorm, averrormes, averrortau], f)
f.close()
fig = plt.figure()
plt.plot(outliers, averrorls, 'r--', label='ls')
plt.plot(outliers, averrorm, 'bs--', label='m estimator')
plt.plot(outliers, averrormes, 'g^-', label='m est. scale')
plt.plot(outliers, averrortau, 'kd-', label='tau')
plt.legend(loc=2)
plt.xlabel('% outliers')
plt.ylabel('error')
name_file = 'experiment_one.eps'
fl = os.path.join(FIGURES_DIR, name_file)
fig.savefig(fl, format='eps')
# plt.show()
# -------------------------------------------------------------------
# Experiment 2: l2 regularized case. Comparison LS, M, tau
# -------------------------------------------------------------------
def experimenttwo(nrealizations, outliers, measurementsize, sourcesize, source):
matrixtype = 'illposed' # type of sensing matrix
conditionnumber = 1000 # condition number of the matrix that we want
noisetype = 'outliers' # additive noise
clippinghuber = 1.345 # clipping parameter for the huber function
clippingopt = (0.4, 1.09) # clipping parameters for the opt function in the tau estimator
ninitialsolutions = 50 # how many initial solutions do we want in the tau estimator
realscale = 1
var = 3
x = source # load stored source
# x = util.getsource(sourcetype, sourcesize) # get the ground truth
a = util.getmatrix(sourcesize, matrixtype, measurementsize, conditionnumber) # get the sensing matrix
noutliers = outliers.size
nlmbd = 6 # how many different lambdas are we trying in each case
lmbdls = np.zeros((noutliers, nlmbd)) # every proportion of outliers need a different lambda
lmbdls[0, :] = np.logspace(0, 3, nlmbd) # lambdas for ls
lmbdls[1, :] = np.logspace(7, 10, nlmbd) # lambdas for ls
lmbdls[2, :] = np.logspace(8, 11, nlmbd) # lambdas for ls
lmbdls[3, :] = np.logspace(8, 11, nlmbd) # lambdas for ls
lmbdls[4, :] = np.logspace(9, 11, nlmbd) # lambdas for ls
lmbdm = np.zeros((noutliers, nlmbd)) # every proportion of outliers need a different lambda
lmbdm[0, :] = np.logspace(-1, 1, nlmbd) # lambdas for m
lmbdm[1, :] = np.logspace(-1, 2, nlmbd) # lambdas for m
lmbdm[2, :] = np.logspace(-1, 2, nlmbd) # lambdas for m
lmbdm[3, :] = np.logspace(1, 3.5, nlmbd) # lambdas for m
lmbdm[4, :] = np.logspace(1, 4, nlmbd) # lambdas for m
lmbdmes = np.zeros((noutliers, nlmbd)) # every proportion of outliers need a different lambda
lmbdmes[0, :] = np.logspace(1, 4, nlmbd) # lambdas for m est. scale
lmbdmes[1, :] = np.logspace(4, 6, nlmbd) # lambdas for m est. scale
lmbdmes[2, :] = np.logspace(4, 6, nlmbd) # lambdas for m est. scale
lmbdmes[3, :] = np.logspace(4, 6, nlmbd) # lambdas for m est. scale
lmbdmes[4, :] = np.logspace(4, 6, nlmbd) # lambdas for m est. scale
lmbdtau = np.zeros((noutliers, nlmbd)) # every proportion of outliers need a different lambda
lmbdtau[0, :] = np.logspace(-2, 1, nlmbd) # lambdas for tau
lmbdtau[1, :] = np.logspace(-2, 2, nlmbd) # lambdas for tau
lmbdtau[2, :] = np.logspace(-1, 2, nlmbd) # lambdas for tau
lmbdtau[3, :] = np.logspace(0, 2, nlmbd) # lambdas for tau
lmbdtau[4, :] = np.logspace(2, 4, nlmbd) # lambdas for tau
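# Each estimator gets its own log-spaced lambda grid per outlier proportion;
# the error reported below is the minimum over that grid (np.min over the
# lambda axis), i.e. an oracle choice of the regularization parameter.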
errorls = np.zeros((noutliers, nlmbd, nrealizations)) # store results for ls
errormes = np.zeros((noutliers, nlmbd, nrealizations)) # store results for m with an estimated scale
errorm = np.zeros((noutliers, nlmbd, nrealizations)) # store results for m
errortau = np.zeros((noutliers, nlmbd, nrealizations)) # store results for tau
k = 0
while k < noutliers:
t = 0
print('outliers %s' % k)
while t < nlmbd:
print('lambda %s' % t)
r = 0
while r < nrealizations:
y = util.getmeasurements(a, x, noisetype, var, outliers[k]) # get the measurements
# -------- ls solution
xhat = inv.ridge(y, a, lmbdls[k, t]) # solving the problem with ls
error = np.linalg.norm(x - xhat)
errorls[k, t, r] = error
# -------- m estimated scale solution
xpreliminary = xhat # we take the ls to estimate a preliminary scale
respreliminary = y - np.dot(a, xpreliminary)
estimatedscale = np.median(np.abs(respreliminary)) / .6745 # robust mad estimator for the scale
xhat = inv.mridge(y, a, 'huber', clippinghuber, estimatedscale, lmbdmes[k, t]) # solving the problem with m
error = np.linalg.norm(x - xhat)
errormes[k, t, r] = error
# -------- m real scale solution
xhat = inv.mridge(y, a, 'huber', clippinghuber, realscale, lmbdm[k, t]) # solving the problem with m
error = np.linalg.norm(x - xhat)
errorm[k, t, r] = error
# -------- tau solution
xhat, scale = ln.basictau(
a,
y,
'optimal',
clippingopt,
ninitialx=ninitialsolutions,
maxiter=100,
nbest=1,
regularization=ln.tikhonov_regularization,
lamb=lmbdtau[k, t]
)
error = np.linalg.norm(x - xhat)
errortau[k, t, r] = error
r += 1 # update the number of realization
t += 1 # update the number of lambda that we are trying
k += 1 # update the outlier proportion index
minls = np.min(errorls, 1)
minm = np.min(errorm, 1)
minmes = np.min(errormes, 1)
mintau = np.min(errortau, 1)
avgls = np.mean(minls, 1)
avgm = np.mean(minm, 1)
avgmes = np.mean(minmes, 1)
avgtau = np.mean(mintau, 1)
fone, axone = plt.subplots(noutliers, sharex=True) # plots to check if we are getting the best lambda
cnt = 0
while cnt < noutliers:
axone[cnt].plot(lmbdls[0, :], errorls[cnt, :, 1])
axone[cnt].set_xscale('log')
cnt += 1
axone[0].set_title('LS')
# plt.show()
ftwo, axtwo = plt.subplots(noutliers, sharex=True) # plots to check if we are getting the best lambda
cnt = 0
while cnt < noutliers:
axtwo[cnt].plot(lmbdm[0, :], errorm[cnt, :, 1])
axtwo[cnt].set_xscale('log')
cnt += 1
axtwo[0].set_title('M estimator')
# plt.show()
fthree, axthree = plt.subplots(noutliers, sharex=True) # plots to check if we are getting the best lambda
cnt = 0
while cnt < noutliers:
axthree[cnt].plot(lmbdmes[0, :], errormes[cnt, :, 1])
axthree[cnt].set_xscale('log')
cnt += 1
axthree[0].set_title('M estimator est. scale')
# plt.show()
ffour, axfour = plt.subplots(noutliers, sharex=True) # plots to check if we are getting the best lambda
cnt = 0
while cnt < noutliers:
axfour[cnt].plot(lmbdtau[0, :], errortau[cnt, :, 1])
axfour[cnt].set_xscale('log')
cnt += 1
axfour[0].set_title('tau estimator')
# plt.show()
# store results
name_file = 'experiment_two.pkl'
fl = os.path.join(DATA_DIR, name_file)
f = open(fl, 'wb')
pickle.dump([avgls, avgm, avgmes, avgtau], f)
f.close()
fig = plt.figure()
plt.plot(outliers, avgls, 'r--', label='ls')
plt.plot(outliers, avgm, 'bs--', label='m estimator')
plt.plot(outliers, avgmes, 'g^-', label='m est. scale')
plt.plot(outliers, avgtau, 'kd-', label='tau')
plt.legend(loc=2)
plt.xlabel('% outliers')
plt.ylabel('error')
name_file = 'experiment_two.eps'
fl = os.path.join(FIGURES_DIR, name_file)
fig.savefig(fl, format='eps')
# plt.show() # show figure
# -------------------------------------------------------------------
# Experiment 3: l1 regularized case. Comparison LS, M, tau
# -------------------------------------------------------------------
def experimentthree(nrealizations, outliers, measurementsize, sourcesize, source):
sourcetype = 'sparse' # kind of source we want
sparsity = 0.2
matrixtype = 'illposed' # type of sensing matrix
conditionnumber = 1000 # condition number of the matrix that we want
noisetype = 'outliers' # additive noise
var = 3 # variance of the noise
clippinghuber = 1.345 # clipping parameter for the huber function
clippingopt = (0.4, 1.09) # clipping parameters for the opt function in the tau estimator
ninitialsolutions = 10 # how many initial solutions do we want in the tau estimator
maxiter = 50
nlmbd = 5 # how many different lambdas are we trying in each case
realscale = | |
"""
todo: Plan to set rules here as to how that HashableClass needs to be
overridden by users .... also do some one time validations here ....
should be done once entire library is loaded
This will solve many piled up todos in code and reduce overhead
Class validation mostly checks CodingErrors:
+ We might want to move class_validate and class_init code here and execute it
rarely to check some rules that we will set here.
+ Also note that some class_validation is placed in init_validate as
dataclass is not yet baked and annotated fields are not seen as
`dataclass.Field` by class validate. We aim to move that code here too.
todo: add a decorator to dataclass where we can configure class about the
things that are to be checked for coding errors. Example:
+ if properties/methods are cached
+ if method/property should never be overridden
+ if class should not be subclassed
+ if subclass should not have extra fields
+ etc.
You can then get this decorator inside init_subclass and run code
validations in one place ;)
Challenge is how decorator from subclasses will override decorator on
parent class.
todo: All LITERAL nested classes and Config class should subclass immediate
parent's nested class
"""
import enum
import types
import dataclasses
import typing as t
from toolcraft import error as e
from toolcraft import util
from .marshalling import YamlRepr, HashableClass, FrozenEnum, Tracker
from .storage import Folder, StorageHashable, FileGroup, NpyFileGroup
from .storage.state import Config, Info, StateFile
LITERAL_CLASS_NAME = "LITERAL"
def check_classes_that_should_not_be_overridden():
_CLASSES_THAT_SHOULD_NOT_BE_OVERRIDDEN = [
Info
]
for cls in _CLASSES_THAT_SHOULD_NOT_BE_OVERRIDDEN:
_sub_classes = cls.available_sub_classes()
if len(_sub_classes) > 1:
e.code.CodingError(
msgs=[
f"You are not allowed to subclass class {cls}",
f"Please check classes: ",
_sub_classes[1:]
]
)
def check_things_to_be_cached(
to_check: dict = None
):
_THINGS_TO_BE_CACHED = {
Tracker: ['internal', ],
HashableClass: [
'hex_hash', 'store_fields_folder'
],
StorageHashable: [
'config', 'info', 'path',
],
Folder: ['items'],
FileGroup: ['file_keys'],
}
if to_check is not None:
_THINGS_TO_BE_CACHED = to_check
for sup_cls, things in _THINGS_TO_BE_CACHED.items():
for cls in sup_cls.available_sub_classes():
for _t in things:
# get
_method_or_prop = getattr(cls, _t)
# if abstract no sense in checking if cached
if getattr(_method_or_prop, '__isabstractmethod__', False):
continue
# check if cached
if not util.is_cached(_method_or_prop):
e.code.CodingError(
msgs=[
f"We expect you to cache property/method `{_t}` "
f"using decorator `@util.CacheResult` in "
f"class {cls}"
]
)
def check_things_not_to_be_cached(
to_check: dict = None
):
_THINGS_NOT_TO_BE_CACHED = {
Tracker: ['is_called', 'iterable_length'],
StateFile: ['is_available'],
}
if to_check is not None:
_THINGS_NOT_TO_BE_CACHED = to_check
for sup_cls, things in _THINGS_NOT_TO_BE_CACHED.items():
for cls in sup_cls.available_sub_classes():
for _t in things:
# get
_method_or_prop = getattr(cls, _t)
# if abstract no sense in checking if cached
if getattr(_method_or_prop, '__isabstractmethod__', False):
continue
# check if cached
if util.is_cached(_method_or_prop):
e.code.CodingError(
msgs=[
f"We expect you not to cache property/method "
f"`{_t}`. Do not use decorator "
f"`@util.CacheResult` in "
f"class {cls} for `{_t}`"
]
)
def check_things_not_to_be_overridden(
to_check: dict = None
):
_THINGS_NOT_TO_BE_OVERRIDDEN = {
YamlRepr: ['yaml'],
HashableClass: ['hex_hash', 'store_fields_folder'],
Folder: ['name'],
NpyFileGroup: ['get_files', 'get_file'],
Tracker: ['is_called'],
}
if to_check is not None:
_THINGS_NOT_TO_BE_OVERRIDDEN = to_check
for sup_cls, things in _THINGS_NOT_TO_BE_OVERRIDDEN.items():
for cls in sup_cls.available_sub_classes():
for _t in things:
if getattr(cls, _t) != getattr(sup_cls, _t):
e.code.CodingError(
msgs=[
f"Please do not override method/property "
f"`{_t}` in class {cls}"
]
)
def check_things_to_be_dataclasses():
_THINGS_TO_BE_DATACLASSES = [
HashableClass, StateFile
]
for sup_cls in _THINGS_TO_BE_DATACLASSES:
for cls in sup_cls.available_sub_classes():
# we expected all subclasses to be decorated with
# dataclass ...
_cls_dict_keys = cls.__dict__.keys()
# noinspection PyProtectedMember
if not (
dataclasses._FIELDS in _cls_dict_keys or
dataclasses._PARAMS in _cls_dict_keys
):
e.code.NotAllowed(
msgs=[
f"You missed to decorate subclass {cls} of {sup_cls} "
f"with `@dataclasses.dataclass` decorator"
]
)
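# A minimal sketch of how these validations might be wired together once the
# full library has been imported (the entry-point name and call order are
# assumptions, not part of this module):
#   def validate_all():
#       check_classes_that_should_not_be_overridden()
#       check_things_to_be_cached()
#       check_things_not_to_be_cached()
#       check_things_not_to_be_overridden()
#       check_things_to_be_dataclasses()
#       check_YamlRepr()
#       check_HashableClass()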
# noinspection PyPep8Naming
def check_everyone_has_logger():
"""
todo: Later
"""
# ------------------------------------------------------01
# make sure that each module has _LOGGER
# noinspection PyPep8Naming
def check_YamlRepr():
_yaml_tag_check = {}
cls: t.Type[YamlRepr]
for cls in YamlRepr.available_sub_classes():
# ------------------------------------------------------01
# make sure LITERAL class is extended properly from the immediate
# parent which has LITERAL class or else go for super
# parent i.e. `YamlRepr.LITERAL`
try:
# noinspection PyUnresolvedReferences
_parent_literal_class = cls.__mro__[1].LITERAL
except AttributeError:
_parent_literal_class = YamlRepr.LITERAL
e.validation.ShouldBeSubclassOf(
value=cls.LITERAL, value_types=(_parent_literal_class, ),
msgs=[
f"We expect a nested class of class {cls} with name "
f"{LITERAL_CLASS_NAME!r} to "
f"extend the class {_parent_literal_class}"
]
)
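# e.g. if class B extends class A and A defines a nested LITERAL, then
# B.LITERAL must itself subclass A.LITERAL (class names here are illustrative).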
# ------------------------------------------------------02
# check if all yaml tags are unique
_yaml_tag = cls.yaml_tag()
if _yaml_tag not in _yaml_tag_check.keys():
_yaml_tag_check[_yaml_tag] = cls
else:
e.code.CodingError(
msgs=[
f"The same yaml tag `{_yaml_tag}` seems to appear in both "
f"classes: ",
[
cls, _yaml_tag_check[_yaml_tag]
]
]
)
# noinspection PyPep8Naming
def check_HashableClass():
# some useful vars
_general_dunders_to_ignore = [
# python adds it
'__module__', '__dict__', '__weakref__', '__doc__',
# dataclass related
'__annotations__', '__abstractmethods__', '__dataclass_params__',
'__dataclass_fields__',
# dataclass adds this default dunders to all dataclasses ... we have
# no control over this ;(
'__init__', '__repr__', '__eq__', '__setattr__',
'__delattr__', '__hash__',
# we allow this
'__call__',
]
cls: t.Type[HashableClass]
for cls in HashableClass.available_sub_classes():
# ---------------------------------------------------------- 01
# class should not be local
if str(cls).find("<locals>") > -1:
e.validation.NotAllowed(
msgs=[
f"Hashable classes can only be first class classes.",
f"Do not define classes locally, declare them at module "
f"level.",
f"Check class {cls}"
]
)
# ---------------------------------------------------------- 02
# check all non dunder attributes
for _attr_k, _attr_v in util.fetch_non_dunder_attributes(cls):
# ------------------------------------------------------ 02.01
# if _attr_v is property, function or a method ... no need to check
# anything and we can continue
_allowed_types = (
property,
types.FunctionType,
types.MethodType,
util.HookUp,
)
# noinspection PyTypeChecker
if isinstance(_attr_v, _allowed_types):
continue
# ------------------------------------------------------ 02.02
# no attribute should start with _
if _attr_k.startswith('_'):
# if abstract class used this will be present
# the only field that starts with _ which we allow
if _attr_k == '_abc_impl':
continue
# anything else raise error
e.validation.NotAllowed(
msgs=[
f"Attribute {_attr_k} is not one of {_allowed_types} "
f"and it starts with `_`",
f"Please check attribute {_attr_k} of class {cls}"
]
)
# ------------------------------------------------------ 02.03
# if special helper classes that stores all LITERALS
if _attr_k == LITERAL_CLASS_NAME:
# NOTE: we already check if class LITERAL is correctly
# subclassed in super method .... so no need to do here
continue
# ------------------------------------------------------ 02.04
# check if _attr_k is in __annotations__ then it is dataclass field
# Note: dataclasses.fields will not work here as the class still
# does not know that it is dataclass
# todo: this is still not a complete solution as annotations will
# also have fields that are t.ClassVar and t.InitVar
# ... we will see this later how to deal with ... currently
# there is no easy way
if _attr_k in cls.__annotations__.keys():
# if the annotation value is a typing.ClassVar, raise an error
# simple way to see if typing was used as annotation value
if hasattr(_attr_v, '__origin__'):
if _attr_v.__origin__ == t.ClassVar:
e.code.CodingError(
msgs=[
f"We do not allow class variable {_attr_k} "
f"... check class {cls}"
]
)
# if `dataclasses.InitVar` raise error
if isinstance(_attr_v, dataclasses.InitVar):
e.code.CodingError(
msgs=[
f"We co not allow using dataclass.InitVar.",
f"Please check annotated field {_attr_k} in "
f"class {cls}"
]
)
# if a valid dataclass field, continue
continue
# ------------------------------------------------------ 02.05
# if we reached here we do not understand the class attribute so
# raise error
e.code.NotAllowed(
msgs=[
f"Found an attribute `{_attr_k}` with: ",
dict(
type=f"{type(_attr_v)}",
value=f"{_attr_v}",
),
f"Problem with attribute {_attr_k} of class {cls}",
f"It is neither one of {_allowed_types}, nor is it "
f"defined as dataclass field.",
f"Note that if you are directly assigning the annotated "
f"field it will not return dataclass field so please "
f"assign it with "
f"`dataclass.field(default=...)` or "
f"`dataclass.field(default_factory=...)`",
]
)
# ---------------------------------------------------------- 03
# do not override dunder methods
if cls != HashableClass:
for k in cls.__dict__.keys():
if k.startswith("__") and k.endswith("__"):
if k not in _general_dunders_to_ignore:
e.code.CodingError(
msgs=[
f"You are not allowed to override dunder "
f"methods in any subclass of {HashableClass}",
f"Please check class {cls} and avoid defining "
f"dunder method `{k}` inside it"
]
)
# ---------------------------------------------------------- 04
# if block_fields_in_subclasses then check there are no new fields in
# subclass
# todo: we know that this cause | |
upper decorators).
:return: None
"""
pre_defined_ce, upper_decorator = pre_defined_ce
# Include the registering info related to @task
impl_type = "METHOD"
impl_constraints = {}
impl_io = False
if __debug__:
logger.debug("Configuring core element.")
set_ce_signature = self.core_element.set_ce_signature
set_impl_signature = self.core_element.set_impl_signature
set_impl_type_args = self.core_element.set_impl_type_args
set_impl_constraints = self.core_element.set_impl_constraints
set_impl_type = self.core_element.set_impl_type
set_impl_io = self.core_element.set_impl_io
if pre_defined_ce:
# Core element has already been created in an upper decorator
# (e.g. @implements and @compss)
get_ce_signature = self.core_element.get_ce_signature
get_impl_constraints = self.core_element.get_impl_constraints
get_impl_type = self.core_element.get_impl_type
get_impl_type_args = self.core_element.get_impl_type_args
get_impl_io = self.core_element.get_impl_io
if get_ce_signature() is None:
set_ce_signature(impl_signature)
set_impl_signature(impl_signature)
elif get_ce_signature() != impl_signature and not upper_decorator:
# Specific for inheritance - not for @implements.
set_ce_signature(impl_signature)
set_impl_signature(impl_signature)
set_impl_type_args(impl_type_args)
else:
# If we are here that means that we come from an implements
# decorator, which means that this core element has already
# a signature
set_impl_signature(impl_signature)
set_impl_type_args(impl_type_args)
if get_impl_constraints() is None:
set_impl_constraints(impl_constraints)
if get_impl_type() is None:
set_impl_type(impl_type)
if get_impl_type_args() is None:
set_impl_type_args(impl_type_args)
# Need to update impl_type_args if task is PYTHON_MPI and
# if the parameter with layout exists.
if get_impl_type() == "PYTHON_MPI":
self.check_layout_params(get_impl_type_args())
set_impl_signature(".".join(("MPI", impl_signature)))
set_impl_type_args(impl_type_args + get_impl_type_args()[1:])
if get_impl_io() is None:
set_impl_io(impl_io)
else:
# @task is in the top of the decorators stack.
# Update the empty core_element
set_ce_signature(impl_signature)
set_impl_signature(impl_signature)
set_impl_constraints(impl_constraints)
set_impl_type(impl_type)
set_impl_type_args(impl_type_args)
set_impl_io(impl_io)
def check_layout_params(self, impl_type_args):
# type: (list) -> None
""" Checks the layout format.
:param impl_type_args: Parameter arguments.
:return: None
"""
# todo: replace these INDEXES with CONSTANTS
num_layouts = int(impl_type_args[8])
if num_layouts > 0:
for i in range(num_layouts):
param_name = impl_type_args[(9+(i*4))].strip()
if param_name:
if param_name in self.parameters:
if self.parameters[param_name].content_type != parameter.TYPE.COLLECTION: # noqa: E501
raise PyCOMPSsException("Parameter %s is not a collection!" % param_name) # noqa: E501
else:
raise PyCOMPSsException("Parameter %s does not exist!" % param_name) # noqa: E501
def register_task(self):
# type: () -> None
""" This function is used to register the task in the runtime.
This registration must be done only once on the task decorator
initialization, unless there is a signature change (this will mean
that the user has changed the implementation interactively).
:return: None
"""
if __debug__:
logger.debug("[@TASK] Registering the function %s in module %s" %
(self.function_name, self.module_name))
binding.register_ce(self.core_element)
def validate_processes_per_node(self, processes, processes_per_node):
# type: (int, int) -> None
""" Checks the processes per node property.
:param processes: Total processes of a task.
:param processes_per_node: Processes per node.
:return: None
"""
if processes < processes_per_node:
raise PyCOMPSsException("Processes is smaller than processes_per_node.")
if (processes % processes_per_node) > 0:
raise PyCOMPSsException("Processes is not a multiple of processes_per_node.")
def process_processes_per_node(self):
# type: () -> int
""" Retrieve the number of computing nodes.
This value can be defined by upper decorators and can also be defined
dynamically defined with a global or environment variable.
:return: The number of computing nodes.
"""
parsed_processes_per_node = None
if isinstance(self.processes_per_node, int):
# Nothing to do
parsed_processes_per_node = self.processes_per_node
elif isinstance(self.processes_per_node, str):
# Check if processes_per_node can be cast to int
# Check if processes_per_node is an environment variable
# Check if processes_per_node is a dynamic global variable
try:
# Cast string to int
parsed_processes_per_node = int(self.processes_per_node)
except ValueError:
# Environment variable
if self.processes_per_node.strip().startswith('$'):
# Computing nodes is an ENV variable, load it
env_var = self.processes_per_node.strip()[1:] # Remove $
if env_var.startswith('{'):
env_var = env_var[1:-1] # remove brackets
try:
parsed_processes_per_node = int(os.environ[env_var])
except ValueError:
raise PyCOMPSsException(
cast_env_to_int_error("ComputingNodes")
)
else:
# Dynamic global variable
try:
# Load from global variables
parsed_processes_per_node = \
self.user_function.__globals__.get(
self.processes_per_node
)
except AttributeError:
# This is a numba jit declared task
try:
parsed_processes_per_node = \
self.user_function.py_func.__globals__.get(
self.processes_per_node
)
except AttributeError:
# No more chances
# Ignore error and parsed_processes_per_node will
# raise the exception
pass
if parsed_processes_per_node is None:
raise PyCOMPSsException("ERROR: Wrong Computing Nodes value.")
if parsed_processes_per_node <= 0:
logger.warning("Registered processes_per_node is less than 1 (%s <= 0). Automatically set it to 1" % # noqa: E501
str(parsed_processes_per_node))
parsed_processes_per_node = 1
return parsed_processes_per_node
def process_computing_nodes(self):
# type: () -> int
""" Retrieve the number of computing nodes.
This value can be defined by upper decorators and can also be defined
dynamically with a global or environment variable.
:return: The number of computing nodes.
"""
parsed_computing_nodes = None
if isinstance(self.computing_nodes, int):
# Nothing to do
parsed_computing_nodes = self.computing_nodes
elif isinstance(self.computing_nodes, str):
# Check if computing_nodes can be cast to int
# Check if computing_nodes is an environment variable
# Check if computing_nodes is a dynamic global variable
try:
# Cast string to int
parsed_computing_nodes = int(self.computing_nodes)
except ValueError:
# Environment variable
if self.computing_nodes.strip().startswith('$'):
# Computing nodes is an ENV variable, load it
env_var = self.computing_nodes.strip()[1:] # Remove $
if env_var.startswith('{'):
env_var = env_var[1:-1] # remove brackets
try:
parsed_computing_nodes = int(os.environ[env_var])
except ValueError:
raise PyCOMPSsException(
cast_env_to_int_error("ComputingNodes")
)
else:
# Dynamic global variable
try:
# Load from global variables
parsed_computing_nodes = \
self.user_function.__globals__.get(
self.computing_nodes
)
except AttributeError:
# This is a numba jit declared task
try:
parsed_computing_nodes = \
self.user_function.py_func.__globals__.get(
self.computing_nodes
)
except AttributeError:
# No more chances
# Ignore error and parsed_computing_nodes will
# raise the exception
pass
if parsed_computing_nodes is None:
raise PyCOMPSsException("ERROR: Wrong Computing Nodes value.")
if parsed_computing_nodes <= 0:
logger.warning("Registered computing_nodes is less than 1 (%s <= 0). Automatically set it to 1" % # noqa: E501
str(parsed_computing_nodes))
parsed_computing_nodes = 1
return parsed_computing_nodes
def process_reduction(self):
# type: () -> (bool, int)
""" Process the reduction parameter.
:return: Whether this is a reduction task, and the chunk size.
"""
# Deal with chunk size
parsed_chunk_size = None
if isinstance(self.chunk_size, int):
# Nothing to do
parsed_chunk_size = self.chunk_size
elif isinstance(self.chunk_size, str):
# Check if chunk_size can be cast to int
# Check if chunk_size is an environment variable
# Check if chunk_size is a dynamic global variable
try:
# Cast string to int
parsed_chunk_size = int(self.chunk_size)
except ValueError:
# Environment variable
if self.chunk_size.strip().startswith('$'):
# Chunk size is an ENV variable, load it
env_var = self.chunk_size.strip()[1:] # Remove $
if env_var.startswith('{'):
env_var = env_var[1:-1] # remove brackets
try:
parsed_chunk_size = int(os.environ[env_var])
except ValueError:
raise PyCOMPSsException(
cast_env_to_int_error('ChunkSize')
)
else:
# Dynamic global variable
try:
# Load from global variables
parsed_chunk_size = \
self.user_function.__globals__.get(
self.chunk_size
)
except AttributeError:
# This is a numba jit declared task
try:
parsed_chunk_size = \
self.user_function.py_func.__globals__.get(
self.chunk_size
)
except AttributeError:
# No more chances
# Ignore error and parsed_chunk_size will
# raise the exception
pass
if parsed_chunk_size is None:
parsed_chunk_size = 0
# Deal with is_reduce
parsed_is_reduce = False
if isinstance(self.is_reduce, bool):
# Nothing to do
parsed_is_reduce = self.is_reduce
elif isinstance(self.is_reduce, str):
# Check if is_reduce can be interpreted as a boolean string
# (bool() on any non-empty string is True, so match common truthy values instead)
parsed_is_reduce = self.is_reduce.strip().lower() in ("true", "1", "yes")
if parsed_is_reduce is None:
parsed_is_reduce = False
return parsed_is_reduce, parsed_chunk_size
def check_task_hints(self):
# type: () -> (bool, bool, bool, int, bool, bool)
""" Process the @task hints.
:return: The value of all possible hints.
"""
deco_arg_getter = self.decorator_arguments.get
if "isReplicated" in self.decorator_arguments:
is_replicated = deco_arg_getter("isReplicated")
logger.warning("Detected deprecated isReplicated. Please, change it to is_replicated") # noqa: E501
else:
is_replicated = deco_arg_getter("is_replicated")
# Get is distributed
if "isDistributed" in self.decorator_arguments:
is_distributed = deco_arg_getter("isDistributed")
logger.warning("Detected deprecated isDistributed. Please, change it to is_distributed") # noqa: E501
else:
is_distributed = deco_arg_getter("is_distributed")
# Get time out
if "timeOut" in self.decorator_arguments:
time_out = deco_arg_getter("timeOut")
logger.warning("Detected deprecated timeOut. Please, change it to time_out") # noqa: E501
else:
time_out = deco_arg_getter("time_out")
# Get priority
has_priority = deco_arg_getter("priority")
# Check if the function is an instance method or a class method.
has_target = self.function_type == FunctionType.INSTANCE_METHOD
return is_replicated, is_distributed, time_out, has_priority, has_target # noqa: E501
def add_return_parameters(self, returns=None):
# type: (bool) -> int
""" Modify the return parameters accordingly to the return statement.
:return: Creates and modifies self.returns and returns the number of
returns.
"""
self.returns = OrderedDict()
if returns:
_returns = returns
else:
_returns = self.decorator_arguments["returns"]
# Note that "returns" is by default False
if not _returns:
return 0
# A return statement can be the following:
# 1) A type. This means "this task returns an | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility module that contains useful utilities functions for PySide
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import struct
import logging
import inspect
import contextlib
from tpDcc import dcc
from tpDcc.libs.python import python
from tpDcc.libs.resources.core import color
from tpDcc.libs.qt.core import consts
LOGGER = logging.getLogger(consts.LIB_ID)
QT_ERROR_MESSAGE = 'Qt.py is not available and Qt related functionality will not be available!'
QT_AVAILABLE = True
try:
from Qt.QtCore import Qt, Signal, QObject, QPoint, QSize
from Qt.QtWidgets import QApplication, QLayout, QVBoxLayout, QHBoxLayout, QWidget, QFrame, QLabel, QPushButton
from Qt.QtWidgets import QSizePolicy, QMessageBox, QInputDialog, QFileDialog, QMenu, QMenuBar
from Qt.QtGui import QFontDatabase, QPixmap, QIcon, QColor
from Qt import QtGui
from Qt import QtCompat
from Qt import __binding__
except ImportError as e:
QT_AVAILABLE = False
LOGGER.warning('Impossible to load Qt libraries. Qt dependant functionality will be disabled!')
if QT_AVAILABLE:
if __binding__ == 'PySide2':
try:
import shiboken2 as shiboken
except ImportError:
from PySide2 import shiboken2 as shiboken
elif __binding__ == 'PySide':
try:
import shiboken
except ImportError:
try:
from Shiboken import shiboken
except ImportError:
try:
from PySide import shiboken
except Exception:
pass
# ==============================================================================
MAX_INT = 2 ** (struct.Struct('i').size * 8 - 1) - 1
UI_EXTENSION = '.ui'
QWIDGET_SIZE_MAX = (1 << 24) - 1
FLOAT_RANGE_MIN = 0.1 + (-MAX_INT - 1.0)
FLOAT_RANGE_MAX = MAX_INT + 0.1
INT_RANGE_MIN = -MAX_INT
INT_RANGE_MAX = MAX_INT
CURRENT_DIR = os.path.expanduser('~')
# ==============================================================================
def is_pyqt():
"""
Returns True if the current Qt binding is PyQt
:return: bool
"""
return 'PyQt' in __binding__
def is_pyqt4():
"""
Returns True if the current Qt binding is PyQt4
:return: bool
"""
return __binding__ == 'PyQt4'
def is_pyqt5():
"""
Returns True if the current Qt binding is PyQt5
:return: bool
"""
return __binding__ == 'PyQt5'
def is_pyside():
"""
Returns True if the current Qt binding is PySide
:return: bool
"""
return __binding__ == 'PySide'
def is_pyside2():
"""
Returns True if the current Qt binding is PySide2
:return: bool
"""
return __binding__ == 'PySide2'
def get_ui_library():
"""
Returns the library that is being used
"""
try:
import PyQt5
qt = 'PyQt5'
except ImportError:
try:
import PyQt4
qt = 'PyQt4'
except ImportError:
try:
import PySide2
qt = 'PySide2'
except ImportError:
try:
import PySide
qt = 'PySide'
except ImportError:
raise ImportError("No valid Gui library found!")
return qt
def wrapinstance(ptr, base=None):
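# Wraps a raw C++ pointer into a Qt Python object. Typical usage sketch (the
# Maya OpenMayaUI call is an assumption, shown only as an example of where
# such a pointer can come from):
#   ptr = omui.MQtUtil.mainWindow()
#   main_window = wrapinstance(ptr, QWidget)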
if ptr is None:
return None
ptr_type = long if python.is_python2() else int
ptr = ptr_type(ptr)
if 'shiboken' in globals():
if base is None:
qObj = shiboken.wrapInstance(ptr_type(ptr), QObject)
meta_obj = qObj.metaObject()
cls = meta_obj.className()
super_cls = meta_obj.superClass().className()
if hasattr(QtGui, cls):
base = getattr(QtGui, cls)
elif hasattr(QtGui, super_cls):
base = getattr(QtGui, super_cls)
else:
base = QWidget
try:
return shiboken.wrapInstance(ptr_type(ptr), base)
except Exception:
from PySide.shiboken import wrapInstance
return wrapInstance(ptr_type(ptr), base)
elif 'sip' in globals():
base = QObject
return sip.wrapinstance(ptr_type(ptr), base)
else:
print('Failed to wrap object ...')
return None
def unwrapinstance(object):
"""
Unwraps objects with PySide
"""
if python.is_python2():
return long(shiboken.getCppPointer(object)[0])
else:
return int(shiboken.getCppPointer(object)[0])
@contextlib.contextmanager
def app():
"""
Context to create a Qt app
>>> with qtutils.app():
>>> w = QWidget(None)
>>> w.show()
:return:
"""
app_ = None
is_app_running = bool(QApplication.instance())
if not is_app_running:
app_ = QApplication(sys.argv)
install_fonts()
yield None
if not is_app_running:
sys.exit(app_.exec_())
def install_fonts(fonts_path=None):
"""
Install all the fonts in the given directory path
:param fonts_path: str
"""
if not fonts_path or not os.path.isdir(fonts_path):
return
font_path = os.path.abspath(fonts_path)
font_data_base = QFontDatabase()
for filename in os.listdir(font_path):
if filename.endswith('.ttf'):
filename = os.path.join(font_path, filename)
result = font_data_base.addApplicationFont(filename)
if result > -1: # addApplicationFont returns -1 on failure, otherwise a font id (which can be 0)
LOGGER.debug('Added font {}'.format(filename))
else:
LOGGER.debug('Impossible to add font {}'.format(filename))
def ui_path(cls):
"""
Returns the UI path for the given widget class
:param cls: type
:return: str
"""
name = cls.__name__
ui_path = inspect.getfile(cls)
dirname = os.path.dirname(ui_path)
ui_path = dirname + '/resource/ui/' + name + UI_EXTENSION
if not os.path.exists(ui_path):
ui_path = dirname + '/ui/' + name + UI_EXTENSION
if not os.path.exists(ui_path):
ui_path = dirname + '/' + name + UI_EXTENSION
return ui_path
def load_widget_ui(widget, path=None):
"""
Loads UI of the given widget
:param widget: QWidget or QDialog
:param path: str
"""
if not path:
path = ui_path(widget.__class__)
cwd = os.getcwd()
try:
os.chdir(os.path.dirname(path))
widget.ui = QtCompat.loadUi(path, widget)
except Exception as e:
pass
# tpPyUtils.logger.debug('{} | {}'.format(e, traceback.format_exc()))
finally:
os.chdir(cwd)
def compat_ui_loader(ui_file, widget=None):
"""
Loads GUI from .ui file using compat module
In some DCCs, such as 3ds Max this function does not work properly. In those cases use load_ui function
:param ui_file: str, path to the UI file
:param widget: parent widget
"""
if not ui_file:
ui_file = ui_path(widget.__class__)
ui = QtCompat.loadUi(ui_file)
if not widget:
return ui
else:
for member in dir(ui):
if not member.startswith('__') and member != 'staticMetaObject':
setattr(widget, member, getattr(ui, member))
return ui
def get_signals(class_obj):
"""
Returns a list with all signals of a class
:param class_obj: QObject
"""
result = [x for x in vars(class_obj).items() if isinstance(x[1], Signal)]
if class_obj.__base__ and class_obj.__base__ != QObject:
result.extend(get_signals(class_obj.__base__))
return result
def safe_disconnect_signal(signal):
"""
Disconnects given signal in a safe way
:param signal: Signal
"""
try:
signal.disconnect()
except Exception:
pass
def safe_delete_later(widget):
"""
calls the deleteLater method on the given widget, but only
in the necessary Qt environment
:param widget: QWidget
"""
if __binding__ in ('PySide', 'PyQt4'):
widget.deleteLater()
def show_info(parent, title, info):
"""
Show a info QMessageBox with the given info
:return:
"""
return QMessageBox.information(parent, title, info)
def show_question(parent, title, question):
"""
Show a question QMessageBox with the given question text
:param question: str
:return:
"""
flags = QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No
return QMessageBox.question(parent, title, question, flags)
def show_warning(parent, title, warning):
"""
Shows a warning QMessageBox with the given warning text
:param parent: QWidget
:param title: str
:param warning: str
:return:
"""
return QMessageBox.warning(parent, title, warning)
def show_error(parent, title, error):
"""
Show a error QMessageBox with the given error
:return:
"""
return QMessageBox.critical(parent, title, error)
def clear_layout(layout):
"""
Removes all the widgets added in the given layout
:param layout: QLayout
"""
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
clear_layout(child.layout())
# for i in reversed(range(layout.count())):
# item = layout.itemAt(i)
# if item:
# w = item.widget()
# if w:
# w.setParent(None)
def clear_stack_widget(stack_widget):
"""
Clears all the widgets stacked in the given stack widget
:param stack_widget: QStackWidget
"""
for i in range(stack_widget.count() - 1, -1, -1):
widget = stack_widget.widget(i)
stack_widget.removeWidget(widget)
widget.deleteLater()
def layout_items(layout):
"""
Returns the items from the given layout and returns them
:param layout: QLayout, layout to retrieve items from
:return: list(QWidgetItem)
"""
return [layout.itemAt(i) for i in range(layout.count())]
def layout_widgets(layout):
"""
Returns the widgets from the given layout and returns them
:param layout: QLayout, layout to retrieve widgets from
:return: list(QWidget)
"""
return [layout.itemAt(i).widget() for i in range(layout.count())]
def image_to_clipboard(path):
"""
Copies the image at path to the system's global clipboard
:param path: str
"""
image = QtGui.QImage(path)
clipboard = QApplication.clipboard()
clipboard.setImage(image, mode=QtGui.QClipboard.Clipboard)
def get_horizontal_separator():
v_div_w = QWidget()
v_div_l = QVBoxLayout()
v_div_l.setAlignment(Qt.AlignLeft)
v_div_l.setContentsMargins(0, 0, 0, 0)
v_div_l.setSpacing(0)
v_div_w.setLayout(v_div_l)
v_div = QFrame()
v_div.setMinimumHeight(30)
v_div.setFrameShape(QFrame.VLine)
v_div.setFrameShadow(QFrame.Sunken)
v_div_l.addWidget(v_div)
return v_div_w
def get_rounded_mask(width, height, radius_tl=10, radius_tr=10, radius_bl=10, radius_br=10):
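# Builds a QRegion mask with independently rounded corners. Usage sketch (the
# widget variable is illustrative):
#   widget.setMask(get_rounded_mask(widget.width(), widget.height(), 8, 8, 8, 8))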
region = QtGui.QRegion(0, 0, width, height, QtGui.QRegion.Rectangle)
# top left
round = QtGui.QRegion(0, 0, 2 * radius_tl, 2 * radius_tl, QtGui.QRegion.Ellipse)
corner = QtGui.QRegion(0, 0, radius_tl, radius_tl, QtGui.QRegion.Rectangle)
region = region.subtracted(corner.subtracted(round))
# top right
round = QtGui.QRegion(width - 2 * radius_tr, 0, 2 * radius_tr, 2 * radius_tr, QtGui.QRegion.Ellipse)
corner = QtGui.QRegion(width - radius_tr, 0, radius_tr, radius_tr, QtGui.QRegion.Rectangle)
region = region.subtracted(corner.subtracted(round))
# bottom right
round = QtGui.QRegion(
width - 2 * radius_br, height - 2 * radius_br, 2 * radius_br, 2 * radius_br, QtGui.QRegion.Ellipse)
corner = QtGui.QRegion(width - radius_br, height - radius_br, radius_br, radius_br, QtGui.QRegion.Rectangle)
region = region.subtracted(corner.subtracted(round))
# bottom left
round = QtGui.QRegion(0, height - 2 * radius_bl, 2 * radius_bl, 2 * radius_bl, QtGui.QRegion.Ellipse)
corner = QtGui.QRegion(0, height - radius_bl, radius_bl, radius_bl, QtGui.QRegion.Rectangle)
region = region.subtracted(corner.subtracted(round))
return region
def distance_point_to_line(p, v0, v1):
"""
Returns the distance from the given point to line created by the two given v0 and v1 points
:param p: QPoint
:param v0: QPoint
:param v1: QPoint
:return:
"""
v = QtGui.QVector2D(v1 - v0)
w = QtGui.QVector2D(p - v0)
c1 = QtGui.QVector2D.dotProduct(w, v)
c2 = QtGui.QVector2D.dotProduct(v, v)
b = c1 * 1.0 / c2
pb = v0 + v.toPointF() * b
return QtGui.QVector2D(p - pb).length()
def qhash(inputstr):
instr = ""
if python.is_python2():
if isinstance(inputstr, str):
instr = inputstr
| |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API endpoints for maintaining financing statements and updates to financing statements."""
# pylint: disable=too-many-return-statements
from http import HTTPStatus
from flask import g, jsonify, request, current_app
from flask_restx import Namespace, Resource, cors
from registry_schemas import utils as schema_utils
from ppr_api.exceptions import BusinessException, DatabaseException
from ppr_api.models import AccountBcolId, EventTracking, FinancingStatement, Registration, User, UserExtraRegistration
from ppr_api.models import utils as model_utils
from ppr_api.models.registration_utils import AccountRegistrationParams
from ppr_api.reports import ReportTypes
from ppr_api.resources import utils as resource_utils, financing_utils as fs_utils
from ppr_api.services.authz import authorized, is_sbc_office_account, \
is_staff_account, is_bcol_help, is_all_staff_account
from ppr_api.services.payment.exceptions import SBCPaymentException
from ppr_api.utils.auth import jwt
from ppr_api.utils.util import cors_preflight
from ppr_api.callback.utils.exceptions import ReportDataException
API = Namespace('financing-statements', description='Endpoints for maintaining financing statements and updates.')
@cors_preflight('GET,POST,OPTIONS')
@API.route('', methods=['GET', 'POST', 'OPTIONS'])
class FinancingResource(Resource):
"""Resource for maintaining Financing Statements."""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get():
"""Get the list of financing statements created by the header account ID."""
try:
# Quick check: must provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID
if not authorized(account_id, jwt):
return resource_utils.unauthorized_error_response(account_id)
# Try to fetch financing statement list for account ID
try:
statement_list = FinancingStatement.find_all_by_account_id(account_id)
return jsonify(statement_list), HTTPStatus.OK
except Exception as db_exception: # noqa: B902; return nicer default error
return resource_utils.db_exception_response(db_exception, account_id, 'GET financing statements')
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def post():
"""Create a new financing statement."""
try:
# Quick check: must be staff or provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID. BCOL helpdesk is not allowed to submit this request.
if not authorized(account_id, jwt) or is_bcol_help(account_id):
return resource_utils.unauthorized_error_response(account_id)
request_json = request.get_json(silent=True)
# Validate request data against the schema.
valid_format, errors = schema_utils.validate(request_json, 'financingStatement', 'ppr')
extra_validation_msg = resource_utils.validate_financing(request_json)
if not valid_format or extra_validation_msg != '':
return resource_utils.validation_error_response(errors, fs_utils.VAL_ERROR, extra_validation_msg)
# Set up the financing statement registration, pay, and save the data.
statement = fs_utils.pay_and_save_financing(request, request_json, account_id)
response_json = statement.json
if resource_utils.is_pdf(request):
# Return report if request header Accept MIME type is application/pdf.
return fs_utils.get_registration_report(statement.registration[0], response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value,
jwt.get_token_auth_header(), HTTPStatus.CREATED)
resource_utils.enqueue_registration_report(statement.registration[0], response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value)
return response_json, HTTPStatus.CREATED
except DatabaseException as db_exception:
return resource_utils.db_exception_response(db_exception, account_id, 'POST financing statement')
except SBCPaymentException as pay_exception:
return resource_utils.pay_exception_response(pay_exception, account_id)
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@cors_preflight('GET,OPTIONS')
@API.route('/<path:registration_num>', methods=['GET', 'OPTIONS'])
class GetFinancingResource(Resource):
"""Resource to get an individual financing statement by registration number."""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(registration_num):
"""Get a financing statement by registration number."""
try:
if registration_num is None:
return resource_utils.path_param_error_response('registration number')
# Quick check: must be staff or provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID
if not authorized(account_id, jwt):
return resource_utils.unauthorized_error_response(account_id)
# Try to fetch financing statement by registration number
# Not found throws a business exception.
statement = FinancingStatement.find_by_registration_number(registration_num,
account_id,
is_all_staff_account(account_id))
# Extra check: the account name must match either the registering party or a secured party name.
if resource_utils.is_pdf(request):
resource_utils.check_access_financing(jwt.get_token_auth_header(),
is_all_staff_account(account_id), account_id, statement)
# Set to false to exclude change history.
statement.include_changes_json = False
# Set to false as default to generate json with original financing statement data.
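# The `current` query parameter toggles current_view_json: truthy strings
# ('true', '1', 'y', 'yes') request the current (as-amended) view, while the
# default keeps the original financing statement data.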
current_param = request.args.get(fs_utils.CURRENT_PARAM)
if current_param is None or not isinstance(current_param, (bool, str)):
statement.current_view_json = False
elif isinstance(current_param, str) and current_param.lower() in ['true', '1', 'y', 'yes']:
statement.current_view_json = True
elif isinstance(current_param, str):
statement.current_view_json = False
else:
statement.current_view_json = current_param
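# Example (assuming fs_utils.CURRENT_PARAM is the query string name, e.g. 'current'):
# GET .../<registration_num>?current=true returns the current view instead of the original registration data.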
if resource_utils.is_pdf(request):
# Return report if request header Accept MIME type is application/pdf.
return fs_utils.get_registration_report(statement.registration[0], statement.json,
ReportTypes.FINANCING_STATEMENT_REPORT.value,
jwt.get_token_auth_header())
return statement.json, HTTPStatus.OK
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except DatabaseException as db_exception:
return resource_utils.db_exception_response(db_exception, account_id,
'GET financing statement id=' + registration_num)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@cors_preflight('POST,OPTIONS')
@API.route('/<path:registration_num>/amendments', methods=['POST', 'OPTIONS'])
class AmendmentResource(Resource):
"""Resource to register an amendment statement by registration number."""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def post(registration_num):
"""Amend a financing statement by registration number."""
try:
if registration_num is None:
return resource_utils.path_param_error_response('registration number')
# Quick check: must be staff or provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID. BCOL helpdesk is not allowed to submit this request.
if not authorized(account_id, jwt) or is_bcol_help(account_id):
return resource_utils.unauthorized_error_response(account_id)
request_json = request.get_json(silent=True)
# Validate request data against the schema.
valid_format, errors = schema_utils.validate(request_json, 'amendmentStatement', 'ppr')
extra_validation_msg = resource_utils.validate_registration(request_json)
if not valid_format or extra_validation_msg != '':
return resource_utils.validation_error_response(errors, fs_utils.VAL_ERROR, extra_validation_msg)
# payload base registration number must match path registration number
if registration_num != request_json['baseRegistrationNumber']:
return resource_utils.path_data_mismatch_error_response(registration_num,
'base registration number',
request_json['baseRegistrationNumber'])
# Fetch base registration information: business exception thrown if not
# found or historical.
statement = FinancingStatement.find_by_registration_number(registration_num, account_id,
is_staff_account(account_id), True)
# Verify base debtor (bypassed for staff)
if not statement.validate_debtor_name(request_json['debtorName'], is_staff_account(account_id)):
return resource_utils.base_debtor_invalid_response()
# Verify delete party and collateral ID's
resource_utils.validate_delete_ids(request_json, statement)
# Set up the registration, pay, and save the data.
registration = fs_utils.pay_and_save(request,
request_json,
model_utils.REG_CLASS_AMEND,
statement,
registration_num,
account_id)
response_json = registration.verification_json('amendmentRegistrationNumber')
# If we get to this point the request was successful: enqueue verification statements for secured parties.
resource_utils.queue_secured_party_verification(registration)
if resource_utils.is_pdf(request):
# Return report if request header Accept MIME type is application/pdf.
return fs_utils.get_registration_report(registration, response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value,
jwt.get_token_auth_header(), HTTPStatus.CREATED)
resource_utils.enqueue_registration_report(registration, response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value)
return response_json, HTTPStatus.CREATED
except DatabaseException as db_exception:
return resource_utils.db_exception_response(db_exception, account_id,
'POST Amendment id=' + registration_num)
except SBCPaymentException as pay_exception:
return resource_utils.pay_exception_response(pay_exception, account_id)
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@cors_preflight('GET,OPTIONS')
@API.route('/<path:registration_num>/amendments/<path:amendment_registration_num>', methods=['GET', 'OPTIONS'])
class GetAmendmentResource(Resource):
"""Resource to get an individual amendment registration statement by registration number."""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(registration_num, amendment_registration_num):
"""Get an amendment registration statement by registration number."""
try:
if amendment_registration_num is None:
return resource_utils.path_param_error_response('amendment registration number')
# Quick check: must be staff or provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID
if not authorized(account_id, jwt):
return resource_utils.unauthorized_error_response(account_id)
# Try to fetch registration statement by registration number
statement = Registration.find_by_registration_number(amendment_registration_num,
account_id,
is_all_staff_account(account_id),
registration_num)
# If requesting a verification statement report, check the account name matches either
# the registering party or a secured party name.
if resource_utils.is_pdf(request):
resource_utils.check_access_registration(jwt.get_token_auth_header(),
is_all_staff_account(account_id), account_id,
statement)
response_json = statement.verification_json('amendmentRegistrationNumber')
if resource_utils.is_pdf(request):
# Return report if request header Accept MIME type is application/pdf.
return fs_utils.get_registration_report(statement, response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value,
jwt.get_token_auth_header())
return response_json, HTTPStatus.OK
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except DatabaseException as db_exception:
return resource_utils.db_exception_response(db_exception, account_id,
'GET Amendment id=' + amendment_registration_num)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@cors_preflight('GET,OPTIONS')
@API.route('/<path:registration_num>/changes/<path:change_registration_num>', methods=['GET', 'OPTIONS'])
class GetChangeResource(Resource):
"""Resource to get an individual change registration statement by registration number."""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(registration_num, change_registration_num):
"""Get a change registration statement by registration number."""
try:
if change_registration_num is None:
return resource_utils.path_param_error_response('change registration number')
# Quick check: must be staff or provide an account ID.
account_id = resource_utils.get_account_id(request)
if account_id is None:
return resource_utils.account_required_response()
# Verify request JWT and account ID
if not authorized(account_id, jwt):
return resource_utils.unauthorized_error_response(account_id)
# Try to fetch registration statement by registration number
statement = Registration.find_by_registration_number(change_registration_num,
account_id,
is_all_staff_account(account_id),
registration_num)
# If requesting a verification statement report, check the account name matches either
# the registering party or a secured party name.
if resource_utils.is_pdf(request):
resource_utils.check_access_registration(jwt.get_token_auth_header(),
is_all_staff_account(account_id), account_id,
statement)
response_json = statement.verification_json('changeRegistrationNumber')
if resource_utils.is_pdf(request):
# Return report if request header Accept MIME type is application/pdf.
return fs_utils.get_registration_report(statement, response_json,
ReportTypes.FINANCING_STATEMENT_REPORT.value,
jwt.get_token_auth_header())
return response_json, HTTPStatus.OK
except BusinessException as exception:
return resource_utils.business_exception_response(exception)
except DatabaseException as db_exception:
return resource_utils.db_exception_response(db_exception, account_id,
'GET Change id=' + change_registration_num)
except Exception as default_exception: # noqa: B902; return nicer default error
return resource_utils.default_exception_response(default_exception)
@cors_preflight('POST,OPTIONS')
@API.route('/<path:registration_num>/renewals', methods=['POST', | |
is updatable.
type: list
suboptions:
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
type: dict
suboptions:
code:
description:
- The ICMP code (optional).
type: int
type:
description:
- The ICMP type.
type: int
required: true
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if ingress traffic allows TCP destination port 80, there should be an egress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
type: bool
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
type: str
required: true
source:
description:
- Conceptually, this is the range of IP addresses that a packet coming into the instance
can come from.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic coming from a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
type: str
required: true
source_type:
description:
- Type of source for the rule. The default is `CIDR_BLOCK`.
- " * `CIDR_BLOCK`: If the rule's `source` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `source` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic coming from a
particular `Service` through a service gateway)."
type: str
choices:
- "CIDR_BLOCK"
- "SERVICE_CIDR_BLOCK"
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
description:
description:
- An optional description of your choice for the rule.
type: str
vcn_id:
description:
- The OCID of the VCN the security list belongs to.
- Required for create using I(state=present).
type: str
security_list_id:
description:
- The OCID of the security list.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
purge_security_rules:
description:
- Purge security rules from the existing security list which are not present in the provided
security rules. If I(purge_security_rules=no), the provided security rules are appended to the
existing security rules. I(purge_security_rules) and I(delete_security_rules) are mutually exclusive.
- This parameter is updatable.
type: bool
default: "true"
delete_security_rules:
description:
- Delete security rules from the existing security list which are present in the
security rules provided by I(ingress_security_rules) and/or I(egress_security_rules).
If I(delete_security_rules=yes), security rules provided by I(ingress_security_rules)
and/or I(egress_security_rules) are deleted from the existing security list, if they
are part of the existing security list. If they are not part of the existing security list,
they will be ignored. I(purge_security_rules) and I(delete_security_rules) are mutually
exclusive.
- This parameter is updatable.
type: bool
default: "false"
state:
description:
- The state of the SecurityList.
- Use I(state=present) to create or update a SecurityList.
- Use I(state=absent) to delete a SecurityList.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create security_list
oci_network_security_list:
vcn_id: ocid1.vcn.oc1.phx.unique_ID
display_name: MyPrivateSubnetSecurityList
ingress_security_rules:
- protocol: 6
source: 10.0.1.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
- protocol: 6
source: 10.0.2.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
egress_security_rules:
- protocol: 6
destination: 10.0.2.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
compartment_id: ocid1.compartment.oc1..unique_ID
- name: Update security_list using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_security_list:
compartment_id: ocid1.compartment.oc1..unique_ID
defined_tags: {'Operations': {'CostCenter': 'US'}}
display_name: MyPrivateSubnetSecurityList
egress_security_rules:
- destination: 10.0.2.0/24
protocol: 6
freeform_tags: {'Department': 'Finance'}
ingress_security_rules:
- protocol: 6
source: 10.0.1.0/24
purge_security_rules: false
delete_security_rules: true
- name: Update security_list
oci_network_security_list:
defined_tags: {'Operations': {'CostCenter': 'US'}}
display_name: MyPrivateSubnetSecurityList
security_list_id: ocid1.securitylist.oc1..xxxxxxEXAMPLExxxxxx
- name: Delete security_list
oci_network_security_list:
security_list_id: ocid1.securitylist.oc1..xxxxxxEXAMPLExxxxxx
state: absent
- name: Delete security_list using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_security_list:
compartment_id: ocid1.compartment.oc1..unique_ID
display_name: MyPrivateSubnetSecurityList
state: absent
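# Illustrative only (not from the original examples): a stateless ICMP ingress rule allowing
# type 3 code 4 ("Fragmentation Needed"), based on the icmp_options documented above.
# The display name and CIDR are placeholders.
- name: Create security_list with a stateless ICMP ingress rule
  oci_network_security_list:
    compartment_id: ocid1.compartment.oc1..unique_ID
    vcn_id: ocid1.vcn.oc1.phx.unique_ID
    display_name: MyIcmpSecurityList
    ingress_security_rules:
      - protocol: 1
        source: 0.0.0.0/0
        is_stateless: true
        icmp_options:
          type: 3
          code: 4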
"""
RETURN = """
security_list:
description:
- Details of the SecurityList resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the security list.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: string
sample: display_name_example
egress_security_rules:
description:
- Rules for allowing egress IP packets.
returned: on success
type: complex
| |
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_directive(self) -> FortielNode:
"""Parse a directive."""
# Parse directive head and proceed to the specific parse function.
directive = self._matches_line(_FORTIEL_DIRECTIVE)['directive']
head = type(self)._parse_head(directive)
if head is None:
message = 'empty directive'
raise FortielSyntaxError(message, self._file_path, self._line_number)
if (func := {'use': self._parse_use_directive,
'let': self._parse_let_directive,
'define': self._parse_define_directive,
'del': self._parse_del_directive,
'if': self._parse_if_directive,
'ifdef': self._parse_ifdef_directive,
'ifndef': self._parse_ifndef_directive,
'do': self._parse_do_directive,
'for': self._parse_for_directive,
'macro': self._parse_macro_directive}.get(head)) is not None:
return func()
# Determine the error type:
# either the known directive is misplaced, either the directive is unknown.
if head in map(_make_name, {'else', 'else if', 'end if', 'end do',
'section', 'finally', 'pattern', 'end macro'}):
message = f'misplaced directive <{head}>'
raise FortielSyntaxError(message, self._file_path, self._line_number)
message = f'unknown or mistyped directive <{head}>'
raise FortielSyntaxError(message, self._file_path, self._line_number)
@staticmethod
def _parse_head(directive: Optional[str]) -> Optional[str]:
# Empty directives do not have a head.
if directive is None or directive == '':
return None
# ELSE is merged with IF, END is merged with any following word.
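# e.g. 'end if' becomes the head 'endif' and 'else if <cond>' becomes 'elseif', while 'use "file.fi"' keeps the head 'use'.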
head_words = directive.split(' ', 2)
head = head_words[0].lower()
if len(head_words) > 1:
second_word = head_words[1].lower()
if head == 'end' or (head == 'else' and second_word == 'if'):
head += second_word
return head
def _matches_directive(self, *expected_heads: str) -> Optional[str]:
match = self._matches_line(_FORTIEL_DIRECTIVE)
if match is not None:
directive = match['directive'].lower()
head = type(self)._parse_head(directive)
if head in map(_make_name, expected_heads):
return head
return None
def _match_directive_syntax(
self, pattern: Pattern[str], *groups: str) -> Union[str, Tuple[str, ...]]:
directive = self._matches_line(_FORTIEL_DIRECTIVE)['directive'].rstrip()
if (match := pattern.match(directive)) is None:
head = type(self)._parse_head(directive)
message = f'invalid <{head}> directive syntax'
raise FortielSyntaxError(message, self._file_path, self._line_number)
self._advance_line()
return match.group(*groups)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_use_directive(self) -> FortielUseNode:
"""Parse USE directive."""
node = FortielUseNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_USE, 'path'))
# Remove quotes.
node.imported_file_path = node.imported_file_path[1:-1]
return node
def _parse_let_directive(self) -> FortielLetNode:
"""Parse LET directive."""
# Note that we are not evaluating or
# validating define arguments and body here.
node = FortielLetNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_LET, 'name', 'arguments', 'value_expression'))
if is_reserved(node.name):
message = f'name `{node.name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
# Split and verify arguments.
if node.arguments is not None:
node.arguments = list(map(
(lambda arg: re.sub(r'\s', '', arg)), node.arguments.split(',')))
# Materialize as a list: naked_arguments is iterated twice below (duplicate and reserved-word checks).
naked_arguments = list(map((lambda arg: arg.replace('*', '')), node.arguments))
if (dup := _find_duplicate(naked_arguments)) is not None:
message = f'duplicate argument `{dup}` of the functional <let>'
raise FortielSyntaxError(message, node.file_path, node.line_number)
if len(bad_arguments := list(filter(is_reserved, naked_arguments))) != 0:
message = f'<let> arguments `{"`, `".join(bad_arguments)}` are reserved words'
raise FortielSyntaxError(message, node.file_path, node.line_number)
return node
def _parse_define_directive(self) -> FortielLetNode:
"""Parse DEFINE directive."""
# Note that we are not evaluating or validating define segment here.
name, segment = \
self._match_directive_syntax(_FORTIEL_DEFINE, 'name', 'segment')
node = FortielLetNode(
self._file_path, self._line_number,
name, arguments=None, value_expression=f"'{segment}'")
if is_reserved(node.name):
message = f'name `{node.name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
return node
def _parse_del_directive(self) -> FortielDelNode:
"""Parse DEL directive."""
# Note that we are not evaluating or validating define name here.
node = FortielDelNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_DEL, 'names'))
# Split names.
node.names = list(map(str.strip, node.names.split(',')))
return node
def _parse_if_directive(self) -> FortielIfNode:
"""Parse IF/ELSE IF/ELSE/END IF directive."""
# Note that we are not evaluating or validating condition expressions here.
node = FortielIfNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_IF, 'condition_expression'))
while not self._matches_directive('else if', 'else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else if'):
while not self._matches_directive('else', 'end if'):
elif_node = FortielElifNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_ELIF, 'condition_expression'))
while not self._matches_directive('else if', 'else', 'end if'):
elif_node.then_nodes.append(self._parse_statement())
node.elif_nodes.append(elif_node)
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_ifdef_directive(self) -> FortielIfNode:
"""Parse IFDEF/ELSE/END IF directive."""
node = FortielIfNode(
self._file_path, self._line_number,
f'defined("{self._match_directive_syntax(_FORTIEL_IFDEF, "name")}")')
while not self._matches_directive('else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_ifndef_directive(self) -> FortielIfNode:
"""Parse IFNDEF/ELSE/END IF directive."""
node = FortielIfNode(
self._file_path, self._line_number,
f'not defined("{self._match_directive_syntax(_FORTIEL_IFNDEF, "name")}")')
while not self._matches_directive('else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_do_directive(self) -> FortielDoNode:
"""Parse DO/END DO directive."""
# Note that we are not evaluating or validating loop bound expression here.
node = FortielDoNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_DO, 'index_name', 'ranges_expression'))
if is_reserved(node.index_name):
message = f'<do> loop index name `{node.index_name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
while not self._matches_directive('end do'):
node.loop_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_DO)
return node
def _parse_for_directive(self) -> FortielForNode:
"""Parse FOR/END FOR directive."""
# Note that we are not evaluating or validating loop expressions here.
node = FortielForNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_FOR, 'index_names', 'iterable_expression'))
node.index_names = list(map(str.strip, node.index_names.split(',')))
if len(bad_names := list(filter(is_reserved, node.index_names))) != 0:
message = f'<for> loop index names `{"`, `".join(bad_names)}` are reserved words'
raise FortielSyntaxError(message, node.file_path, node.line_number)
while not self._matches_directive('end for'):
node.loop_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_FOR)
return node
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_call_segment(self) -> FortielCallSegmentNode:
"""Parse call segment."""
# Call directive uses different syntax, so it cannot be parsed with common routines.
if (match := self._matches_line(_FORTIEL_CALL)) is None:
message = 'invalid call segment syntax'
raise FortielSyntaxError(message, self._file_path, self._line_number)
# Note that we are not evaluating or matching call arguments and sections here.
node = FortielCallSegmentNode(
self._file_path, self._line_number, *match.group('spaces', 'name', 'argument'))
node.name = _make_name(node.name)
node.argument = node.argument.strip()
self._advance_line()
return node
def _parse_macro_directive(self) -> FortielMacroNode:
"""Parse MACRO/END MACRO directive."""
node = FortielMacroNode(
self._file_path, self._line_number,
(match := self._match_directive_syntax(_FORTIEL_MACRO, 'name', 'pattern'))[0])
node.name = _make_name(node.name)
node.pattern_nodes = self._parse_pattern_directives_list(node, pattern=match[1])
if self._matches_directive('section'):
while not self._matches_directive('finally', 'end macro'):
section_node = FortielSectionNode(
self._file_path, self._line_number,
*(match := self._match_directive_syntax(
_FORTIEL_SECTION, 'name', 'once', 'pattern'))[0:2])
section_node.name = _make_name(section_node.name)
section_node.once = section_node.once is not None
section_node.pattern_nodes = \
self._parse_pattern_directives_list(section_node, pattern=match[2])
node.section_nodes.append(section_node)
if self._matches_directive('finally'):
self._match_directive_syntax(_FORTIEL_FINALLY)
while not self._matches_directive('end macro'):
node.finally_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_MACRO)
return node
def _parse_pattern_directives_list(
self, node: Union[FortielMacroNode, FortielSectionNode],
pattern: Optional[str]) -> List[FortielPatternNode]:
"""Parse PATTERN directive list."""
pattern_nodes: List[FortielPatternNode] = []
if pattern is not None:
pattern_node = FortielPatternNode(node.file_path, node.line_number, pattern)
while not self._matches_directive('pattern', 'section', 'finally', 'end macro'):
pattern_node.match_nodes.append(self._parse_statement())
pattern_nodes.append(pattern_node)
elif not self._matches_directive('pattern'):
message = 'expected <pattern> directive'
raise FortielSyntaxError(message, self._file_path, self._line_number)
if self._matches_directive('pattern'):
while not self._matches_directive('section', 'finally', 'end macro'):
pattern_node = FortielPatternNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_PATTERN, 'pattern'))
while not self._matches_directive('pattern', 'section', 'finally', 'end macro'):
pattern_node.match_nodes.append(self._parse_statement())
pattern_nodes.append(pattern_node)
# Compile the patterns.
for pattern_node in pattern_nodes:
try:
pattern_node.pattern = _compile_re(pattern_node.pattern)
except re.error as error:
message = f'invalid pattern regular expression `{pattern_node.pattern}`'
raise FortielSyntaxError(
message, pattern_node.file_path, pattern_node.line_number) from error
return pattern_nodes
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Directives Executor =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
FortielPrintFunc = Callable[[str], None]
_FORTIEL_INLINE_EVAL: Final = _compile_re(r'\${(?P<expression>.+?)}\$', True)
_FORTIEL_INLINE_SHORT_EVAL: Final = _compile_re(r'[$@](?P<expression>\w+)\b', True)
_FORTIEL_INLINE_SHORT_LOOP: Final = _compile_re(r'''
(?P<comma_before>,\s*)?
[\^@](?P<expression>:|\w+) (?P<comma_after>\s*,)?''', True)
_FORTIEL_INLINE_LOOP: Final = _compile_re(r'''
(?P<comma_before>,\s*)?
[\^@]{ (?P<expression>.*?) ([\^@]\|[\^@] (?P<ranges_expression>.*?) )? }[\^@]
(?P<comma_after>\s*,)?''', True)
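# Judging from the patterns above: `${expr}$` is an inline evaluation, `$name`/`@name` is the short
# form, and `^{expr}^` (optionally with `^|^ ranges` inside) is an inline loop.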
_FORTIEL_CMDARG_DEFINE: Final = _compile_re(r'(?P<name>\w+)(?:\s*=\s*(?P<value>.*))')
# TODO: implement builtins correctly.
_FORTIEL_BUILTINS_NAMES = [
'__INDEX__', '__FILE__', '__LINE__', '__DATE__', '__TIME__']
class FortielExecutor:
"""Fortiel syntax tree executor."""
def __init__(self, options: FortielOptions):
self._scope: Dict[str, Any] = {}
self._macros: Dict[str, FortielMacroNode] = {}
self._imported_files_paths: Set[str] = set()
self._options: FortielOptions = options
self._scope['defined'] = self._defined
for define in self._options.defines:
define_name, define_value = \
_FORTIEL_CMDARG_DEFINE.match(define).group('name', 'value')
define_value = self._evaluate_expression(define_value, '<shell>', 1)
self._scope[define_name] = define_value
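# e.g. a command-line define `N=10` evaluates the right-hand side as a Python expression and puts N = 10 into the scope.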
def _defined(self, name: str) -> bool:
return name in self._scope
@property
def _loop_index(self) -> Optional[int]:
return self._scope.get('__LOOP_INDEX__')
@_loop_index.setter
def _loop_index(self, index: Optional[int]) -> None:
self._scope['__LOOP_INDEX__'] = index
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _evaluate_expression(self, expression: str, file_path: str, line_number: int) -> Any:
"""Evaluate Python expression."""
try:
# TODO: at which point should the line continuations be removed?
expression = expression.replace('&\n', '\n')
self._scope.update(__FILE__=file_path, __LINE__=line_number)
value = eval(expression, self._scope)
return value
except Exception as error:
error_text = str(error)
error_text = error_text.replace('<head>', f'expression `{expression}`')
error_text = error_text.replace('<string>', f'expression `{expression}`')
message = f'Python expression evaluation error: {error_text}'
raise FortielRuntimeError(message, file_path, line_number) from error
def _evaluate_ranges_expression(
self, expression: str, file_path: str, line_number: int) -> range:
"""Evaluate Python ranges expression"""
ranges = self._evaluate_expression(expression, file_path, line_number)
if not (isinstance(ranges, tuple) and (2 <= len(ranges) <= 3) and
list(map(type, ranges)) == len(ranges) * [int]):
message = \
'tuple of two or three integers inside the <do> ' + \
f'directive ranges is expected, got `{expression}`'
raise FortielRuntimeError(message, file_path, line_number)
(start, stop), step = ranges[0:2], (ranges[2] if len(ranges) == 3 else 1)
return range(start, stop + step, step)
def _evaluate_line(self, line: str, file_path: str, line_number: int) -> | |
in featureList:
testData.append(np.array(data_dict[data])[:100])
testData = np.array(testData)
predict(model,testData)
init_DataDict()
# for _ in range(100):
# data_dict[data].popleft()
if(self.current_option == self.button_rows-1):
self.current_option = -1
self.next_option()
class MessageSentPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.rows = 3
self.button_rows = 1
self.current_option = 0
self.current_screen = False
self.text1 = "Mesaj: "
self.text2 = "Kime: "
self.label1 = Button(text=self.text1,background_normal="buttons/e_button.png", font_size=35)
self.add_widget(self.label1)
self.label2 = Button(text=self.text2,background_normal="buttons/e_button.png", font_size=35)
self.add_widget(self.label2)
self.button1 = Button(text="Ana Menü",background_normal="buttons/home_button.png", font_size=35)
self.button1.bind(on_press=self.main_menu_button)
self.add_widget(self.button1)
Clock.schedule_interval(lambda dt: self.callback_f(), GLOBAL_TIMER_VALUE)
#self.update_texts(self.text1, self.text2)
def main_menu_button(self, instances):
self.set_current_screen(False)
main_app.screen_manager.current = "Main"
main_app.main_page.set_current_screen(True)
"""
def next_option(self):
if(self.current_option < self.button_rows-1):
self.current_option += 1
self.update_texts()
def previous_option(self):
if(self.current_option > 0):
self.current_option -= 1
self.update_texts()
"""
def update_texts(self, new_text1, new_text2):
print(new_text1, new_text2)
self.label1.text = self.text1 + new_text1
self.label2.text = self.text2 + new_text2 + "\n\nGönderildi"
if(self.current_option == 0):
self.button1.background_color = COLOR_CHOOSEN
else:
self.button1.background_color = COLOR_OTHERS
def choose_current_option(self):
if(self.current_option == 0):
self.main_menu_button(1)
else:
print("ERROR - current_option = ", self.current_option)
def set_current_screen(self, status):
self.current_screen = status
def callback_f(self):
if(self.current_screen):
read_data()
if len(data_dict['rawValue']) > 100:
blink = find_peak(np.array(data_dict['rawValue'])[:100])
if blink == 1:
self.main_menu_button(1)
testData = []
for data in featureList:
testData.append(np.array(data_dict[data])[:100])
testData = np.array(testData)
predict(model,testData)
init_DataDict()
# for _ in range(100):
# data_dict[data].popleft()
class KeyboardPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.rows = 2
self.button_rows = 1
self.label_text = ""
self.current_row = 0
self.current_col = 4
self.selecting_cols = True
self.current_screen = False
self.label1 = Label(text=self.label_text, size_hint_y=None, height=100)
self.add_widget(self.label1)
self.key_layout = GridLayout(rows=5, cols=5)
self.add_widget(self.key_layout)
self.button11 = Button(text="A")
self.button11.bind(on_press=self.button11_f)
self.key_layout.add_widget(self.button11)
self.button12 = Button(text="B")
self.button12.bind(on_press=self.button12_f)
self.key_layout.add_widget(self.button12)
self.button13 = Button(text="C")
self.button13.bind(on_press=self.button13_f)
self.key_layout.add_widget(self.button13)
self.button14 = Button(text="D")
self.button14.bind(on_press=self.button14_f)
self.key_layout.add_widget(self.button14)
self.button15 = Button(text="E")
self.button15.bind(on_press=self.button15_f)
self.key_layout.add_widget(self.button15)
self.button21 = Button(text="F")
self.button21.bind(on_press=self.button21_f)
self.key_layout.add_widget(self.button21)
self.button22 = Button(text="G")
self.button22.bind(on_press=self.button22_f)
self.key_layout.add_widget(self.button22)
self.button23 = Button(text="H")
self.button23.bind(on_press=self.button23_f)
self.key_layout.add_widget(self.button23)
self.button24 = Button(text="I")
self.button24.bind(on_press=self.button24_f)
self.key_layout.add_widget(self.button24)
self.button25 = Button(text="J")
self.button25.bind(on_press=self.button25_f)
self.key_layout.add_widget(self.button25)
self.button31 = Button(text="K")
self.button31.bind(on_press=self.button31_f)
self.key_layout.add_widget(self.button31)
self.button32 = Button(text="L")
self.button32.bind(on_press=self.button32_f)
self.key_layout.add_widget(self.button32)
self.button33 = Button(text="M")
self.button33.bind(on_press=self.button33_f)
self.key_layout.add_widget(self.button33)
self.button34 = Button(text="N")
self.button34.bind(on_press=self.button34_f)
self.key_layout.add_widget(self.button34)
self.button35 = Button(text="O")
self.button35.bind(on_press=self.button35_f)
self.key_layout.add_widget(self.button35)
self.button41 = Button(text="P")
self.button41.bind(on_press=self.button41_f)
self.key_layout.add_widget(self.button41)
self.button42 = Button(text="R")
self.button42.bind(on_press=self.button42_f)
self.key_layout.add_widget(self.button42)
self.button43 = Button(text="S")
self.button43.bind(on_press=self.button43_f)
self.key_layout.add_widget(self.button43)
self.button44 = Button(text="T")
self.button44.bind(on_press=self.button44_f)
self.key_layout.add_widget(self.button44)
self.button45 = Button(text="U")
self.button45.bind(on_press=self.button45_f)
self.key_layout.add_widget(self.button45)
self.button51 = Button(text="V")
self.button51.bind(on_press=self.button51_f)
self.key_layout.add_widget(self.button51)
self.button52 = Button(text="Y")
self.button52.bind(on_press=self.button52_f)
self.key_layout.add_widget(self.button52)
self.button53 = Button(text="Z")
self.button53.bind(on_press=self.button53_f)
self.key_layout.add_widget(self.button53)
self.button54 = Button(text="[Space]")
self.button54.bind(on_press=self.button54_f)
self.key_layout.add_widget(self.button54)
self.button55 = Button(text="Ana Menü")
self.button55.bind(on_press=self.button55_f)
self.key_layout.add_widget(self.button55)
Clock.schedule_interval(lambda dt: self.callback_f(), GLOBAL_TIMER_VALUE)
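# Two-phase scanning keyboard (as far as the handlers below show): while selecting_cols is True a whole
# column is highlighted (see update_for_cols); once a column has been chosen, a single key within that
# column is selected, and change_selecting_option toggles back to column scanning and resets the cursor.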
def change_selecting_option(self):
if(self.selecting_cols):
self.selecting_cols = False
self.current_row = 4
else:
self.selecting_cols = True
self.current_row = 4
self.current_col = 4
def button11_f(self, instances):
if not self.selecting_cols:
self.label_text += "A"
self.label1.text = self.label_text
self.change_selecting_option()
def button12_f(self, instances):
if not self.selecting_cols:
self.label_text += "B"
self.label1.text = self.label_text
self.change_selecting_option()
def button13_f(self, instances):
if not self.selecting_cols:
self.label_text += "C"
self.label1.text = self.label_text
self.change_selecting_option()
def button14_f(self, instances):
if not self.selecting_cols:
self.label_text += "D"
self.label1.text = self.label_text
self.change_selecting_option()
def button15_f(self, instances):
if not self.selecting_cols:
self.label_text += "E"
self.label1.text = self.label_text
self.change_selecting_option()
def button21_f(self, instances):
if not self.selecting_cols:
self.label_text += "F"
self.label1.text = self.label_text
self.change_selecting_option()
def button22_f(self, instances):
if not self.selecting_cols:
self.label_text += "G"
self.label1.text = self.label_text
self.change_selecting_option()
def button23_f(self, instances):
if not self.selecting_cols:
self.label_text += "H"
self.label1.text = self.label_text
self.change_selecting_option()
def button24_f(self, instances):
if not self.selecting_cols:
self.label_text += "I"
self.label1.text = self.label_text
self.change_selecting_option()
def button25_f(self, instances):
if not self.selecting_cols:
self.label_text += "J"
self.label1.text = self.label_text
self.change_selecting_option()
def button31_f(self, instances):
if not self.selecting_cols:
self.label_text += "K"
self.label1.text = self.label_text
self.change_selecting_option()
def button32_f(self, instances):
if not self.selecting_cols:
self.label_text += "L"
self.label1.text = self.label_text
self.change_selecting_option()
def button33_f(self, instances):
if not self.selecting_cols:
self.label_text += "M"
self.label1.text = self.label_text
self.change_selecting_option()
def button34_f(self, instances):
if not self.selecting_cols:
self.label_text += "N"
self.label1.text = self.label_text
self.change_selecting_option()
def button35_f(self, instances):
if not self.selecting_cols:
self.label_text += "O"
self.label1.text = self.label_text
self.change_selecting_option()
def button41_f(self, instances):
if not self.selecting_cols:
self.label_text += "P"
self.label1.text = self.label_text
self.change_selecting_option()
def button42_f(self, instances):
if not self.selecting_cols:
self.label_text += "R"
self.label1.text = self.label_text
self.change_selecting_option()
def button43_f(self, instances):
if not self.selecting_cols:
self.label_text += "S"
self.label1.text = self.label_text
self.change_selecting_option()
def button44_f(self, instances):
if not self.selecting_cols:
self.label_text += "T"
self.label1.text = self.label_text
self.change_selecting_option()
def button45_f(self, instances):
if not self.selecting_cols:
self.label_text += "U"
self.label1.text = self.label_text
self.change_selecting_option()
def button51_f(self, instances):
if not self.selecting_cols:
self.label_text += "V"
self.label1.text = self.label_text
self.change_selecting_option()
def button52_f(self, instances):
if not self.selecting_cols:
self.label_text += "Y"
self.label1.text = self.label_text
self.change_selecting_option()
def button53_f(self, instances):
if not self.selecting_cols:
self.label_text += "Z"
self.label1.text = self.label_text
self.change_selecting_option()
def button54_f(self, instances):
if not self.selecting_cols:
self.label_text += " "
self.label1.text = self.label_text
self.change_selecting_option()
def button55_f(self, instances):
if not self.selecting_cols:
self.selecting_cols = True
self.set_current_screen(False)
main_app.screen_manager.current = "Main"
main_app.main_page.set_current_screen(True)
self.label_text = ""
self.change_selecting_option()
def main_menu_button(self, instances):
if not self.selecting_cols:
self.selecting_cols = True
self.set_current_screen(False)
main_app.screen_manager.current = "Main"
main_app.main_page.set_current_screen(True)
self.label_text = ""
self.change_selecting_option()
def choose_current_option(self):
if(self.current_row == 0):
if(self.current_col == 0):
self.button11_f(1)
elif(self.current_col == 1):
self.button12_f(1)
elif(self.current_col == 2):
self.button13_f(1)
elif(self.current_col == 3):
self.button14_f(1)
elif(self.current_col == 4):
self.button15_f(1)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
elif(self.current_row == 1):
if(self.current_col == 0):
self.button21_f(1)
elif(self.current_col == 1):
self.button22_f(1)
elif(self.current_col == 2):
self.button23_f(1)
elif(self.current_col == 3):
self.button24_f(1)
elif(self.current_col == 4):
self.button25_f(1)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
elif(self.current_row == 2):
if(self.current_col == 0):
self.button31_f(1)
elif(self.current_col == 1):
self.button32_f(1)
elif(self.current_col == 2):
self.button33_f(1)
elif(self.current_col == 3):
self.button34_f(1)
elif(self.current_col == 4):
self.button35_f(1)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
elif(self.current_row == 3):
if(self.current_col == 0):
self.button41_f(1)
elif(self.current_col == 1):
self.button42_f(1)
elif(self.current_col == 2):
self.button43_f(1)
elif(self.current_col == 3):
self.button44_f(1)
elif(self.current_col == 4):
self.button45_f(1)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
elif(self.current_row == 4):
if(self.current_col == 0):
self.button51_f(1)
elif(self.current_col == 1):
self.button52_f(1)
elif(self.current_col == 2):
self.button53_f(1)
elif(self.current_col == 3):
self.button54_f(1)
elif(self.current_col == 4):
self.button55_f(1)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
else:
print("HHHHHAAAAATTTTTAAAA", self.current_row, self.current_col)
def update_for_cols(self):
if(self.current_col == 0):
self.button11.background_color = COLOR_CHOOSEN
self.button21.background_color = COLOR_CHOOSEN
self.button31.background_color = COLOR_CHOOSEN
self.button41.background_color = COLOR_CHOOSEN
self.button51.background_color = COLOR_CHOOSEN
self.button12.background_color = COLOR_OTHERS
self.button22.background_color = COLOR_OTHERS
self.button32.background_color = COLOR_OTHERS
self.button42.background_color = COLOR_OTHERS
self.button52.background_color = COLOR_OTHERS
self.button13.background_color = COLOR_OTHERS
self.button23.background_color = COLOR_OTHERS
self.button33.background_color = COLOR_OTHERS
self.button43.background_color = COLOR_OTHERS
self.button53.background_color = COLOR_OTHERS
self.button14.background_color = COLOR_OTHERS
self.button24.background_color = COLOR_OTHERS
self.button34.background_color = COLOR_OTHERS
self.button44.background_color = COLOR_OTHERS
self.button54.background_color = COLOR_OTHERS
self.button15.background_color = COLOR_OTHERS
self.button25.background_color = COLOR_OTHERS
self.button35.background_color = COLOR_OTHERS
self.button45.background_color = COLOR_OTHERS
self.button55.background_color = COLOR_OTHERS
elif(self.current_col == 1):
self.button12.background_color = COLOR_CHOOSEN
self.button22.background_color = COLOR_CHOOSEN
self.button32.background_color = COLOR_CHOOSEN
self.button42.background_color = COLOR_CHOOSEN
self.button52.background_color = COLOR_CHOOSEN
self.button11.background_color = COLOR_OTHERS
self.button21.background_color = COLOR_OTHERS
self.button31.background_color = COLOR_OTHERS
self.button41.background_color = COLOR_OTHERS
self.button51.background_color = COLOR_OTHERS
self.button13.background_color = COLOR_OTHERS
self.button23.background_color = COLOR_OTHERS
self.button33.background_color = COLOR_OTHERS
self.button43.background_color = COLOR_OTHERS
self.button53.background_color = COLOR_OTHERS
self.button14.background_color = COLOR_OTHERS
self.button24.background_color = COLOR_OTHERS
self.button34.background_color = COLOR_OTHERS
self.button44.background_color = COLOR_OTHERS
self.button54.background_color = COLOR_OTHERS
self.button15.background_color = COLOR_OTHERS
self.button25.background_color = COLOR_OTHERS
self.button35.background_color = COLOR_OTHERS
self.button45.background_color = COLOR_OTHERS
self.button55.background_color = COLOR_OTHERS
elif(self.current_col == 2):
self.button13.background_color = COLOR_CHOOSEN
self.button23.background_color = COLOR_CHOOSEN
self.button33.background_color = COLOR_CHOOSEN
self.button43.background_color = COLOR_CHOOSEN
self.button53.background_color = COLOR_CHOOSEN
self.button11.background_color = COLOR_OTHERS
self.button21.background_color = COLOR_OTHERS
self.button31.background_color = COLOR_OTHERS
self.button41.background_color = COLOR_OTHERS
self.button51.background_color = COLOR_OTHERS
self.button12.background_color = COLOR_OTHERS
self.button22.background_color = COLOR_OTHERS
self.button32.background_color = COLOR_OTHERS
self.button42.background_color = COLOR_OTHERS
self.button52.background_color = COLOR_OTHERS
self.button14.background_color = COLOR_OTHERS
self.button24.background_color = COLOR_OTHERS
self.button34.background_color = COLOR_OTHERS
self.button44.background_color = COLOR_OTHERS
self.button54.background_color = COLOR_OTHERS
self.button15.background_color = COLOR_OTHERS
self.button25.background_color = COLOR_OTHERS
self.button35.background_color = COLOR_OTHERS
self.button45.background_color = COLOR_OTHERS
self.button55.background_color = COLOR_OTHERS
elif(self.current_col == 3):
self.button14.background_color = COLOR_CHOOSEN
self.button24.background_color = COLOR_CHOOSEN
self.button34.background_color = COLOR_CHOOSEN
self.button44.background_color = COLOR_CHOOSEN
self.button54.background_color = COLOR_CHOOSEN
self.button11.background_color = COLOR_OTHERS
self.button21.background_color = COLOR_OTHERS
self.button31.background_color = COLOR_OTHERS
self.button41.background_color = COLOR_OTHERS
self.button51.background_color = COLOR_OTHERS
self.button12.background_color = COLOR_OTHERS
self.button22.background_color = COLOR_OTHERS
self.button32.background_color = COLOR_OTHERS
self.button42.background_color = COLOR_OTHERS
self.button52.background_color = COLOR_OTHERS
self.button13.background_color = COLOR_OTHERS
self.button23.background_color = COLOR_OTHERS
self.button33.background_color = COLOR_OTHERS
self.button43.background_color = COLOR_OTHERS
self.button53.background_color = COLOR_OTHERS
self.button15.background_color = COLOR_OTHERS
self.button25.background_color = COLOR_OTHERS
self.button35.background_color = COLOR_OTHERS
self.button45.background_color = COLOR_OTHERS
self.button55.background_color = COLOR_OTHERS
elif(self.current_col == 4):
self.button15.background_color = COLOR_CHOOSEN
self.button25.background_color = COLOR_CHOOSEN
self.button35.background_color = COLOR_CHOOSEN
self.button45.background_color = COLOR_CHOOSEN
self.button55.background_color = COLOR_CHOOSEN
self.button11.background_color = COLOR_OTHERS
self.button21.background_color = COLOR_OTHERS
self.button31.background_color = COLOR_OTHERS
self.button41.background_color = COLOR_OTHERS
self.button51.background_color = COLOR_OTHERS
self.button12.background_color = COLOR_OTHERS
self.button22.background_color = COLOR_OTHERS
self.button32.background_color = COLOR_OTHERS
self.button42.background_color = COLOR_OTHERS
self.button52.background_color = COLOR_OTHERS
self.button13.background_color = COLOR_OTHERS
self.button23.background_color | |
#!/usr/bin/python3
# run time halt 20,24 sec
# PiJuice sw 1.4 is installed for Python 3
# sudo find / -name "pijuice.py" /usr/lib/python3.5/dist-packages/pijuice.py
#https://feeding.cloud.geek.nz/posts/time-synchronization-with-ntp-and-systemd/
# on systemd apt-get purge ntp to use only systemd-timesyncd.service
# edit /etc/systemd/timesyncd.conf
# systemctl restart systemd-timesyncd.service
# timedatectl status
#to enable NTP synchronized
# timedatectl set-ntp true
#The system is configured to read the RTC time in the local time zone.
#This mode can not be fully supported. It will create various problems
#with time zone changes and daylight saving time adjustments. The RTC
#time is never updated, it relies on external facilities to maintain it.
#If at all possible, use RTC in UTC by calling
#'timedatectl set-local-rtc 0'.
# timedatectl set-local-rtc 0
# to set date
#sudo date -s 16:31
# read hwclock
# sudo hwclock -r
# set hwclock with system time
# sudo hwclock -w --systohc same time, not utc vs local
# set hwclock with date
# sudo hwclock --set --date --localtime "1/4/2013 23:10:45" 4 jan
#set system time from hwclock
# sudo hwclock -s --hctosys
# use --debug
# use --utc or --localtime when setting hwclock
"""
# juice internal led 2
juice ok. blink blue 2x. else solid red
ntp ok. blink green 1x, 100ms, intensity 50 . else use hwclock blink red
pic sent: blink green 2x, 100ms, intensity 200; if it fails blink red; at night blink blue
halt. blink blue 2x. else stay on blink red 2x
"""
# After the initial set of the system time using the Linux date command and the copy to the PiJuice RTC with 'sudo hwclock -w', you simply need to run 'sudo hwclock -s' at system startup to copy the time from the RTC to the system clock, e.g. in /etc/rc.local. This is also assuming that your ID EEPROM is set to 0x50, in which case the RTC driver is loaded at boot.
# lsmod to check module Force loading the module by adding the following line to /boot/config.txt:
# dtoverlay=i2c-rtc,ds1339
# i2cdetect must shows UU instead of 68 https://github.com/PiSupply/PiJuice/issues/186
# hwclock -r read hwckock to stdout -w date to hw -s hw to date
#sudo ntpd -gq force ntp update
from __future__ import print_function
import RPi.GPIO as GPIO
import os
import time
# python2
#import httplib, urllib
import http.client, urllib
import datetime
# python 2
#import thread
import _thread
#https://github.com/vshymanskyy/blynk-library-python
#pip install blynk-library-python
import BlynkLib
# sudo apt-get install ntp
# systemctl
# sudo apt-get install ntpdate (client)
#pip install ntplib
import ntplib
#sudo pip install thingspeak
#https://thingspeak.readthedocs.io/en/latest/
import thingspeak
import pijuice
import subprocess
import sys
import logging
print('juice camera v2.1')
# wakeup every x mn
DELTA_MIN=20 # mn
limit_soc=15 # limit send pushover . a bit higher than the HALT_POWER_OFF in juice config
# Low values should typically be between 5-10%
# in juice config, minimum charge is 10%, min voltage 3.2V. wakeup on charge 70%
print ('send pushover when soc is below %d' %(limit_soc))
pin_halt=26
pin_led=16 # not used anymore
# Blynk Terminal V8, button V18
bbutton=18 # halt or run
bterminal=8
bsoc=20
bsleep=21 #programmable sleep time
btemp=22 # of bat
bvbat=23 # of pijuice
bcount=24 # increment at each run
# use external led
led = False
#button value 2 un-initialized
button="2"
# THIS IS A string
count = 0
sleep_time=60
print ("STARTING. set system time from hwclock: ")
subprocess.call(["sudo", "hwclock", "--hctosys"])
# --systohc to set hwclock
# to measure execution time
start_time=datetime.datetime.now()
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_led,GPIO.OUT) # external led
# on at start, while running
GPIO.output(pin_led, GPIO.HIGH)
# connect to ground to keep running
GPIO.setup(pin_halt,GPIO.IN,pull_up_down=GPIO.PUD_UP)
t=20
print ("sleep to make sure boot is over and juice started:", t, ' sec')
time.sleep(t)
halt = GPIO.input(pin_halt)
print ("state of keep running pin is (pullup, no jumper = HIGH = HALT)", halt)
if halt ==0:
print(" WARNING: will keep running!!!!")
# flash the external led: c cycles, d seconds delay
def flash(c, d):  # external led
    for i in range(0, c):
        GPIO.output(pin_led, GPIO.HIGH)
        time.sleep(d)
        GPIO.output(pin_led, GPIO.LOW)
        time.sleep(d)

if led:  # led is false. could use external led blink. use internal led instead
    # flash led to signal start
    print("flash external led")
    flash(5, 0.2)

# debug, info, warning, error, critical
log_file = "/home/pi/beecamjuice/log.log"
print("logging to: ", log_file)
logging.basicConfig(filename=log_file, level=logging.INFO)
logging.info(str(datetime.datetime.now()) + '\n-------------- beecamjuice starting ...')
s = os.popen('date').read()
print("system date: ", s)
logging.info(str(datetime.datetime.now()) + ' system date at start: ' + s)
s = os.popen('sudo hwclock -r').read()
logging.info(str(datetime.datetime.now()) + ' read hw clock at start: ' + s)
print("read hw clock: ", s)
# python3
def send_pushover(s):
# P2 conn = http.HTTPSConnection("api.pushover.net:443")
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
#urllib has been split up in Python 3. The urllib.urlencode() function is now urllib.parse.urlencode(), and the urllib.urlopen() function is now urllib.request.urlopen().
# P2 urllib.urlencode({
urllib.parse.urlencode({
"token": "your token",
"user": "your token",
"message": s,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
logging.info(str(datetime.datetime.now())+ ' send pushover ...' + s )
#https://github.com/PiSupply/PiJuice/issues/91
# Record start time for testing
#txt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' -- Started\n'
#with open('/home/pi/beecamjuice/test.log','a') as f:
# f.write(txt)
while not os.path.exists('/dev/i2c-1'):
time.sleep(0.1)
try:
pj = pijuice.PiJuice(1, 0x14)
except:
# cannot use internal led to signal error. pj not created
print("Cannot create pijuice object")
logging.error(str(datetime.datetime.now())+ '!!!! cannot create pijuice object, exit and keep running ...' )
send_pushover("PiJuice: cannot create PiJuice object. will exit")
sys.exit()
status = pj.status.GetStatus()
print ('juice object created:', status )
status = status ['error']
if status == 'NO_ERROR':
print ("PiJuice Status OK")
# internal led 2 'D2'. blue [r,g,b] range 0-255
#pj.SetLedState('D2', [0, 0 , 200])
# blink x times, Blue 500 ms, off 500 ms
pj.status.SetLedBlink('D2',2, [0,0,200], 500, [0, 0, 0], 500)
# executed on juice microcontroler. if next set led too quick, will overwrite
else:
# internal led. solid red RGB
pj.status.SetLedState('D2', [200, 0 ,0])
print ("PiJuice Status ERROR")
logging.error(str(datetime.datetime.now())+ ' PiJuice status ERROR' )
enable_wakeup(pj) # in case
    shut(pj) # otherwise it was staying on, sucking power. sucker
print ("juice firmware version: ", pj.config.GetFirmwareVersion()['data']['version'])
# dict
soc = pj.status.GetChargeLevel()
soc = "%0.0f" %(soc['data'])
print ("soc ", soc)
logging.info(str(datetime.datetime.now())+ ' soc: ' + str(soc) )
soc = int(soc)
if soc < limit_soc:
logging.info(str(datetime.datetime.now())+ ' soc too low: ' + str(soc) )
time.sleep(0.4)
vbat = pj.status.GetBatteryVoltage()
vbat = "%0.1f" %(vbat['data']/1000.0)
print ("vbat on board battery voltage", vbat)
logging.info(str(datetime.datetime.now())+ ' vbat: ' + str(vbat) )
time.sleep(0.4)
ibat = pj.status.GetBatteryCurrent()
time.sleep(0.4)
ibat = pj.status.GetBatteryCurrent() # false read ?
ibat = ibat['data']
print ("ibat current supplied from the battery", ibat)
logging.debug(str(datetime.datetime.now())+ ' ibat: ' + str(ibat) )
# iio 200 ma, Vio 5V ibat 300 ma when not charging, -2000 when charging
# Vbat is the on-board battery voltage and ibat is the current that is being supplied from the battery.
time.sleep(0.4)
temp=pj.status.GetBatteryTemperature()
temp = "%0.0f" %(temp['data'])
print ("temp ", temp)
logging.debug(str(datetime.datetime.now())+ ' temp: ' + str(temp) )
# vio is the voltage on the IO pin, whether it is input or output, and iio is the current being provided or drawn.
# When reading analog read on IO1 it will output the same as vio.
time.sleep(0.4)
vio=pj.status.GetIoVoltage()
vio = vio['data']
print ("vio voltage on IO, input or output", vio)
logging.debug(str(datetime.datetime.now())+ ' vio: ' + str(vio) )
time.sleep(0.4)
iio=pj.status.GetIoCurrent()
iio = iio['data']
print ("iio current drawn or supplied on IO", iio)
logging.debug(str(datetime.datetime.now())+ ' iio: ' + str(iio) )
"""
time.sleep(0.4)
print ("reading analog in")
lipovbat=pj.status.GetIoAnalogInput(1)
print (lipovbat)
lipovbat= "%0.1f" %(2.0 * lipovbat['data']/1000.0) # pont diviseur. 3.3v logic max 3.6
print ("lipo vbat Volt", lipovbat)
logging.info(str(datetime.datetime.now())+ ' lipo bat Volt: ' + str(lipovbat) )
"""
print ("reset fault flag")
pj.status.ResetFaultFlags(['powerOffFlag', 'sysPowerOffFlag'])
# thinkspeak
print ( "publish to thinkspeak" )
try:
thing = thingspeak.Channel(285664, api_key="<KEY>", \
write_key="<KEY>", fmt='json', timeout=None)
response = thing.update({1:vbat,2:soc,3:temp})
print (response)
except:
print("Thingspeak failed")
# program alarm
def set_alarm(sleep_time, pj):
# Set RTC alarm x minutes from now
# RTC is kept in UTC or localtime
a={}
a['year'] = 'EVERY_YEAR'
a['month'] = 'EVERY_MONTH'
a['day'] = 'EVERY_DAY'
a['hour'] = 'EVERY_HOUR'
t = datetime.datetime.utcnow()
#a['minute'] = (t.minute + DELTA_MIN) % 60
a['minute'] = (t.minute + sleep_time) % 60
a['second'] = 0
try:
print ("setting RTC alarm " , a['minute'] )
status = pj.rtcAlarm.SetAlarm(a)
print ("rtc Set alarm status: " + str(status)) # if not str exception cannot concatenate str and dict
logging.info(str(datetime.datetime.now())+ ' rtc Set alarm status: ' + str(status) )
if status['error'] != 'NO_ERROR':
print('Cannot set RTC alarm')
logging.error(str(datetime.datetime.now())+ ' Cannot set RTC alarm; will exit and keep running ' )
blynk.virtual_write(bterminal, 'cannot set RTC. RUN')
send_pushover("PiJuice: cannot set RTC alarm. will exit and keep running")
time.sleep(5)
sys.exit()
else:
print('RTC Get Alarm: ' + str(pj.rtcAlarm.GetAlarm()))
logging.info(str(datetime.datetime.now())+ ' RTC Get Alarm: ' + str(pj.rtcAlarm.GetAlarm()) )
    except Exception as e:
        logging.info(str(datetime.datetime.now())+ ' !!! EXCEPTION in set_alarm: ' + str(e))
        blynk.virtual_write(bterminal, 'Exception setting RTC alarm\n')
        send_pushover("PiJuice: exception while setting RTC alarm")
def enable_wakeup(pj):
# Enable wakeup, otherwise power to the RPi will not be applied when the RTC alarm goes off
s= pj.rtcAlarm.SetWakeupEnabled(True)
print ("enable wakeup to power PI on RTC alarm " + str(s))
logging.info(str(datetime.datetime.now())+ ' enable wakeup: ' + str(s) )
time.sleep(0.4)
# set power off
def power_off(delay,pj):
try:
# remove 5V power to PI
pj.power.SetPowerOff(delay)
logging.info(str(datetime.datetime.now())+ ' setpoweroff after ' + str(delay))
except Exception as e:
print ("exception in setpoweroff: ", str(e))
logging.error(str(datetime.datetime.now())+ ' exception in setpoweroff ' + str(e) )
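# Hedged sketch (assumption, not from this truncated excerpt): shut(), called earlier
# when the PiJuice status reports an error, is not defined in the visible part of the
# script; in the full script it presumably chains the helpers above roughly like this:
#   set_alarm(sleep_time, pj)            # program the RTC to fire in sleep_time minutes
#   enable_wakeup(pj)                    # let the RTC alarm re-apply 5V power to the Pi
#   power_off(60, pj)                    # cut 5V power ~60 s after the OS has halted
#   subprocess.call(["sudo", "halt"])    # cleanly halt the Pi itself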
# blynk
def blynk_thread(now):
# remove first word dow so that it fits in android screen
print (now)
n=now.split()
del n[0]
now=""
for i in n:
now=now+i+' '
print (now)
print(" BLYNK_START blynk thread started "), now
def blynk_connected():
print(" BLYNK_CONNECTED Blynk has connected. | |
<reponame>deeso/dhp<filename>src/docker_honey/simple_commands/boto.py
__copyright__ = """
Copyright 2020 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import os
import boto3
from .consts import *
from .util import *
import traceback
import time
class Commands(object):
AWS_SECRET_ACCESS_KEY = None
AWS_ACCESS_KEY_ID = None
CURRENT_REGION = 'us-east-2'
KMS_KEYS = {}
KMS_ARNS = {}
KMS_INFOS = {}
KMS_ALIASES = {}
DEFAULT_KMS_IDS = {}
DEFAULT_KMS_ALIASES = {}
DEFAULT_KMS_ARNS = {}
REGIONS = [CURRENT_REGION]
LOGGER = get_stream_logger(__name__ + '.Commands')
IMAGE_CATALOG = {}
IMAGE_AMI_IDS = {}
IMAGE_AMI_NAMES = {}
@classmethod
def get_image_infos(cls, region, **kargs):
cls.set_region(region)
if region in cls.IMAGE_CATALOG:
return cls.IMAGE_CATALOG[region]
ec2 = cls.get_ec2(**kargs)
cls.LOGGER.info("Getting AMI info {} for all images".format(region))
rsp = ec2.describe_images()
cls.IMAGE_CATALOG[region] = []
image_datas = rsp['Images']
for image_data in image_datas:
image_info = {
"image_architecture": image_data.get('Architecture', '').lower(),
"image_platform_details": image_data.get('PlatformDetails', '').lower(),
"image_public": image_data.get('Public', False),
"image_name": image_data.get('Name', '').lower(),
"image_type": image_data.get('ImageType', '').lower(),
"image_description": image_data.get('Description', '').lower(),
"image_id": image_data.get('ImageId', '').lower(),
"image_state": image_data.get('State', '').lower(),
"image_block_mappings": image_data.get('BlockDeviceMappings', []),
"image_owner_alias": image_data.get('ImageOwnerAlias', '').lower(),
"image_creation_date": image_data.get('CreationDate', ''),
"image_owner_id": image_data.get('OwnerId', '').lower(),
"image_virtualization_type": image_data.get('VirtualizationType', '').lower(),
}
cls.IMAGE_CATALOG[region].append(image_info)
cls.IMAGE_AMI_IDS[region] = {i['image_id']:i for i in cls.IMAGE_CATALOG[region]}
cls.IMAGE_AMI_NAMES[region] = {i['image_name']:i for i in cls.IMAGE_CATALOG[region]}
return sorted(cls.IMAGE_AMI_NAMES[region].values(), reverse=True, key=lambda x:x["image_creation_date"])
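    # Note: describe_images() with no Owners/Filters enumerates every AMI visible in
    # the region, which is a large and slow call; the per-region IMAGE_CATALOG /
    # IMAGE_AMI_IDS / IMAGE_AMI_NAMES caches above ensure it only happens once per region.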
@classmethod
def extract_ami_paramaters(cls, instance_config):
parameters = {
"image_architecture": instance_config.get('image_architecture', None),
"image_platform_details": instance_config.get('platform_details', None),
"image_public": instance_config.get('image_public', True),
"image_name": instance_config.get('image_name', None),
"image_image_type": instance_config.get('image_type', None),
"image_description_keywords": instance_config.get('image_description_keywords', None),
"image_image_id": instance_config.get('image_id', None),
"image_owner_alias": instance_config.get('image_owner_alias', None),
"image_owner_id": instance_config.get('image_owner_id', None),
"image_virtualization_type": instance_config.get('image_virtualization_type', "hvm"),
}
if isinstance(parameters["image_description_keywords"], list) and len(parameters["image_description_keywords"]) > 0:
parameters["image_description_keywords"] = [i.lower() for i in parameters["image_description_keywords"]]
else:
parameters["image_description_keywords"] = None
        return {k: (v if not isinstance(v, str) else v.lower()) for k, v in parameters.items() if v is not None and (not isinstance(v, str) or len(v) > 0)}
@classmethod
def match_description(cls, keywords, description, any_words=False):
if any_words:
return any([description.find(w) > -1 for w in keywords])
return all([description.find(w) > -1 for w in keywords])
@classmethod
def find_matching_images(cls, ami_info, image_infos, match_keys=MATCH_KEYS,
match_desc=True):
match_these = {k: ami_info[k] for k in match_keys if k in ami_info and ami_info[k] is not None}
keywords = ami_info.get('image_description_keywords', None)
if keywords is None and match_desc:
cls.LOGGER.critical("No keyword provided to match a AMI".format())
raise Exception("No keyword provided to match a AMI".format())
others_good = []
for amii in image_infos:
desc = amii.get('image_description', '')
if (desc is None or len(desc) == 0) and match_desc:
continue
all_matches = []
for k in match_these:
if amii.get(k, None) is None:
continue
all_matches.append(match_these[k] == amii[k])
if all(all_matches):
others_good.append(amii)
if not match_desc:
return [i['image_id'] for i in others_good]
good_desc = []
for ii in others_good:
desc = ii.get('image_description', '')
if cls.match_description(keywords, desc):
good_desc.append(ii)
return [i['image_id'] for i in good_desc]
@classmethod
def get_image_id(cls, instance_name, region, boto_config, any_words=False, return_one=True):
cls.set_region(region)
instance_description = cls.get_instance_description(instance_name, boto_config)
ami_info = cls.extract_ami_paramaters(instance_description)
ami_id = ami_info.get('image_id', None)
ami_name = ami_info.get('image_name', None)
ami_id = ami_id if isinstance(ami_id, str) and len(ami_id) > 0 else None
ami_name = ami_name if isinstance(ami_name, str) and len(ami_name) > 0 else None
        if ami_name is not None and ami_name in cls.IMAGE_AMI_NAMES.get(region, {}):
            ami_id = cls.IMAGE_AMI_NAMES[region][ami_name]['image_id']
        if ami_id is not None and ami_id in cls.IMAGE_AMI_IDS.get(region, {}):
            cls.LOGGER.info("Using AMI image Id ({}) in {}".format(ami_id, region))
            return ami_id
image_infos = cls.get_image_infos(region, **boto_config)
keywords = ami_info.get('image_description_keywords', None)
matching_images = cls.find_matching_images(ami_info, image_infos)
if len(matching_images) > 0 and return_one:
return matching_images[0]
elif len(matching_images) > 0:
return matching_images
cls.LOGGER.critical("Unable to identify an AMI image Id in {}".format(region))
raise Exception("Unable to identify an AMI image Id in {}".format(region))
@classmethod
def get_instance_type(cls, instance_name, boto_config):
instance_description = cls.get_instance_description(instance_name, boto_config)
if 'instance_type' in instance_description:
return instance_description['instance_type']
return boto_config.get('instance_type', 't2.micro')
@classmethod
def get_instance_key_info(cls, instance_name, config, **kargs):
instance_config = cls.get_instance_description(instance_name, config)
base_keyname = instance_config.get('base_keyname', 'aws-instance-key')
keyname_fmt = instance_config.get('keyname_fmt', "{base_keyname}.pem")
_kargs = kargs.copy()
_kargs['base_keyname'] = base_keyname
key_info = {
'key_path': config.get('ssh_key_path', './ssh_keys/'),
'key_name': keyname_fmt.format(**_kargs),
'recreate': instance_config.get('recreate_keypair', False)
}
return key_info
@classmethod
def create_tag_specs(cls, resource_type, tags, tag_config_type='key_value'):
tag_spec = None
if tag_config_type == 'raw':
tag_spec = tags
elif tag_config_type == 'key_value':
tag_spec = {'ResourceType': resource_type,
'Tags': [{'Key':k, 'Value': v} for k, v in tags.items()]
}
return tag_spec
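    # Example of the 'key_value' transformation performed above:
    #   Commands.create_tag_specs('instance', {'Name': 'web-1', 'env': 'dev'})
    #   -> {'ResourceType': 'instance',
    #       'Tags': [{'Key': 'Name', 'Value': 'web-1'}, {'Key': 'env', 'Value': 'dev'}]}
    # With tag_config_type='raw', the tags value is passed through unchanged.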
@classmethod
def get_tag_specs_configs(cls, boto_config, tag_specs=None, tag_specs_names=None, resource_type='instance'):
if tag_specs is None:
tag_specs = boto_config.get('tag_specs', [])
if tag_specs_names and len(tag_specs_names) > 0:
tag_specs = [i for i in tag_specs if i['name'] in tag_specs_names]
tag_specifications = []
for tag_config in tag_specs:
rtype = tag_config.get('resource_type', resource_type)
if rtype != resource_type:
continue
tags = tag_config.get('tags', {})
tag_config_type = tag_config.get('tag_config_type', 'key_value')
if rtype:
tag_specifications.append(cls.create_tag_specs(rtype, tags, tag_config_type))
return tag_specifications
@classmethod
def get_instance_description(cls, instance_name, boto_config):
configs = boto_config.get('instance_descriptions', [])
for config in configs:
x = config.get('name', None)
if x and x == instance_name:
return config
return {}
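    # Hedged sketch of the boto_config layout these accessors expect (keys inferred
    # from the getters in this class, not from a published schema):
    #   boto_config = {
    #       'instance_descriptions': [{'name': 'honeypot', 'instance_type': 't2.micro',
    #                                  'security_groups': [...], 'volumes': [...],
    #                                  'volume_devices': [...], 'tag_specs': [...]}],
    #       'security_groups': [...], 'volumes': [...], 'tag_specs': [...],
    #   }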
@classmethod
def get_instance_names(cls, boto_config):
configs = boto_config.get('instance_descriptions', [])
names = []
for config in configs:
x = config.get('name', None)
if x:
names.append(x)
return names
@classmethod
def get_instance_descriptions(cls, boto_config):
configs = boto_config.get('instance_descriptions', [])
iconfigs = {}
for config in configs:
x = config.get('name', None)
if x:
iconfigs[x] = config
return iconfigs
@classmethod
def get_instance_config_elements(cls, instance_name, element, boto_config):
description = cls.get_instance_description(instance_name, boto_config)
if description is None:
return None
citems = boto_config.get(element, [])
configs = []
instance_items = description.get(element, [])
for item in citems:
if 'name' in item and item['name'] in instance_items:
configs.append(item)
return configs
@classmethod
def get_volume_tags_configs(cls, volume_name, boto_config):
tag_spec_names = boto_config.get('volumes', {}).get(volume_name, {}).get('tag_specs', None)
        if tag_spec_names:
            return cls.get_tag_specs_configs(boto_config, tag_specs_names=tag_spec_names, resource_type='volume')
return None
@classmethod
def get_volume_description(cls, volume_name, boto_config):
volume_configs = boto_config.get('volumes', [])
if len(volume_configs) == 0:
return None
vcs = cls.get_volume_descriptions(boto_config)
return vcs.get(volume_name, None)
@classmethod
def get_volume_descriptions(cls, boto_config):
volume_configs = boto_config.get('volumes', [])
if len(volume_configs) == 0:
return None
        return {config.get('name'): config for config in volume_configs if config.get('name')}
@classmethod
def get_volume_device_descriptions(cls, instance_name, volume_names, boto_config):
volume_names = [] if not isinstance(volume_names, list) else volume_names
instance_config = cls.get_instance_description(instance_name, boto_config)
device_configs = instance_config.get('volume_devices', [])
return device_configs
@classmethod
def get_instance_security_group_configs(cls, instance_name, boto_config):
return cls.get_instance_config_elements(instance_name, 'security_groups', boto_config)
@classmethod
def get_instance_tag_specifications(cls, instance_name, boto_config):
instance_config = cls.get_instance_description(instance_name, boto_config)
tag_specs_names = instance_config.get('tag_specs', None)
if tag_specs_names:
return cls.get_tag_specs_configs(boto_config, tag_specs_names=tag_specs_names, resource_type='instance')
return None
@classmethod
def get_instance_volumes_configs(cls, instance_name, boto_config):
return cls.get_instance_config_elements(instance_name, 'volumes', boto_config)
@classmethod
def get_instance_security_group(cls, instance_name, boto_config):
description = cls.get_instance_description(instance_name, boto_config)
if description is None:
return None
sgs = boto_config.get('security_groups', [])
sg_config = []
instance_sgs = description.get('security_groups', [])
for sg in sgs:
if 'name' in sg and sg['name'] in instance_sgs:
sg_config.append(sg)
        return sg_config
@classmethod
def set_config(cls, **kargs):
cls.AWS_ACCESS_KEY_ID = kargs.get('aws_access_key_id', None)
cls.AWS_SECRET_ACCESS_KEY= kargs.get('aws_secret_access_key', None)
        cls.REGIONS = kargs.get('regions', cls.REGIONS)
# cls.update_current_kms_defaults(**kargs)
@classmethod
def create_tags_keywords(cls, *extra_args):
tags = {}
for k,v in zip(extra_args[::2],extra_args[1::2]):
key = None
if k.startswith(TAG_MARKER):
key = k[len(TAG_MARKER):]
else:
continue
key = key.replace('-','_')
tags[key] = v
return tags
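    # Hedged usage note (TAG_MARKER comes from .consts; assumed here to be a prefix
    # such as "tag-"): arguments are consumed as key/value pairs, un-prefixed keys
    # are skipped, and '-' in key names is normalized to '_', e.g.
    #   Commands.create_tags_keywords("tag-owner", "alice", "tag-env-name", "dev")
    #   -> {"owner": "alice", "env_name": "dev"}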
@classmethod
def set_region(cls, region, **kargs):
if region is not None and region != cls.CURRENT_REGION:
cls.CURRENT_REGION = region
@classmethod
def get_region(cls):
return cls.CURRENT_REGION
@classmethod
def get_current_region(cls):
return cls.CURRENT_REGION
@classmethod
def get_ec2(cls, ec2=None, region=None, aws_secret_access_key=None, aws_access_key_id=None, **kargs):
if region is None:
region = cls.get_current_region()
if ec2 is None:
# cls.LOGGER.debug("Creating ec2 client for {} in {}".format(region, aws_access_key_id))
aws_secret_access_key = aws_secret_access_key if aws_secret_access_key else cls.AWS_SECRET_ACCESS_KEY
aws_access_key_id = aws_access_key_id if aws_access_key_id else cls.AWS_ACCESS_KEY_ID
ec2 = boto3.client('ec2',
region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
return ec2
@classmethod
def get_kms_key(cls, kms=None, region=None, aws_secret_access_key=None, aws_access_key_id=None,
key_name=None, key_id=None, **kargs):
if key_name is None and key_id is None:
            if cls.DEFAULT_KMS_IDS.get(region):
                return cls.DEFAULT_KMS_IDS[region]
kms = cls.get_kms(region=region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
return kms
@classmethod
def update_current_kms_defaults(cls, region=None, **kargs):
# FIXME this code is unreliable when switching between regions
# TODO this is all wrong and does not take into
# account when regions change, because the keys will also change
# with the given region
kms = cls.get_kms(region=region, **kargs)
aliases = kms.list_aliases()
rsp = kms.list_keys()
cls.DEFAULT_KMS_IDS[region] = None
cls.KMS_ALIASES[region] = aliases['Aliases']
cls.KMS_KEYS[region] = {k['KeyId']: k for k in rsp['Keys']}
cls.KMS_ARNS[region] = {k['KeyId']: k['KeyArn'] for | |
gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type AverageUploadRateLimitInBitsPerSec: integer
:param AverageUploadRateLimitInBitsPerSec:
The average upload bandwidth rate limit in bits per second.
:type AverageDownloadRateLimitInBitsPerSec: integer
:param AverageDownloadRateLimitInBitsPerSec:
The average download bandwidth rate limit in bits per second.
:rtype: dict
:returns:
"""
pass
def update_chap_credentials(self, TargetARN: str, SecretToAuthenticateInitiator: str, InitiatorName: str, SecretToAuthenticateTarget: str = None) -> Dict:
"""
Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it.
.. warning::
When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateChapCredentials>`_
**Request Syntax**
::
response = client.update_chap_credentials(
TargetARN='string',
SecretToAuthenticateInitiator='string',
InitiatorName='string',
SecretToAuthenticateTarget='string'
)
**Response Syntax**
::
{
'TargetARN': 'string',
'InitiatorName': 'string'
}
**Response Structure**
- *(dict) --*
A JSON object containing the following fields:
- **TargetARN** *(string) --*
The Amazon Resource Name (ARN) of the target. This is the same target specified in the request.
- **InitiatorName** *(string) --*
The iSCSI initiator that connects to the target. This is the same initiator name specified in the request.
:type TargetARN: string
:param TargetARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return the TargetARN for specified VolumeARN.
:type SecretToAuthenticateInitiator: string
:param SecretToAuthenticateInitiator: **[REQUIRED]**
The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.
.. note::
The secret key must be between 12 and 16 bytes when encoded in UTF-8.
:type InitiatorName: string
:param InitiatorName: **[REQUIRED]**
The iSCSI initiator that connects to the target.
:type SecretToAuthenticateTarget: string
:param SecretToAuthenticateTarget:
The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).
Byte constraints: Minimum bytes of 12. Maximum bytes of 16.
.. note::
The secret key must be between 12 and 16 bytes when encoded in UTF-8.
:rtype: dict
:returns:
"""
pass
def update_gateway_information(self, GatewayARN: str, GatewayName: str = None, GatewayTimezone: str = None) -> Dict:
"""
Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
.. note::
For Gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewayInformation>`_
**Request Syntax**
::
response = client.update_gateway_information(
GatewayARN='string',
GatewayName='string',
GatewayTimezone='string'
)
**Response Syntax**
::
{
'GatewayARN': 'string',
'GatewayName': 'string'
}
**Response Structure**
- *(dict) --*
A JSON object containing the ARN of the gateway that was updated.
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
- **GatewayName** *(string) --*
The name you configured for your gateway.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayName: string
:param GatewayName:
The name you configured for your gateway.
:type GatewayTimezone: string
:param GatewayTimezone:
A value that indicates the time zone of the gateway.
:rtype: dict
:returns:
"""
pass
def update_gateway_software_now(self, GatewayARN: str) -> Dict:
"""
Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.
.. note::
When you make this request, you get a ``200 OK`` success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the ``STATE_RUNNING`` state.
.. warning::
A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see `Customizing Your Windows iSCSI Settings <https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings>`__ and `Customizing Your Linux iSCSI Settings <https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings>`__ , respectively.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewaySoftwareNow>`_
**Request Syntax**
::
response = client.update_gateway_software_now(
GatewayARN='string'
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
            A JSON object containing the ARN of the gateway that was updated.
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:rtype: dict
:returns:
"""
pass
def update_maintenance_start_time(self, GatewayARN: str, HourOfDay: int, MinuteOfHour: int, DayOfWeek: int = None, DayOfMonth: int = None) -> Dict:
"""
Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateMaintenanceStartTime>`_
**Request Syntax**
::
response = client.update_maintenance_start_time(
GatewayARN='string',
HourOfDay=123,
MinuteOfHour=123,
DayOfWeek=123,
DayOfMonth=123
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
            A JSON object containing the ARN of the gateway whose maintenance start time is updated.
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type HourOfDay: integer
:param HourOfDay: **[REQUIRED]**
The hour component of the maintenance start time represented as *hh* , where *hh* is the hour (00 to 23). The hour of the day is in the time zone of the gateway.
:type MinuteOfHour: integer
:param MinuteOfHour: **[REQUIRED]**
The minute component of the maintenance start time represented as *mm* , where *mm* is the minute (00 to 59). The minute of the hour is in the time zone of the gateway.
:type DayOfWeek: integer
:param DayOfWeek:
The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday.
:type DayOfMonth: integer
:param DayOfMonth:
The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.
.. note::
This value is only available for tape and volume gateways.
:rtype: dict
:returns:
"""
pass
def update_nfs_file_share(self, FileShareARN: str, KMSEncrypted: bool = None, KMSKey: str = None, NFSFileShareDefaults: Dict = None, DefaultStorageClass: str = None, ObjectACL: str = None, ClientList: List = None, Squash: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None) -> Dict:
"""
Updates a Network File System (NFS) file share. This operation is only supported in the file gateway type.
.. note::
To leave a file share field unchanged, set the corresponding input field to null.
Updates the following file share setting:
* Default storage class for your S3 bucket
* Metadata defaults for your S3 bucket
* Allowed NFS clients for your file share
* Squash settings
* Write status of your file share
.. note::
To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in file gateways.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateNFSFileShare>`_
**Request Syntax**
::
response = client.update_nfs_file_share(
FileShareARN='string',
KMSEncrypted=True|False,
KMSKey='string',
NFSFileShareDefaults={
'FileMode': 'string',
| |
<filename>sdk/python/pulumi_azure_native/devtestlab/v20150521preview/virtual_machine_resource.py<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineResourceArgs', 'VirtualMachineResource']
@pulumi.input_type
class VirtualMachineResourceArgs:
def __init__(__self__, *,
lab_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
artifact_deployment_status: Optional[pulumi.Input['ArtifactDeploymentStatusPropertiesArgs']] = None,
artifacts: Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactInstallPropertiesArgs']]]] = None,
compute_id: Optional[pulumi.Input[str]] = None,
created_by_user: Optional[pulumi.Input[str]] = None,
created_by_user_id: Optional[pulumi.Input[str]] = None,
custom_image_id: Optional[pulumi.Input[str]] = None,
disallow_public_ip_address: Optional[pulumi.Input[bool]] = None,
fqdn: Optional[pulumi.Input[str]] = None,
gallery_image_reference: Optional[pulumi.Input['GalleryImageReferenceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
is_authentication_with_ssh_key: Optional[pulumi.Input[bool]] = None,
lab_subnet_name: Optional[pulumi.Input[str]] = None,
lab_virtual_network_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[str]] = None,
owner_object_id: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
ssh_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VirtualMachineResource resource.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['ArtifactDeploymentStatusPropertiesArgs'] artifact_deployment_status: The artifact deployment status for the virtual machine.
:param pulumi.Input[Sequence[pulumi.Input['ArtifactInstallPropertiesArgs']]] artifacts: The artifacts to be installed on the virtual machine.
:param pulumi.Input[str] compute_id: The resource identifier (Microsoft.Compute) of the virtual machine.
:param pulumi.Input[str] created_by_user: The email address of creator of the virtual machine.
:param pulumi.Input[str] created_by_user_id: The object identifier of the creator of the virtual machine.
:param pulumi.Input[str] custom_image_id: The custom image identifier of the virtual machine.
:param pulumi.Input[bool] disallow_public_ip_address: Indicates whether the virtual machine is to be created without a public IP address.
:param pulumi.Input[str] fqdn: The fully-qualified domain name of the virtual machine.
:param pulumi.Input['GalleryImageReferenceArgs'] gallery_image_reference: The Microsoft Azure Marketplace image reference of the virtual machine.
:param pulumi.Input[str] id: The identifier of the resource.
:param pulumi.Input[bool] is_authentication_with_ssh_key: A value indicating whether this virtual machine uses an SSH key for authentication.
:param pulumi.Input[str] lab_subnet_name: The lab subnet name of the virtual machine.
:param pulumi.Input[str] lab_virtual_network_id: The lab virtual network identifier of the virtual machine.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the resource.
:param pulumi.Input[str] notes: The notes of the virtual machine.
:param pulumi.Input[str] os_type: The OS type of the virtual machine.
:param pulumi.Input[str] owner_object_id: The object identifier of the owner of the virtual machine.
:param pulumi.Input[str] password: The <PASSWORD> the virtual machine <PASSWORD>.
:param pulumi.Input[str] provisioning_state: The provisioning status of the resource.
:param pulumi.Input[str] size: The size of the virtual machine.
:param pulumi.Input[str] ssh_key: The SSH key of the virtual machine administrator.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] type: The type of the resource.
:param pulumi.Input[str] user_name: The user name of the virtual machine.
"""
pulumi.set(__self__, "lab_name", lab_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if artifact_deployment_status is not None:
pulumi.set(__self__, "artifact_deployment_status", artifact_deployment_status)
if artifacts is not None:
pulumi.set(__self__, "artifacts", artifacts)
if compute_id is not None:
pulumi.set(__self__, "compute_id", compute_id)
if created_by_user is not None:
pulumi.set(__self__, "created_by_user", created_by_user)
if created_by_user_id is not None:
pulumi.set(__self__, "created_by_user_id", created_by_user_id)
if custom_image_id is not None:
pulumi.set(__self__, "custom_image_id", custom_image_id)
if disallow_public_ip_address is not None:
pulumi.set(__self__, "disallow_public_ip_address", disallow_public_ip_address)
if fqdn is not None:
pulumi.set(__self__, "fqdn", fqdn)
if gallery_image_reference is not None:
pulumi.set(__self__, "gallery_image_reference", gallery_image_reference)
if id is not None:
pulumi.set(__self__, "id", id)
if is_authentication_with_ssh_key is not None:
pulumi.set(__self__, "is_authentication_with_ssh_key", is_authentication_with_ssh_key)
if lab_subnet_name is not None:
pulumi.set(__self__, "lab_subnet_name", lab_subnet_name)
if lab_virtual_network_id is not None:
pulumi.set(__self__, "lab_virtual_network_id", lab_virtual_network_id)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if owner_object_id is not None:
pulumi.set(__self__, "owner_object_id", owner_object_id)
if password is not None:
pulumi.set(__self__, "password", password)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if size is not None:
pulumi.set(__self__, "size", size)
if ssh_key is not None:
pulumi.set(__self__, "ssh_key", ssh_key)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if type is not None:
pulumi.set(__self__, "type", type)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="labName")
def lab_name(self) -> pulumi.Input[str]:
"""
The name of the lab.
"""
return pulumi.get(self, "lab_name")
@lab_name.setter
def lab_name(self, value: pulumi.Input[str]):
pulumi.set(self, "lab_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="artifactDeploymentStatus")
def artifact_deployment_status(self) -> Optional[pulumi.Input['ArtifactDeploymentStatusPropertiesArgs']]:
"""
The artifact deployment status for the virtual machine.
"""
return pulumi.get(self, "artifact_deployment_status")
@artifact_deployment_status.setter
def artifact_deployment_status(self, value: Optional[pulumi.Input['ArtifactDeploymentStatusPropertiesArgs']]):
pulumi.set(self, "artifact_deployment_status", value)
@property
@pulumi.getter
def artifacts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactInstallPropertiesArgs']]]]:
"""
The artifacts to be installed on the virtual machine.
"""
return pulumi.get(self, "artifacts")
@artifacts.setter
def artifacts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactInstallPropertiesArgs']]]]):
pulumi.set(self, "artifacts", value)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource identifier (Microsoft.Compute) of the virtual machine.
"""
return pulumi.get(self, "compute_id")
@compute_id.setter
def compute_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_id", value)
@property
@pulumi.getter(name="createdByUser")
def created_by_user(self) -> Optional[pulumi.Input[str]]:
"""
The email address of creator of the virtual machine.
"""
return pulumi.get(self, "created_by_user")
@created_by_user.setter
def created_by_user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_by_user", value)
@property
@pulumi.getter(name="createdByUserId")
def created_by_user_id(self) -> Optional[pulumi.Input[str]]:
"""
The object identifier of the creator of the virtual machine.
"""
return pulumi.get(self, "created_by_user_id")
@created_by_user_id.setter
def created_by_user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_by_user_id", value)
@property
@pulumi.getter(name="customImageId")
def custom_image_id(self) -> Optional[pulumi.Input[str]]:
"""
The custom image identifier of the virtual machine.
"""
return pulumi.get(self, "custom_image_id")
@custom_image_id.setter
def custom_image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_image_id", value)
@property
@pulumi.getter(name="disallowPublicIpAddress")
def disallow_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the virtual machine is to be created without a public IP address.
"""
return pulumi.get(self, "disallow_public_ip_address")
@disallow_public_ip_address.setter
def disallow_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disallow_public_ip_address", value)
@property
@pulumi.getter
def fqdn(self) -> Optional[pulumi.Input[str]]:
"""
The fully-qualified domain name of the virtual machine.
"""
return pulumi.get(self, "fqdn")
@fqdn.setter
def fqdn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fqdn", value)
@property
@pulumi.getter(name="galleryImageReference")
def gallery_image_reference(self) -> Optional[pulumi.Input['GalleryImageReferenceArgs']]:
"""
The Microsoft Azure Marketplace image reference of the virtual machine.
"""
return pulumi.get(self, "gallery_image_reference")
@gallery_image_reference.setter
def gallery_image_reference(self, value: Optional[pulumi.Input['GalleryImageReferenceArgs']]):
pulumi.set(self, "gallery_image_reference", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="isAuthenticationWithSshKey")
def is_authentication_with_ssh_key(self) -> Optional[pulumi.Input[bool]]:
"""
A value indicating whether this virtual machine uses an SSH key for authentication.
"""
return pulumi.get(self, "is_authentication_with_ssh_key")
@is_authentication_with_ssh_key.setter
def is_authentication_with_ssh_key(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_authentication_with_ssh_key", value)
@property
@pulumi.getter(name="labSubnetName")
def lab_subnet_name(self) -> Optional[pulumi.Input[str]]:
"""
The lab subnet name of the virtual machine.
"""
return pulumi.get(self, "lab_subnet_name")
@lab_subnet_name.setter
def lab_subnet_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lab_subnet_name", value)
@property
@pulumi.getter(name="labVirtualNetworkId")
def lab_virtual_network_id(self) -> Optional[pulumi.Input[str]]:
"""
The lab virtual network identifier of the virtual machine.
"""
return pulumi.get(self, "lab_virtual_network_id")
@lab_virtual_network_id.setter
def lab_virtual_network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lab_virtual_network_id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
The notes of the virtual machine.
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[pulumi.Input[str]]:
"""
The OS type of the virtual machine.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter(name="ownerObjectId")
def owner_object_id(self) -> Optional[pulumi.Input[str]]:
"""
The object identifier of the owner of the virtual machine.
"""
return pulumi.get(self, "owner_object_id")
@owner_object_id.setter
def owner_object_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_object_id", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password of the virtual machine administrator.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The size of the virtual machine.
"""
return pulumi.get(self, "size")
@size.setter
def | |
of
# world, in case of errors, PROXY's return HTML, this function parses
# the error and adds it to the action_result.
if 'html' in response.headers.get('Content-Type', ''):
return self._process_html_response(response, action_result)
# if no content-type is to be parsed, handle an empty response
if not response.text:
return self._process_empty_response(response, action_result)
# everything else is actually an error at this point
message = "Can't process response from server. Status Code: {0} Data from server: {1}".\
format(response.status_code, self._handle_py_ver_compat_for_input_str(
response.text.replace('{', '{{').replace('}', '}}')))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _handle_py_ver_compat_for_input_str(self, input_str):
"""
This method returns the encoded|original string based on the Python version.
:param python_version: Python major version
:param input_str: Input string to be processed
:return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
"""
try:
if input_str and self._python_version == 2:
input_str = UnicodeDammit(
input_str).unicode_markup.encode('utf-8')
except Exception:
self.debug_print(
"Error occurred while handling python 2to3 compatibility for the input string")
return input_str
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
try:
if hasattr(e, 'args'):
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
try:
error_msg = self._handle_py_ver_compat_for_input_str(error_msg)
except TypeError:
error_msg = "Error occurred while connecting to the Mattermost server. "
error_msg += "Please check the asset configuration and|or the action parameters."
except Exception:
error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters."
return "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
def _make_rest_call(self, url, action_result, headers=None, params=None, data=None, method="get",
verify=False, timeout=None, files=None):
""" This function is used to make the REST call.
:param url: url for making REST call
:param action_result: Object of ActionResult class
:param headers: Request headers
:param params: Request parameters
:param data: Request body
:param method: GET/POST/PUT/DELETE/PATCH (Default will be GET)
:param verify: Verify server certificate
:param timeout: Timeout of request
:param files: File to be uploaded
:return: Status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
response obtained by making an API call
"""
resp_json = None
# If no headers are passed, set empty headers
if not headers:
headers = {}
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)),
resp_json)
try:
request_response = request_func(url, data=data, headers=headers, params=params, verify=verify,
timeout=timeout, files=files)
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".
format(self._get_error_message_from_exception(e))), resp_json)
return self._process_response(request_response, action_result)
def _handle_update_request(self, url, action_result, params=None, data=None, verify=False, method="get",
files=None):
""" This method is used to call maker_rest_call using different authentication methods.
:param url: REST URL that needs to be called
:param action_result: Object of ActionResult class
:param params: Request params
:param data: Request data
:param verify: Verify server certificate(Default: True)
:param method: GET/POST/PUT/DELETE/PATCH (Default will be GET)
:param files: File to be uploaded
:return: Status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
response obtained by making an API call
"""
# If the personal access token is provided
if self._personal_token:
headers = {
'Authorization': 'Bearer {}'.format(self._personal_token)
}
ret_val, response = self._make_rest_call(url=url, action_result=action_result, headers=headers, data=data,
params=params, verify=verify, method=method, files=files)
if phantom.is_fail(ret_val):
# If error is not 401 or other config parameters are not provided, return error
if '401' not in action_result.get_message() or not self._access_token:
return action_result.get_status(), None
else:
return phantom.APP_SUCCESS, response
if self._access_token:
# Call using access_token
headers = {
'Authorization': 'Bearer {}'.format(self._access_token)
}
ret_val, response = self._make_rest_call(url=url, action_result=action_result, headers=headers, data=data,
params=params, verify=verify, method=method, files=files)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
return phantom.APP_SUCCESS, response
return action_result.set_status(phantom.APP_ERROR, status_message='Authentication failed'), None
def _handle_test_connectivity(self, param):
""" This function is used to handle the test connectivity action.
:param param: Dictionary of input parameters
:return: Status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
action_result = self.add_action_result(ActionResult(dict(param)))
app_state = {}
# If none of the config parameters are present, return error
if not(self._client_id and self._client_secret) and not self._personal_token:
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_FAILED_MSG)
return action_result.set_status(phantom.APP_ERROR,
status_message=MATTERMOST_CONFIG_PARAMS_REQUIRED_CONNECTIVITY)
self.save_progress(MATTERMOST_MAKING_CONNECTION_MSG)
url = '{0}{1}'.format(MATTERMOST_API_BASE_URL.format(server_url=self._server_url),
MATTERMOST_CURRENT_USER_ENDPOINT)
if self._personal_token:
headers = {
'Authorization': 'Bearer {}'.format(self._personal_token)
}
ret_val, _ = self._make_rest_call(
url=url, action_result=action_result, headers=headers)
if phantom.is_fail(ret_val):
# If error is not 401 or other config parameters are not provided, return error
if '401' not in action_result.get_message() or not (self._client_id and self._client_secret):
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_FAILED_MSG)
return action_result.get_status()
else:
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_PASSED_MSG)
return action_result.set_status(phantom.APP_SUCCESS)
if self._client_id and self._client_secret:
# If client_id and client_secret is provided, go for interactive login
ret_val = self._handle_interactive_login(
app_state, action_result=action_result)
if phantom.is_fail(ret_val):
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_FAILED_MSG)
return action_result.get_status()
# Call using access_token
headers = {
'Authorization': 'Bearer {}'.format(self._access_token)
}
ret_val, _ = self._make_rest_call(
url=url, action_result=action_result, headers=headers)
if phantom.is_fail(ret_val):
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_FAILED_MSG)
return action_result.get_status()
self.save_progress(MATTERMOST_TEST_CONNECTIVITY_PASSED_MSG)
return action_result.set_status(phantom.APP_SUCCESS)
return action_result.set_status(phantom.APP_ERROR, status_message='Authentication failed')
def _handle_interactive_login(self, app_state, action_result):
""" This function is used to handle the interactive login during test connectivity
while client_id and client_secret is provided.
:param action_result: Object of ActionResult class
:return: status(success/failure)
"""
ret_val, app_rest_url = self._get_app_rest_url(action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
# Append /result to create redirect_uri
redirect_uri = '{0}/result'.format(app_rest_url)
app_state['redirect_uri'] = redirect_uri
self.save_progress(MATTERMOST_OAUTH_URL_MSG)
self.save_progress(redirect_uri)
# Get asset ID
asset_id = self.get_asset_id()
# Authorization URL used to make request for getting code which is used to generate access token
authorization_url = MATTERMOST_AUTHORIZE_URL.format(server_url=self._server_url, client_id=self._client_id,
redirect_uri=redirect_uri, state=asset_id)
app_state['authorization_url'] = authorization_url
# URL which would be shown to the user
url_for_authorize_request = '{0}/start_oauth?asset_id={1}&'.format(
app_rest_url, asset_id)
_save_app_state(app_state, asset_id, self)
self.save_progress(MATTERMOST_AUTHORIZE_USER_MSG)
self.save_progress(url_for_authorize_request)
# Wait for 15 seconds for authorization
time.sleep(MATTERMOST_AUTHORIZE_WAIT_TIME)
# Wait for 105 seconds while user login to Mattermost
status = self._wait(action_result=action_result)
# Empty message to override last message of waiting
self.send_progress('')
if phantom.is_fail(status):
return action_result.get_status()
self.save_progress(MATTERMOST_CODE_RECEIVED_MSG)
self._state = _load_app_state(asset_id, self)
# if code is not available in the state file
if not self._state or not self._state.get('code'):
return action_result.set_status(phantom.APP_ERROR,
status_message=MATTERMOST_TEST_CONNECTIVITY_FAILED_MSG)
current_code = self._state['code']
self.save_state(self._state)
_save_app_state(self._state, asset_id, self)
self.save_progress(MATTERMOST_GENERATING_ACCESS_TOKEN_MSG)
# Generate access_token using code
request_data = {
'client_id': self._client_id,
'client_secret': self._client_secret,
'code': current_code,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri
}
ret_val, response = self._make_rest_call(url=MATTERMOST_ACCESS_TOKEN_URL.format(
server_url=self._server_url), action_result=action_result, method='post', data=request_data)
if phantom.is_fail(ret_val):
return action_result.get_status()
# If there is any error while generating access_token, API returns 200 with error and error_description fields
if not response.get(MATTERMOST_ACCESS_TOKEN):
if response.get('message'):
return action_result.set_status(phantom.APP_ERROR, status_message=self._handle_py_ver_compat_for_input_str(response['message']))
return action_result.set_status(phantom.APP_ERROR, status_message='Error while generating access_token')
self._state['token'] = response
self._access_token = response[MATTERMOST_ACCESS_TOKEN]
self.save_state(self._state)
_save_app_state(self._state, asset_id, self)
self._state = self.load_state()
# Scenario -
#
# If the corresponding state file doesn't have the correct owner, owner group or permissions,
# the newly generated token is not being saved to the state file
# and the automatic workflow for the token has been stopped.
# So we have to check that token from response and the tokens
# which are saved to state file after successful generation of the new tokens are same or not.
if self._access_token != self._state.get('token', {}).get(MATTERMOST_ACCESS_TOKEN):
message = "Error occurred while saving the newly generated access token (in place of the expired token) in the state file."
message += " Please check the owner, owner group, and the permissions of the state file. The Phantom "
message += "user should have the correct access rights and ownership for the corresponding state file "
message += "(refer to the readme file for more information)."
return action_result.set_status(phantom.APP_ERROR, message)
return phantom.APP_SUCCESS
def _get_app_rest_url(self, action_result):
""" Get URL for making rest calls.
:param action_result: object of ActionResult class
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
URL to make rest calls
"""
ret_val, phantom_base_url = self._get_phantom_base_url(action_result)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
ret_val, asset_name = self._get_asset_name(action_result)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
self.save_progress(
'Using Phantom base URL as: {0}'.format(phantom_base_url))
app_json = self.get_app_json()
app_name = app_json['name']
app_dir_name = _get_dir_name_from_app_name(app_name)
url_to_app_rest = '{0}/rest/handler/{1}_{2}/{3}'.format(phantom_base_url.rstrip('/'), app_dir_name, app_json['appid'],
asset_name)
return phantom.APP_SUCCESS, url_to_app_rest
def _get_phantom_base_url(self, action_result):
""" Get base url of phantom.
:param action_result: object of ActionResult class
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
base url of phantom
"""
mattermost_phantom_base_url = self.get_phantom_base_url()
url = '{}rest{}'.format(
mattermost_phantom_base_url, MATTERMOST_PHANTOM_SYS_INFO_URL)
ret_val, resp_json = self._make_rest_call(
action_result=action_result, url=url, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import pprint
import re
import codecs
import json
"""
Your task is to wrangle the data and transform the shape of the data
into the model we mentioned earlier. The output should be a list of dictionaries
that look like this:
{
"id": "2406124091",
"type: "node",
"visible":"true",
"created": {
"version":"2",
"changeset":"17206049",
"timestamp":"2013-08-03T16:43:42Z",
"user":"linuxUser16",
"uid":"1219059"
},
"pos": [41.9757030, -87.6921867],
"address": {
"housenumber": "5157",
"postcode": "60625",
"street": "North Lincoln Ave"
},
"amenity": "restaurant",
"cuisine": "mexican",
"name": "<NAME>",
"phone": "1 (773)-271-5176"
}
You have to complete the function 'shape_element'.
We have provided a function that will parse the map file, and call the function with the element
as an argument. You should return a dictionary, containing the shaped data for that element.
We have also provided a way to save the data in a file, so that you could use
mongoimport later on to import the shaped data into MongoDB. You could also do some cleaning
before doing that, like in the previous exercise, but for this exercise you just have to
shape the structure.
In particular the following things should be done:
- you should process only 2 types of top level tags: "node" and "way"
- all attributes of "node" and "way" should be turned into regular key/value pairs, except:
- attributes in the CREATED array should be added under a key "created"
- attributes for latitude and longitude should be added to a "pos" array,
for use in geospatial indexing. Make sure the values inside "pos" array are floats
and not strings.
- if second level tag "k" value contains problematic characters, it should be ignored
- if second level tag "k" value starts with "addr:", it should be added to a dictionary "address"
- if second level tag "k" value does not start with "addr:", but contains ":", you can process it
same as any other tag.
- if there is a second ":" that separates the type/direction of a street,
the tag should be ignored, for example:
<tag k="addr:housenumber" v="5158"/>
<tag k="addr:street" v="North Lincoln Avenue"/>
<tag k="addr:street:name" v="Lincoln"/>
<tag k="addr:street:prefix" v="North"/>
<tag k="addr:street:type" v="Avenue"/>
<tag k="amenity" v="pharmacy"/>
should be turned into:
{...
"address": {
"housenumber": 5158,
"street": "North Lincoln Avenue"
}
"amenity": "pharmacy",
...
}
- for "way" specifically:
<nd ref="305896090"/>
<nd ref="1719825889"/>
should be turned into
"node_refs": ["305896090", "1719825889"]
"""
# REGEX to check for all lower case characters in a string
lower = re.compile(r'^([a-z]|_)*$')
# REGEX to check for colon values
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
# REGEX to check for problematic characters that would break MongoDB keys
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
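# Illustrative sanity check (an assumption added for clarity, not part of the original exercise):
# keys with a colon pass the problemchars check, while keys containing spaces, dots,
# or other listed characters are rejected by is_valid_tag() below.
#   problemchars.search('addr:street')    # -> None, so the tag is kept
#   problemchars.search('payment method') # -> matches on the space, so the tag is ignored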
'''
Create the CREATED list to store the attribute names of a node's metadata
'''
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]
'''
Create the POSITION list, which contains the latitude and
the longitude. Lat is in the 0 position, Lon is in the 1 position.
This will be used as a lookup list to determine if a key
exists in an element
'''
POSITION = ["lat","lon"]
def shape_element(element):
'''
shape_element will perform the following tasks:
- if second level tag "k" value contains problematic characters, it should be ignored
- if second level tag "k" value starts with "addr:", it should be added to a dictionary "address"
- if second level tag "k" value does not start with "addr:", but contains ":", you can process it
same as any other tag.
- if there is a second ":" that separates the type/direction of a street,
the tag should be ignored, for example:
'''
# Create the node dictionary
node = {}
# Add the created object to the node dictionary
node['created'] = {}
# For Lat and Lon we will store these in a 'pos' (position) array;
# we need lat and lon in a specific order (LAT, LON)
node['pos'] =[0 for i in range(2)]
# Search only through the node and way types
if element.tag == "node" or element.tag == "way" :
# add the type to the node, the tag of the element
node['type'] = element.tag
# Search through the node and way types
# to build the CREATED and POSITION dictionaries
for k,v in element.attrib.iteritems():
# CREATE VALUES {"version", "changeset", "timestamp", "user", "uid"}
if k in CREATED:
node['created'][k] = v
#TODO: make sure time is formatted from string to date
# Lat is in first position, Lon second position
# In JSON and mongodb we need to represent the Lat and Lon as floats
elif k in POSITION:
if k=="lat":
node['pos'][0]=(float(v))
else: # Lon
node['pos'][1]=(float(v))
# Key was not in the CREATED or POSITION dictionary
# Add a new key value pair
else:
node[k] = v
'''
Setup processing for the TAGS - Addresses and other meta data for the
node and way objects
'''
# Instantiate the address dictionary
address = {}
'''
Search all the subelements and prepare valid tags for processing
Any ignored data will be emitted to the console
'''
for tag in element.iter("tag"):
if is_valid_tag(tag) == True:
# address attributes - create the dictionary object to hold
# the attributes.
# use a slice of the item from beginning for 5 characters
if tag.attrib['k'][:5] == "addr:":
# Set the keyName to the text to the RIGHT of the colon, dropping "addr:"
newKey = tag.attrib['k'][5:]
# if there is a second ":" that separates the
# type/direction of a street ignore it - Per Assignment
if newKey.count(":")> 0:
print "found colon, and it's not address - ignoring it", newKey
else:
# Add new key to the address object, and assign the
# value to the key
address[newKey] = tag.attrib['v']
# we have a generic tag item with no colon, to be added at the root of the node/way object
elif tag.attrib['k'].count(":") < 1:
plainKey = tag.attrib['k']
#print "Plain KEY", tag.attrib['k'], tag.attrib['v']
node[plainKey] = tag.attrib['v']
# For keys with a single colon that are not address keys, process them like the generic keys
elif tag.attrib['k'].count(":") == 1 and tag.attrib['k'][:5] != "addr:" and tag.attrib['k'][5:] != "created" :
# Get the length to the colon, and get the text from the
# right of colon to the end for the key.
# We are going to strip off the first text to the left of
# the colon, for readability and mongodb
keyIndex = tag.attrib['k'].find(":")
# increment by one so we start at the new key name
keyIndex += 1
# Get the key name and create a dictionary for this key and value
oddKey = tag.attrib['k'][keyIndex:]
node[oddKey] = tag.attrib['v']
else:
print "Ingnore tag it is invalid" , tag.attrib['k'], tag.attrib['v']
# Search for any node_refs in the sub arrays - just for the way tag, per instructions
node_refs = []
if element.tag =="way":
for ndref in element.iter("nd"):
node_refs.append(ndref.attrib['ref'])
# Check to see if we have any node_refs, if we do add the node_refs to the node
if len(node_refs) > 0:
node['node_refs'] = node_refs
# Check to see if we have any addresses, if we have addresses add the addresses to the node
if len(address)>0:
node['address'] = address
return node
else:
return None
def is_valid_tag(element):
'''
Check for Valid Tags and return true for valid tags false for invalid
'''
isValid = True
if problemchars.search(element.attrib['k']):
isValid = False
else: # Count all the others as valid
isValid = True
return isValid
def process_map(file_in, pretty = False):
'''
Process map reads in the OpenStreet Map file
and writes out to file the JSON data structure
file_in is the path and filename, pretty parameter formats the json
'''
# Keep the same filename and just append .json to the filename
file_out = "{0}.2.json".format(file_in)
data = []
with codecs.open(file_out, "w") as fo:
# Go element by element to read the file
for _, element in ET.iterparse(file_in):
el = shape_element(element)
# If we have an element add it to the dictionary
# and write the data to a file
if el:
data.append(el)
if pretty:
fo.write(json.dumps(el, indent=2)+"\n")
else:
fo.write(json.dumps(el) + "\n")
return data
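# A minimal usage sketch (the filename 'example.osm' is an assumption for illustration,
# not part of the exercise):
#
#   data = process_map('example.osm', pretty=True)
#   pprint.pprint(data[0])   # inspect the first shaped element
#
# This writes the shaped documents to 'example.osm.2.json', one JSON object per line,
# ready for mongoimport.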
def test():
# -*- coding: utf-8 -*-
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_test_openrave import *
class RunRobot(EnvironmentSetup):
def __init__(self,collisioncheckername):
self.collisioncheckername = collisioncheckername
def setup(self):
EnvironmentSetup.setup(self)
self.env.SetCollisionChecker(RaveCreateCollisionChecker(self.env,self.collisioncheckername))
def test_dualarm_grabbing(self):
with self.env:
robot = self.LoadRobot('robots/schunk-lwa3-dual.robot.xml')
body = self.env.ReadKinBodyXMLFile('data/box3.kinbody.xml')
self.env.Add(body)
T = eye(4)
T[1,3] = -1.18
T[2,3] = 0.712
body.SetTransform(T)
robot.SetActiveManipulator('leftarm')
assert(self.env.CheckCollision(robot))
robot.Grab(body)
assert(not self.env.CheckCollision(robot))
robot.SetDOFValues(array([ 0.00000000e+00, -1.43329144e+00, -3.99190831e-15, -1.86732388e+00, 5.77239752e-01, -3.37631690e-07, 6.67713991e-08, 0.00000000e+00, -1.70089030e+00, -6.42544150e-01, -1.25030589e+00, -3.33493233e-08, -5.58212676e-08, 1.60115015e-08]))
assert(robot.CheckSelfCollision())
def test_basic(self):
with self.env:
for robotfile in g_robotfiles:
self.env.Reset()
robot = self.LoadRobot(robotfile)
assert(robot.GetDOF() == robot.GetActiveDOF())
assert(robot.GetLinks()[0].GetParent().GetActiveDOF() == robot.GetActiveDOF())
def test_collisionmaprobot(self):
env=self.env
xml = """<environment>
<robot file="robots/collisionmap.robot.xml">
</robot>
</environment>
"""
self.LoadDataEnv(xml)
with env:
robot=env.GetRobots()[0]
assert(robot.GetXMLId().lower()=='collisionmaprobot')
robot.SetDOFValues([9/180.0*pi,1/180.0*pi],[1,2])
assert(robot.CheckSelfCollision())
robot.SetDOFValues([0/180.0*pi,1/180.0*pi],[1,2])
assert(not robot.CheckSelfCollision())
env.Reset()
robot=self.LoadRobot('robots/collisionmap.robot.xml')
assert(robot.GetXMLId().lower()=='collisionmaprobot')
def test_grabcollision(self):
env=self.env
self.LoadEnv('robots/man1.zae') # load a simple scene
with env:
robot = env.GetRobots()[0] # get the first robot
leftarm = robot.GetManipulator('leftarm')
rightarm = robot.GetManipulator('rightarm')
self.LoadEnv('data/mug1.kinbody.xml');
leftmug = env.GetKinBody('mug')
self.LoadEnv('data/mug2.kinbody.xml')
rightmug = env.GetKinBody('mug2')
env.StopSimulation()
leftMugGrabPose = array([[ 0.99516672, -0.0976999 , 0.00989374, 0.14321238],
[ 0.09786028, 0.99505007, -0.01728364, 0.94120538],
[-0.00815616, 0.01816831, 0.9998017 , 0.38686624],
[ 0. , 0. , 0. , 1. ]])
leftmug.SetTransform(leftMugGrabPose)
rightMugGrabPose = array([[ 9.99964535e-01, -1.53668225e-08, 8.41848925e-03, -1.92047462e-01],
[ -8.40134174e-03, -6.37951940e-02, 9.97927606e-01, 9.22815084e-01],
[ 5.37044369e-04, -9.97963011e-01, -6.37929291e-02, 4.16847348e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
rightmug.SetTransform(rightMugGrabPose);
assert(not env.CheckCollision(leftmug,rightmug))
grabJointAngles = array([ -3.57627869e-07, 0.00000000e+00, -1.46997878e-15, -1.65528119e+00, -1.23030146e-08, -8.41909389e-11, 0.00000000e+00], dtype=float32)
robot.SetDOFValues(grabJointAngles,rightarm.GetArmIndices())
robot.SetDOFValues(grabJointAngles,leftarm.GetArmIndices())
robot.SetActiveManipulator(rightarm)
robot.Grab(rightmug)
robot.SetActiveManipulator(leftarm)
robot.Grab(leftmug)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
self.log.debug('Now changing arm joint angles so that the two mugs collide. The checkSelfCollision returns:')
collisionJointAngles = array([ -2.38418579e-07, 0.00000000e+00, -2.96873480e-01, -1.65527940e+00, -3.82479293e-08, -1.23165381e-10, 1.35525272e-20]);
robot.SetDOFValues(collisionJointAngles,rightarm.GetArmIndices())
robot.SetDOFValues(collisionJointAngles,leftarm.GetArmIndices())
assert(robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
grabbedinfos = robot.GetGrabbedInfo()
grabbedbodies = robot.GetGrabbed()
# try saving the grabbed state
robot.ReleaseAllGrabbed()
robot.ResetGrabbed(grabbedinfos)
grabbedinfo2 = robot.GetGrabbedInfo()
assert(set([g._grabbedname for g in grabbedinfo2]) == set([b.GetName() for b in grabbedbodies]))
robot.ReleaseAllGrabbed()
assert(env.CheckCollision(leftmug,rightmug))
def test_grabcollision_dynamic(self):
self.log.info('test if can handle grabbed bodies being enabled/disabled')
env=self.env
robot = self.LoadRobot('robots/barrettwam.robot.xml')
with env:
target = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(target,True)
manip=robot.GetActiveManipulator()
target.SetTransform(manip.GetEndEffector().GetTransform())
assert(env.CheckCollision(robot,target))
self.log.info('check disabling target')
target.Enable(False)
robot.Grab(target,manip.GetEndEffector())
assert(not robot.CheckSelfCollision())
target.Enable(True)
assert(not robot.CheckSelfCollision())
target.Enable(False)
assert(not robot.CheckSelfCollision())
target.GetLinks()[0].Enable(True)
assert(not robot.CheckSelfCollision())
self.log.info('check disabling links')
robot.Enable(False)
assert(not robot.CheckSelfCollision())
robot.RegrabAll()
assert(not robot.CheckSelfCollision())
robot.Enable(True)
assert(not robot.CheckSelfCollision())
def test_grabcollision_dynamic2(self):
self.log.info('more tests for dynamic bodies and self-collisions')
env=self.env
with env:
robot = self.LoadRobot('robots/barrettwam.robot.xml')
b=RaveCreateKinBody(env,'')
b.InitFromBoxes(array([[0,0,0,0.05,1,0.05]]),True)
b.SetName('obstacle')
env.Add(b)
Tbody=eye(4)
Tbody[2,3] = 1
b.SetTransform(Tbody)
b2=RaveCreateKinBody(env,'')
b2.InitFromBoxes(array([[0,0,0,0.2,0.2,0.2]]),True)
b2.SetName('obstacle2')
b2.GetLinks()[0].GetGeometries()[0].SetDiffuseColor([0,1,0])
env.Add(b2)
Tbody2=eye(4)
Tbody2[0:3,3] = [0.7,0,0.3]
b2.SetTransform(Tbody2)
manip=robot.GetActiveManipulator()
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,IkParameterizationType.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
robot.Grab(b)
robot.SetActiveDOFs(manip.GetArmIndices())
posegoal = array([ 1.03713883e-02, 7.52075143e-01, 6.58889422e-01, 1.18381978e-02, 3.04044037e-01, -5.96046308e-10, 1.61406347e-01])
b2.Enable(False)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)>0)
# test the solution
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.IgnoreSelfCollisions)
assert(len(sols)>0)
# test the solution
with robot:
# make sure there is at least one self-collision
hasself = False
for sol in sols:
robot.SetActiveDOFValues(sol)
if robot.CheckSelfCollision():
hasself = True
assert(hasself)
b2.Enable(True)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
b.Enable(False)
manip.GetEndEffector().Enable(False)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
assert(not env.CheckCollision(robot))
b2.Enable(True)
b.Enable(True)
manip.GetEndEffector().Enable(True)
sols = manip.FindIKSolutions(posegoal,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions)
assert(len(sols)>0)
with robot:
for sol in sols:
robot.SetActiveDOFValues(sol)
assert(not robot.CheckSelfCollision())
def test_ikcollision(self):
self.log.info('test if can solve IK during collisions')
env=self.env
with env:
robot = self.LoadRobot('robots/pr2-beta-static.zae')
target = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(target,True)
T=target.GetTransform()
T[0:3,3] = [-0.342,0,0.8]
target.SetTransform(T)
floor = RaveCreateKinBody(env,'')
floor.InitFromBoxes(array([[0,0,0,2,2,0.01]]),True)
floor.SetName('floor')
env.Add(floor,True)
assert(env.CheckCollision(robot))
manip=robot.SetActiveManipulator('leftarm')
manip2 = robot.GetManipulator('rightarm')
robot.SetActiveDOFs(manip.GetArmIndices())
assert(not manip.CheckEndEffectorCollision(manip.GetTransform()))
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
assert(not manip.CheckEndEffectorCollision(manip.GetIkParameterization(IkParameterizationType.Transform6D)))
assert(not manip2.CheckEndEffectorCollision(manip2.GetIkParameterization(IkParameterizationType.Transform6D)))
# with bullet, robot gets into self-collision when first angle reaches 0.5
robot.SetActiveDOFValues([0.678, 0, 1.75604762, -1.74228108, 0, 0, 0])
assert(not robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions) is not None)
basemanip = interfaces.BaseManipulation(robot)
out=basemanip.MoveToHandPosition(matrices=[Tmanip],execute=False)
assert(out is not None)
# self colliding
robot.SetActiveDOFValues([ 2.20622614e-01, 0.00000000e+00, 1.75604762e+00, -1.74228108e+00, 0.00000000e+00, -9.56775092e-16, 0.00000000e+00])
assert(robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorEnvCollisions|IkFilterOptions.IgnoreEndEffectorSelfCollisions) is not None)
assert(not manip.CheckEndEffectorCollision(Tmanip))
box = RaveCreateKinBody(env,'')
box.InitFromBoxes(array([[0,0,0,0.05,0.05,0.2]]),True)
box.SetName('box')
env.Add(box,True)
box.SetTransform(manip.GetTransform())
robot.Grab(box)
robot.SetActiveDOFValues([ 0.5, 0.00000000e+00, 1.57, -1.74228108e+00, 3.23831570e-16, 0.00000000e+00, 0.00000000e+00])
assert(robot.CheckSelfCollision())
Tmanip = manip.GetTransform()
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(not robot.CheckSelfCollision())
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorEnvCollisions) is None)
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorSelfCollisions) is not None)
assert(not robot.CheckSelfCollision())
assert(not manip.CheckEndEffectorCollision(Tmanip))
robot.SetActiveDOFValues([ 0.00000000e+00, 0.858, 2.95911693e+00, -0.1, 0.00000000e+00, -3.14018492e-16, 0.00000000e+00])
Tmanip = manip.GetTransform()
assert(manip.FindIKSolution(Tmanip,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
# test if initial colliding attachments are handled correctly
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
T = manip.GetTransform()
T[0,3] += 0.2
target.SetTransform(T)
assert(not robot.CheckSelfCollision())
assert(env.CheckCollision(box,target))
assert(manip.CheckEndEffectorCollision(manip.GetTransform()))
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
robot.Grab(target)
assert(robot.IsGrabbing(target))
assert(not robot.CheckSelfCollision())
robot.RegrabAll()
assert(not robot.CheckSelfCollision())
robot.Release(target)
assert(not robot.IsGrabbing(target))
box2 = RaveCreateKinBody(env,'')
box2.InitFromBoxes(array([[0,0,0,0.05,0.05,0.2]]),True)
box2.SetName('box2')
env.Add(box2,True)
box2.SetTransform(manip2.GetTransform())
robot.Grab(box2,grablink=manip2.GetEndEffector())
assert(not manip2.CheckEndEffectorCollision(manip2.GetTransform()))
robot.Grab(target)
Tmanip = manip.GetTransform()
assert(not manip.CheckEndEffectorCollision(Tmanip))
robot.SetActiveDOFValues([ 0.00000000e+00, 0.858, 2.95911693e+00, -1.57009246e-16, 0.00000000e+00, -3.14018492e-16, 0.00000000e+00])
assert(not manip.CheckEndEffectorCollision(Tmanip))
def test_checkendeffector(self):
self.log.info('test if can check end effector collisions with ik params')
env=self.env
self.LoadEnv('data/katanatable.env.xml')
robot=env.GetRobots()[0]
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.TranslationDirection5D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
robot.SetActiveDOFValues([ 0, 0.89098841, 0.92174268, -1.32022237, 0])
ikparam=ikmodel.manip.GetIkParameterization(IkParameterizationType.TranslationDirection5D)
assert(not env.CheckCollision(robot))
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
assert(not ikmodel.manip.CheckEndEffectorCollision(ikparam))
T = eye(4)
T[2,3] = -0.1
ikparam2 = IkParameterization(ikparam)
ikparam2.MultiplyTransform(T)
assert(ikmodel.manip.FindIKSolution(ikparam2,0) is not None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions) is None)
assert(ikmodel.manip.CheckEndEffectorCollision(ikparam2))
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Translation3D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
robot.SetActiveDOFs(ikmodel.manip.GetArmIndices())
robot.SetActiveDOFValues([ 0, 0.89098841, 0.92174268, -1.32022237, 0])
ikparam=ikmodel.manip.GetIkParameterization(IkParameterizationType.Translation3D)
robot.SetActiveDOFValues(zeros(robot.GetActiveDOF()))
try:
ikmodel.manip.CheckEndEffectorCollision(ikparam)
raise ValueError('expected exception')
except openrave_exception:
pass
T = eye(4)
T[2,3] = -0.1
ikparam2 = IkParameterization(ikparam)
ikparam2.MultiplyTransform(T)
assert(ikmodel.manip.FindIKSolution(ikparam2,0) is not None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions) is None)
assert(ikmodel.manip.FindIKSolution(ikparam2,IkFilterOptions.CheckEnvCollisions|IkFilterOptions.IgnoreEndEffectorCollisions) is not None)
def test_badtrajectory(self):
self.log.info('create a discontinuous trajectory and check if robot throws exception')
env=self.env
robot=self.LoadRobot('robots/mitsubishi-pa10.zae')
with env:
orgvalues = robot.GetActiveDOFValues()
lower,upper = robot.GetDOFLimits()
traj=RaveCreateTrajectory(env,'')
traj.Init(robot.GetActiveConfigurationSpecification())
traj.Insert(0,r_[orgvalues,upper+0.1])
assert(traj.GetNumWaypoints()==2)
try:
ret=planningutils.RetimeActiveDOFTrajectory(traj,robot,False)
assert(ret==PlannerStatus.HasSolution)
self.RunTrajectory(robot,traj)
raise ValueError('controller did not throw limit expected exception!')
except Exception, e:
pass
traj.Init(robot.GetActiveConfigurationSpecification())
traj.Insert(0,r_[lower,upper])
assert(traj.GetNumWaypoints()==2)
try:
ret=planningutils.RetimeActiveDOFTrajectory(traj,robot,False,maxvelmult=10)
assert(ret==PlannerStatus.HasSolution)
self.RunTrajectory(robot,traj)
raise ValueError('controller did not throw velocity limit expected exception!')
except Exception, e:
pass
def test_bigrange(self):
env=self.env
robot=self.LoadRobot('robots/kuka-kr5-r650.zae')
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
with env:
j=robot.GetJointFromDOFIndex(ikmodel.manip.GetArmIndices()[-1])
lower,upper = j.GetLimits()
assert( upper-lower > 3*pi )
robot.SetDOFValues(lower+0.1,[j.GetDOFIndex()])
assert(transdist(robot.GetDOFValues([j.GetDOFIndex()]),lower+0.1) <= g_epsilon)
robot.SetDOFValues(ones(len(ikmodel.manip.GetArmIndices())),ikmodel.manip.GetArmIndices(),True)
ikparam = ikmodel.manip.GetIkParameterization(IkParameterization.Type.Transform6D)
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)==8)
# add a filter
numrepeats = [0]
indices = []
def customfilter(solution, manip, ikparam):
out = manip.GetIkSolver().SendCommand('GetRobotLinkStateRepeatCount')
if out=='1':
numrepeats[0] += 1
out = manip.GetIkSolver().SendCommand('GetSolutionIndices')
for index in out.split()[1:]:
indices.append(int(index))
return IkReturnAction.Success
handle = ikmodel.manip.GetIkSolver().RegisterCustomFilter(0,customfilter)
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(len(sols)==8)
assert(numrepeats[0]==4)
indices.sort()
assert(indices == [0,3,4,7,0x20000,0x20003,0x20004,0x20007])
handle.Close()
# customfilter shouldn't be executed anymore
sols = ikmodel.manip.FindIKSolutions(ikparam,IkFilterOptions.CheckEnvCollisions)
assert(numrepeats[0]==4)
def test_manipulators(self):
env=self.env
robot=self.LoadRobot('robots/pr2-beta-static.zae')
manip=robot.GetManipulator('leftarm_torso')
links = manip.GetChildLinks()
assert(all([l.GetName().startswith('l_gripper') or l.GetName() == 'l_wrist_roll_link' for l in links]))
ilinks = manip.GetIndependentLinks()
expectednames = set([u'base_footprint', u'base_link', u'base_bellow_link', u'base_laser_link', u'bl_caster_rotation_link', u'bl_caster_l_wheel_link', u'bl_caster_r_wheel_link', u'br_caster_rotation_link', u'br_caster_l_wheel_link', u'br_caster_r_wheel_link', u'fl_caster_rotation_link', u'fl_caster_l_wheel_link', u'fl_caster_r_wheel_link', u'fr_caster_rotation_link', u'fr_caster_l_wheel_link', u'fr_caster_r_wheel_link', u'torso_lift_motor_screw_link'])
curnames = set([l.GetName() for l in ilinks])
assert(expectednames==curnames)
cjoints = manip.GetChildJoints()
assert(len(cjoints)==4)
assert(all([j.GetName().startswith('l_') for j in cjoints]))
cdofs = manip.GetChildDOFIndices()
assert(cdofs == [22,23,24,25])
# test if manipulator can be created
manip = robot.GetManipulator('leftarm')
manipinfo = Robot.ManipulatorInfo()
manipinfo._name = 'testmanip'
manipinfo._sBaseLinkName = manip.GetBase().GetName()
manipinfo._sEffectorLinkName = manip.GetEndEffector().GetName()
manipinfo._tLocalTool = eye(4)
manipinfo._tLocalTool[2,3] = 1.0
manipinfo._vGripperJointNames = ['l_gripper_l_finger_joint']
manipinfo._vdirection = [0,1,0]
manipinfo._vClosingDirection = [1.0]
newmanip = robot.AddManipulator(manipinfo)
assert(newmanip.GetBase().GetName() == manip.GetBase().GetName())
assert(newmanip.GetEndEffector().GetName() == manip.GetEndEffector().GetName())
assert(robot.GetManipulator('testmanip')==newmanip)
assert(transdist(newmanip.GetLocalToolTransform(),manipinfo._tLocalTool) <= g_epsilon)
robot.SetActiveManipulator(newmanip)
ikmodel = databases.inversekinematics.InverseKinematicsModel(robot)
if not ikmodel.load():
ikmodel.autogenerate()
def test_grabdynamics(self):
self.log.info('test if grabbed bodies have correct velocities')
env=self.env
with env:
robot=self.LoadRobot('robots/pr2-beta-static.zae')
body = env.ReadKinBodyURI('data/mug1.kinbody.xml')
env.Add(body)
manip=robot.SetActiveManipulator('leftarm')
velocities = zeros(robot.GetDOF())
velocities[manip.GetArmIndices()] = ones(len(manip.GetArmIndices()))
robot.SetDOFVelocities(velocities)
Tmanip = manip.GetTransform()
Tbody = array(Tmanip)
Tbody[0,3] += 0.1
body.SetTransform(Tbody)
robot.Grab(body)
diff = Tbody[0:3,3] - Tmanip[0:3,3]
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
# change velocity and try again
velocities[manip.GetArmIndices()] = -ones(len(manip.GetArmIndices()))
robot.SetDOFVelocities(velocities)
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
# set robot base velocity
robot.SetVelocity([1,2,3],[4,5,6])
bodyvelocity = body.GetLinkVelocities()[0]
manipvelocity = manip.GetVelocity()
assert(transdist(manipvelocity[0:3] + cross(manipvelocity[3:6],diff),bodyvelocity[0:3]) <= g_epsilon)
assert(transdist(manipvelocity[3:6],bodyvelocity[3:6]) <= g_epsilon)
def test_quaternionjacobian(self):
self.log.info('test quaternion jacobians')
env=self.env
with env:
affine = DOFAffine.Transform
self.LoadEnv('robots/pr2-beta-static.zae')
from learning_to_adapt.dynamics.core.layers import MLP
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from learning_to_adapt.utils.serializable import Serializable
from learning_to_adapt.utils import tensor_utils
from learning_to_adapt.logger import logger
import time
class MetaMLPDynamicsModel(Serializable):
"""
Class for an MLP continuous dynamics model that is meta-trained for fast adaptation
"""
_activations = {
None: None,
"relu": tf.nn.relu,
"tanh": tf.tanh,
"sigmoid": tf.sigmoid,
"softmax": tf.nn.softmax,
"swish": lambda x: x * tf.sigmoid(x)
}
def __init__(self,
name,
env,
hidden_sizes=(512, 512),
meta_batch_size=10,
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
batch_size=500,
learning_rate=0.001,
inner_learning_rate=0.1,
normalize_input=True,
optimizer=tf.train.AdamOptimizer,
valid_split_ratio=0.2,
rolling_average_persitency=0.99,
):
Serializable.quick_init(self, locals())
self.normalization = None
self.normalize_input = normalize_input
self.next_batch = None
self.meta_batch_size = meta_batch_size
self.valid_split_ratio = valid_split_ratio
self.rolling_average_persitency = rolling_average_persitency
self.batch_size = batch_size
self.learning_rate = learning_rate
self.inner_learning_rate = inner_learning_rate
self.name = name
self._dataset_train = None
self._dataset_test = None
self._prev_params = None
self._adapted_param_values = None
# determine dimensionality of state and action space
self.obs_space_dims = obs_space_dims = env.observation_space.shape[0]
self.action_space_dims = action_space_dims = env.action_space.shape[0]
hidden_nonlinearity = self._activations[hidden_nonlinearity]
output_nonlinearity = self._activations[output_nonlinearity]
""" ------------------ Pre-Update Graph + Adaptation ----------------------- """
with tf.variable_scope(name):
# Placeholders
self.obs_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
self.act_ph = tf.placeholder(tf.float32, shape=(None, action_space_dims))
self.delta_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
# Concatenate action and observation --> NN input
self.nn_input = tf.concat([self.obs_ph, self.act_ph], axis=1)
# Create MLP
mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=self.nn_input,
input_dim=obs_space_dims+action_space_dims)
self.delta_pred = mlp.output_var  # shape: (batch_size, ndim_obs)
self.loss = tf.reduce_mean(tf.square(self.delta_ph - self.delta_pred))
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.adaptation_sym = tf.train.GradientDescentOptimizer(self.inner_learning_rate).minimize(self.loss)
# Tensor_utils
self.f_delta_pred = tensor_utils.compile_function([self.obs_ph, self.act_ph], self.delta_pred)
""" --------------------------- Meta-training Graph ---------------------------------- """
nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
delta_per_task = tf.split(self.delta_ph, self.meta_batch_size, axis=0)
pre_input_per_task, post_input_per_task = zip(*[tf.split(nn_input, 2, axis=0) for nn_input in nn_input_per_task])
pre_delta_per_task, post_delta_per_task = zip(*[tf.split(delta, 2, axis=0) for delta in delta_per_task])
pre_losses = []
post_losses = []
self._adapted_params = []
for idx in range(self.meta_batch_size):
with tf.variable_scope(name + '/pre_model_%d' % idx, reuse=tf.AUTO_REUSE):
pre_mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=pre_input_per_task[idx],
input_dim=obs_space_dims + action_space_dims,
params=mlp.get_params())
pre_delta_pred = pre_mlp.output_var
pre_loss = tf.reduce_mean(tf.square(pre_delta_per_task[idx] - pre_delta_pred))
adapted_params = self._adapt_sym(pre_loss, pre_mlp.get_params())
self._adapted_params.append(adapted_params)
with tf.variable_scope(name + '/post_model_%d' % idx, reuse=tf.AUTO_REUSE):
post_mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=post_input_per_task[idx],
params=adapted_params,
input_dim=obs_space_dims + action_space_dims)
post_delta_pred = post_mlp.output_var
post_loss = tf.reduce_mean(tf.square(post_delta_per_task[idx] - post_delta_pred))
pre_losses.append(pre_loss)
post_losses.append(post_loss)
self.pre_loss = tf.reduce_mean(pre_losses)
self.post_loss = tf.reduce_mean(post_losses)
self.train_op = optimizer(self.learning_rate).minimize(self.post_loss)
""" --------------------------- Post-update Inference Graph --------------------------- """
with tf.variable_scope(name + '_ph_graph'):
self.post_update_delta = []
self.network_phs_meta_batch = []
nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
for idx in range(meta_batch_size):
with tf.variable_scope('task_%i' % idx):
network_phs = self._create_placeholders_for_vars(mlp.get_params())
self.network_phs_meta_batch.append(network_phs)
mlp_meta_batch = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
params=network_phs,
input_var=nn_input_per_task[idx],
input_dim=obs_space_dims + action_space_dims,
)
self.post_update_delta.append(mlp_meta_batch.output_var)
self._networks = [mlp]
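# A minimal usage sketch (names such as 'env' and the array shapes below are assumptions
# for illustration; this is not the original training loop):
#
#   model = MetaMLPDynamicsModel('dyn_model', env, meta_batch_size=10, batch_size=500)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       model.fit(obs, act, obs_next, epochs=50, verbose=True)   # 3D arrays: (paths, timesteps, dim)
#       model.adapt([obs_task], [act_task], [obs_next_task])     # per-task 2D arrays
#       next_obs_pred = model.predict(obs_batch, act_batch)      # 2D arrays: (batch, dim)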
def fit(self, obs, act, obs_next, epochs=1000, compute_normalization=True,
valid_split_ratio=None, rolling_average_persitency=None, verbose=False, log_tabular=False):
assert obs.ndim == 3 and obs.shape[2] == self.obs_space_dims
assert obs_next.ndim == 3 and obs_next.shape[2] == self.obs_space_dims
assert act.ndim == 3 and act.shape[2] == self.action_space_dims
if valid_split_ratio is None: valid_split_ratio = self.valid_split_ratio
if rolling_average_persitency is None: rolling_average_persitency = self.rolling_average_persitency
assert 1 > valid_split_ratio >= 0
sess = tf.get_default_session()
if (self.normalization is None or compute_normalization) and self.normalize_input:
self.compute_normalization(obs, act, obs_next)
if self.normalize_input:
# Normalize data
obs, act, delta = self._normalize_data(obs, act, obs_next)
assert obs.ndim == act.ndim == obs_next.ndim == 3
else:
delta = obs_next - obs
# Split into valid and test set
obs_train, act_train, delta_train, obs_test, act_test, delta_test = train_test_split(obs, act, delta,
test_split_ratio=valid_split_ratio)
if self._dataset_test is None:
self._dataset_test = dict(obs=obs_test, act=act_test, delta=delta_test)
self._dataset_train = dict(obs=obs_train, act=act_train, delta=delta_train)
else:
self._dataset_test['obs'] = np.concatenate([self._dataset_test['obs'], obs_test])
self._dataset_test['act'] = np.concatenate([self._dataset_test['act'], act_test])
self._dataset_test['delta'] = np.concatenate([self._dataset_test['delta'], delta_test])
self._dataset_train['obs'] = np.concatenate([self._dataset_train['obs'], obs_train])
self._dataset_train['act'] = np.concatenate([self._dataset_train['act'], act_train])
self._dataset_train['delta'] = np.concatenate([self._dataset_train['delta'], delta_train])
valid_loss_rolling_average = None
epoch_times = []
""" ------- Looping over training epochs ------- """
num_steps_per_epoch = max(int(np.prod(self._dataset_train['obs'].shape[:2])
/ (self.meta_batch_size * self.batch_size * 2)), 1)
num_steps_test = max(int(np.prod(self._dataset_test['obs'].shape[:2])
/ (self.meta_batch_size * self.batch_size * 2)), 1)
for epoch in range(epochs):
# preparations for recording training stats
pre_batch_losses = []
post_batch_losses = []
t0 = time.time()
""" ------- Looping through the shuffled and batched dataset for one epoch -------"""
for _ in range(num_steps_per_epoch):
obs_batch, act_batch, delta_batch = self._get_batch(train=True)
pre_batch_loss, post_batch_loss, _ = sess.run([self.pre_loss, self.post_loss, self.train_op],
feed_dict={self.obs_ph: obs_batch,
self.act_ph: act_batch,
self.delta_ph: delta_batch})
pre_batch_losses.append(pre_batch_loss)
post_batch_losses.append(post_batch_loss)
valid_losses = []
for _ in range(num_steps_test):
obs_test, act_test, delta_test = self._get_batch(train=False)
# compute validation loss
feed_dict = {self.obs_ph: obs_test,
self.act_ph: act_test,
self.delta_ph: delta_test}
valid_loss = sess.run(self.loss, feed_dict=feed_dict)
valid_losses.append(valid_loss)
valid_loss = np.mean(valid_losses)
if valid_loss_rolling_average is None:
valid_loss_rolling_average = 1.5 * valid_loss # set the initial rolling average to a higher value to avoid stopping too early
valid_loss_rolling_average_prev = 2 * valid_loss
if valid_loss < 0:
valid_loss_rolling_average = valid_loss/1.5 # set the initial rolling average to a higher value to avoid stopping too early
valid_loss_rolling_average_prev = valid_loss/2
valid_loss_rolling_average = rolling_average_persitency*valid_loss_rolling_average \
+ (1.0-rolling_average_persitency)*valid_loss
epoch_times.append(time.time() - t0)
if verbose:
logger.log("Training DynamicsModel - finished epoch %i - "
"train loss: %.4f valid loss: %.4f valid_loss_mov_avg: %.4f epoch time: %.2f"
% (epoch, np.mean(post_batch_losses), valid_loss, valid_loss_rolling_average,
time.time() - t0))
if valid_loss_rolling_average_prev < valid_loss_rolling_average or epoch == epochs - 1:
logger.log('Stopping training of model since its valid_loss_rolling_average stopped decreasing')
break
valid_loss_rolling_average_prev = valid_loss_rolling_average
""" ------- Tabular Logging ------- """
if log_tabular:
logger.logkv('AvgModelEpochTime', np.mean(epoch_times))
logger.logkv('Post-Loss', np.mean(post_batch_losses))
logger.logkv('Pre-Loss', np.mean(pre_batch_losses))
logger.logkv('Epochs', epoch)
def predict(self, obs, act):
assert obs.shape[0] == act.shape[0]
assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
assert act.ndim == 2 and act.shape[1] == self.action_space_dims
obs_original = obs
if self.normalize_input:
obs, act = self._normalize_data(obs, act)
delta = np.array(self._predict(obs, act))
delta = denormalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
else:
delta = np.array(self._predict(obs, act))
assert delta.ndim == 2
pred_obs = obs_original + delta
return pred_obs
def _predict(self, obs, act):
if self._adapted_param_values is not None:
sess = tf.get_default_session()
obs, act = self._pad_inputs(obs, act)
feed_dict = {self.obs_ph: obs, self.act_ph: act}
feed_dict.update(self.network_params_feed_dict)
delta = sess.run(self.post_update_delta[:self._num_adapted_models], feed_dict=feed_dict)
delta = np.concatenate(delta, axis=0)
else:
delta = self.f_delta_pred(obs, act)
return delta
def _pad_inputs(self, obs, act, obs_next=None):
if self._num_adapted_models < self.meta_batch_size:
pad = int(obs.shape[0] / self._num_adapted_models * (self.meta_batch_size - self._num_adapted_models))
obs = np.concatenate([obs, np.zeros((pad,) + obs.shape[1:])], axis=0)
act = np.concatenate([act, np.zeros((pad,) + act.shape[1:])], axis=0)
if obs_next is not None:
obs_next = np.concatenate([obs_next, np.zeros((pad,) + obs_next.shape[1:])], axis=0)
if obs_next is not None:
return obs, act, obs_next
else:
return obs, act
def adapt(self, obs, act, obs_next):
self._num_adapted_models = len(obs)
assert len(obs) == len(act) == len(obs_next)
obs = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs], axis=0)
act = np.concatenate([np.concatenate([a, np.zeros_like(a)], axis=0) for a in act], axis=0)
obs_next = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs_next], axis=0)
obs, act, obs_next = self._pad_inputs(obs, act, obs_next)
assert obs.shape[0] == act.shape[0] == obs_next.shape[0]
assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
assert act.ndim == 2 and act.shape[1] == self.action_space_dims
assert obs_next.ndim == 2 and obs_next.shape[1] == self.obs_space_dims
if self.normalize_input:
# Normalize data
obs, act, delta = self._normalize_data(obs, act, obs_next)
assert obs.ndim == act.ndim == obs_next.ndim == 2
else:
delta = obs_next - obs
self._prev_params = [nn.get_param_values() for nn in self._networks]
sess = tf.get_default_session()
self._adapted_param_values = sess.run(self._adapted_params[:self._num_adapted_models],
feed_dict={self.obs_ph: obs, self.act_ph: act, self.delta_ph: delta})
def switch_to_pre_adapt(self):
if self._prev_params is not None:
[nn.set_params(params) for nn, params in zip(self._networks, self._prev_params)]
self._prev_params = None
self._adapted_param_values = None
def _get_batch(self, train=True):
if train:
num_paths, len_path = self._dataset_train['obs'].shape[:2]
idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
idx_batch = np.random.randint(self.batch_size, len_path - self.batch_size, size=self.meta_batch_size)
obs_batch = np.concatenate([self._dataset_train['obs'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
act_batch = np.concatenate([self._dataset_train['act'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
delta_batch = np.concatenate([self._dataset_train['delta'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
else:
num_paths, len_path = self._dataset_test['obs'].shape[:2]
idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
idx_batch = np.random.randint(self.batch_size, len_path - self.batch_size, size=self.meta_batch_size)
obs_batch = np.concatenate([self._dataset_test['obs'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
act_batch = np.concatenate([self._dataset_test['act'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
delta_batch = np.concatenate([self._dataset_test['delta'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
return obs_batch, act_batch, delta_batch
def _normalize_data(self, obs, act, obs_next=None):
obs_normalized = normalize(obs, self.normalization['obs'][0], self.normalization['obs'][1])
actions_normalized = normalize(act, self.normalization['act'][0], self.normalization['act'][1])
if obs_next is not None:
delta = obs_next - obs
deltas_normalized = normalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
return obs_normalized, actions_normalized, deltas_normalized
else:
return obs_normalized, actions_normalized
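# Note: normalize()/denormalize() and train_test_split() used above are assumed to be helpers
# defined elsewhere in the project (not shown in this excerpt); a typical normalization is
# (data - mean) / (std + eps) and its inverse data * (std + eps) + mean.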
def compute_normalization(self, obs, act, obs_next):
assert obs.shape[0] == obs_next.shape[0] == act.shape[0]
assert obs.shape[1] == obs_next.shape[1] == act.shape[1]
delta = obs_next - obs
""" metric.py is a small script to turn psuedo-c metric descriptions
into executable c code as needed by zettair. Usage:
metric.py [--debug | --help] metricname.metric
If you want to debug your metric, i suggest you insert printf statements,
as there's nothing stopping you doing so.
Our input format is this:
- comments are allowed at any point in the file, using either # at the
start of the comment and continuing to the end of the line. e.g.
# this is a comment
- a parameters section, where you provide whatever parameters the
metric needs by putting the word 'parameter ' before a c declaration of
the parameter, with a default if necessary. Each parameter declaration
must be on a single line. e.g.
parameter float k1;
- two functions, decode and post, declared as:
post() {
}
- these functions contain an initial declarations section, where
you can declare intermediate quantities in c declarations as needed. i.e.
decode() {
float w_t;
}
- a series of expressions and logic involving the parameters,
intermediate quantities, special quantities. To find out the
special quantities available, type python metric.py --help.
- note: you are almost certainly better off using the ternary
operator in decode calculations, as if statement processing is fragile
written nml 2004-12-15
"""
import getopt
import sys
import string
import operator
from time import strftime, gmtime
def indent(str, dent = 0, prestr = ' '):
'''Simple function to uniformly indent lines.'''
return '\n'.join(map(lambda x: (prestr * dent) + x, str.split('\n')))
def dedent(str, dent = 0):
"""Simple function to uniformly remove whitespace from the front of
lines."""
lines = str.split('\n')
try:
dent = reduce(min, map(lambda x: len(x) - len(x.lstrip()),
filter(lambda x: len(x.lstrip()) > 0, lines)))
except TypeError:
dent = 0
return '\n'.join(map(lambda x: len(x.lstrip()) > 0 and x[dent:] or x, lines))
def isop(char):
"""Simple function to determine whether a character is a c
operator character (not including semi-colon)."""
if ((char == '-') or (char == '+') or (char == ',') or (char == '=')
or (char == '(') or (char == ')') or (char == '?') or (char == ':')
or (char == '*') or (char == '/') or (char == '~') or (char == '!')
or (char == '^') or (char == '|') or (char == '&') or (char == '[')
or (char == ']') or (char == '{') or (char == '}') or (char == '%')
or (char == '<') or (char == '>')):
return 1
else:
return 0
def isdecl(word):
if (word == 'float' or word == 'double' or word == 'long'
or word == 'unsigned' or word == 'signed' or word == 'const'
or word == 'volatile' or word == 'int' or word == 'short'
or word == 'char' or word == 'register' or word == 'void'
or word == 'union' or word == 'struct' or word == '*'
or word == '**' or word == '***' or word == '****'):
return 1
else:
return 0
# function to filter whitespace tokens from ctok
def ctok_nspace(line):
toks, chars = ctok(line)
filtered = filter(lambda x: not x[0].isspace(), zip(toks, chars))
toks = map(lambda x: x[0], filtered)
chars = map(lambda x: x[1], filtered)
return toks, chars
def ctok(line):
toks = []
chars = []
char = 0
while (len(line)):
c = line[0]
schar = char
line = line[1:]
if (c.isspace()):
tok = c
while (len(line) and line[0].isspace()):
tok = tok + line[0]
line = line[1:]
char += 1
toks.append(tok)
chars.append(schar)
elif (isop(c) == 1):
tok = c
while (len(line) and isop(line[0])):
tok = tok + line[0]
line = line[1:]
char += 1
toks.append(tok)
chars.append(schar)
elif (c == ';'):
toks.append(';')
chars.append(schar)
else:
tok = c
while (len(line) and isop(line[0]) == 0
and line[0] != ';' and not line[0].isspace()):
tok = tok + line[0]
line = line[1:]
char += 1
toks.append(tok)
chars.append(schar)
char += 1
return toks, chars
def usage(progname, decode = None, post = None):
print 'usage: %s [--debug] metricfile templatefile' \
% progname
if (decode != None):
dkeys = decode.keys()
dkeys.sort()
print
print 'these quantities are available in decode routines:'
for d in dkeys:
print ' ', d + ':', decode[d].ex
if (post != None):
dkeys = post.keys()
dkeys.sort()
print
print 'these quantities are available in post routines:'
for d in dkeys:
print ' ', d + ':', post[d].ex
class Decl:
def __init__(self, lineno, name, type, init, level, macro,
comment, fninit, pre, contrib, ex):
self.pre = pre
self.lineno = lineno
self.name = name
self.type = type
self.init = init
# fn init is if we can't initialise by assignment, which happens
# sometimes (only available to built-in quantities)
self.fninit = fninit
self.level = level
self.macro = macro
self.comment = comment.strip()
# contrib is what happens if we have to calculate the
# contribution without a specific document
self.contrib = contrib
self.ex = ex
def __repr__(self):
return self.__str__()
def __str__(self):
return "type/name/init/fninit %s %s %s %s; lvl %u line %s macro %s comment %s pre %s" \
% (self.type, self.name, self.init, self.fninit, self.level, self.lineno, self.macro, self.comment, self.pre)
def ins_decl(namespaces, used, line, level, macro = '',
lineno = -1, fninit = '', pre = '', contrib = '', ex = ''):
# check for and remove trailing comment
comment = ''
pos = line.find('#')
if (pos > 0):
comment = line[pos + 1:]
line = line[0:pos]
pos = line.find('/*')
if (pos > 0):
pos2 = line.find('*/')
if (pos2 > 0):
pos2 += 2
comment = line[pos + 2:pos2 - 2]
line = line[0:pos] + line[pos2:]
else:
print '#line', lineno, '"' + args[0] + '"'
print '#error "multiline comment in declaration"'
toks, chars = ctok_nspace(line.rstrip())
type = ''
while (len(toks) > 0 and isdecl(toks[0]) == 1):
# this token is part of the type
# skip tag in struct and union
if (toks[0] == 'struct' or toks[0] == 'union'):
type += toks[0] + ' '
toks = toks[1:]
type += toks[0] + ' '
toks = toks[1:]
if (len(toks)):
# check for namespace collisions
for n in namespaces:
if (n.has_key(toks[0])):
# collision
print '#line', lineno, '"' + args[0] + '"'
print '#error "duplicate','declaration \'', line.rstrip(), '\'"'
sys.exit(2)
# check for and remove semi-colon
if (toks[-1] == ';'):
toks = toks[0:-1]
else:
print '#line', lineno, '"' + args[0] + '"'
print '#error "declaration without semi-colon ending"'
sys.exit(2)
# identify initialiser without tokenising (makes the spacing odd
# otherwise)
init = line[chars[len(chars) - len(toks)]:]
init = init[0:init.find(';')]
# identify quantities used
for t in toks[1:]:
for n in namespaces:
if (t in n):
for u in used:
u[t] = t
# accept declaration
for n in namespaces:
n[toks[0]] = Decl(lineno, toks[0], type.strip(),
init.strip(), level, macro, comment, fninit, pre, contrib, ex)
# identify quantities used in contrib (XXX: we only insert
# them in the first namespace/used combo - this is dodgy)
name = toks[0]
toks, chars = ctok_nspace(contrib)
for t in toks:
if (t in namespaces[0]):
used[0][t] = t
else:
# huh?
print '#line', lineno, '"' + args[0] + '"'
print '#error "error parsing declaration\'',\
line.rstrip(), '\'"'
sys.exit(2)
def process_decl(file, decl, decl_used, lineno):
"""Function to process declarations in post, decode sections.
Returns first line that isn't a declaration."""
# start with next line
line = file.readline()
lineno += 1
while (line != '' and line.rstrip() != '}'):
# in decl section
if (len(line.strip()) > 0 and line.strip()[0] != '#'):
toks, chars = ctok_nspace(line)
# tokenise line and process it
if (isdecl(toks[0]) == 1):
ins_decl([decl], [decl_used], line=line, level=1, macro='', lineno=lineno, contrib='', ex='user declared quantity')
else:
return [lineno, line]
# next line
line = file.readline()
lineno += 1
if (line == ''):
print '#line', lineno, '"' + args[0] + '"'
print '#error "unexpected EOF"'
sys.exit(2)
return [lineno, line]
def levelise(file, line, lineno, decl, list, used):
"""assign lines in a file | |
# sc_projects/SC101/my_drawing/my_drawing.py
"""
File: my_drawing.py
Name: <NAME>
----------------------
This file prints the 'STARWARS' logo. May the Force be with you.
"""
from campy.graphics.gobjects import GRect, GPolygon
from campy.graphics.gwindow import GWindow
window = GWindow(width=600, height=362, title='STARWARS')
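# Optional helper (an addition for illustration, not part of the original assignment):
# building each letter from a list of vertices avoids repeating the add_vertex /
# filled / color boilerplate used below. main() could call, for example:
#   add_polygon([(116, 143), (116, 137), (26, 137), (26, 179), (116, 179)])
def add_polygon(vertices, dx=0, dy=0, color='yellow'):
    """Create a filled GPolygon from (x, y) tuples shifted by (dx, dy) and add it to the window."""
    poly = GPolygon()
    for x, y in vertices:
        poly.add_vertex((x + dx, y + dy))
    poly.filled = True
    poly.color = color
    poly.fill_color = color
    window.add(poly)
    return poly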
def main():
"""
Well, just draw all of the letter shapes with GPolygon
"""
background = GRect(window.width, window.height, x=0, y=0)
background.filled = True
background.fill_color = 'black'
window.add(background)
# Drawing first S & T
poly_s1 = GPolygon()
poly_s1.add_vertex((116, 143))
poly_s1.add_vertex((116, 137))
poly_s1.add_vertex((26, 137))
poly_s1.add_vertex((26, 179))
poly_s1.add_vertex((116, 179))
poly_s1.add_vertex((116, 173))
poly_s1.add_vertex((32, 173))
poly_s1.add_vertex((32, 143))
poly_s1.filled = True
poly_s1.color = 'yellow'
poly_s1.fill_color = 'yellow'
window.add(poly_s1)
poly_s2 = GPolygon()
poly_s2.add_vertex((116, 137))
poly_s2.add_vertex((98, 114))
poly_s2.add_vertex((94, 107))
poly_s2.add_vertex((92, 99))
poly_s2.add_vertex((94, 90))
poly_s2.add_vertex((98, 81))
poly_s2.add_vertex((106, 71))
poly_s2.add_vertex((114, 66))
poly_s2.add_vertex((123, 65))
poly_s2.add_vertex((123, 71))
poly_s2.add_vertex((114, 74))
poly_s2.add_vertex((109, 78))
poly_s2.add_vertex((106, 83))
poly_s2.add_vertex((101, 90))
poly_s2.add_vertex((100, 98))
poly_s2.add_vertex((101, 107))
poly_s2.add_vertex((122, 133))
poly_s2.add_vertex((123, 135))
poly_s2.add_vertex((123, 138))
poly_s2.add_vertex((121, 141))
poly_s2.add_vertex((118, 143))
poly_s2.add_vertex((116, 143))
poly_s2.filled = True
poly_s2.color = 'yellow'
poly_s2.fill_color = 'yellow'
window.add(poly_s2)
poly_t1 = GPolygon()
poly_t1.add_vertex((123, 65))
poly_t1.add_vertex((290, 65))
poly_t1.add_vertex((290, 103))
poly_t1.add_vertex((240, 103))
poly_t1.add_vertex((240, 178))
poly_t1.add_vertex((197, 178))
poly_t1.add_vertex((197, 103))
poly_t1.add_vertex((145, 103))
poly_t1.add_vertex((145, 95))
poly_t1.add_vertex((204, 95))
poly_t1.add_vertex((204, 172))
poly_t1.add_vertex((233, 172))
poly_t1.add_vertex((233, 95))
poly_t1.add_vertex((282, 95))
poly_t1.add_vertex((282, 71))
poly_t1.add_vertex((123, 71))
poly_t1.filled = True
poly_t1.color = 'yellow'
poly_t1.fill_color = 'yellow'
window.add(poly_t1)
poly_s3 = GPolygon()
poly_s3.add_vertex((145, 95))
poly_s3.add_vertex((141, 97))
poly_s3.add_vertex((139, 100))
poly_s3.add_vertex((137, 103))
poly_s3.add_vertex((137, 105))
poly_s3.add_vertex((140, 109))
poly_s3.add_vertex((160, 136))
poly_s3.add_vertex((162, 139))
poly_s3.add_vertex((162, 143))
poly_s3.add_vertex((161, 149))
poly_s3.add_vertex((160, 155))
poly_s3.add_vertex((158, 160))
poly_s3.add_vertex((153, 165))
poly_s3.add_vertex((146, 170))
poly_s3.add_vertex((137, 171))
poly_s3.add_vertex((116, 173))
poly_s3.add_vertex((116, 179))
poly_s3.add_vertex((134, 178))
poly_s3.add_vertex((143, 178))
poly_s3.add_vertex((149, 176))
poly_s3.add_vertex((153, 174))
poly_s3.add_vertex((157, 171))
poly_s3.add_vertex((161, 168))
poly_s3.add_vertex((164, 164))
poly_s3.add_vertex((167, 158))
poly_s3.add_vertex((169, 153))
poly_s3.add_vertex((170, 146))
poly_s3.add_vertex((169, 140))
poly_s3.add_vertex((168, 135))
poly_s3.add_vertex((165, 130))
poly_s3.add_vertex((145, 102))
poly_s3.filled = True
poly_s3.color = 'yellow'
poly_s3.fill_color = 'yellow'
window.add(poly_s3)
# Drawing second S starting at x=491, y=257 (x+375 y+114)
x2 = 375
y2 = 118
poly_s4 = GPolygon()
poly_s4.add_vertex((116 + x2, 143 + y2))
poly_s4.add_vertex((116 + x2, 137 + y2))
poly_s4.add_vertex((78 + x2, 140 + y2))
poly_s4.add_vertex((78 + x2, 146 + y2))
poly_s4.filled = True
poly_s4.color = 'yellow'
poly_s4.fill_color = 'yellow'
window.add(poly_s4)
poly_s5 = GPolygon()
poly_s5.add_vertex((116 + x2, 137 + y2))
poly_s5.add_vertex((98 + x2, 114 + y2))
poly_s5.add_vertex((94 + x2, 107 + y2))
poly_s5.add_vertex((92 + x2, 99 + y2))
poly_s5.add_vertex((94 + x2, 90 + y2))
poly_s5.add_vertex((98 + x2, 81 + y2))
poly_s5.add_vertex((106 + x2, 71 + y2))
poly_s5.add_vertex((114 + x2, 66 + y2))
poly_s5.add_vertex((123 + x2, 65 + y2))
poly_s5.add_vertex((123 + x2, 71 + y2))
poly_s5.add_vertex((114 + x2, 74 + y2))
poly_s5.add_vertex((109 + x2, 78 + y2))
poly_s5.add_vertex((106 + x2, 83 + y2))
poly_s5.add_vertex((101 + x2, 90 + y2))
poly_s5.add_vertex((100 + x2, 98 + y2))
poly_s5.add_vertex((101 + x2, 107 + y2))
poly_s5.add_vertex((122 + x2, 133 + y2))
poly_s5.add_vertex((123 + x2, 135 + y2))
poly_s5.add_vertex((123 + x2, 138 + y2))
poly_s5.add_vertex((121 + x2, 141 + y2))
poly_s5.add_vertex((118 + x2, 143 + y2))
poly_s5.add_vertex((116 + x2, 143 + y2))
poly_s5.filled = True
poly_s5.color = 'yellow'
poly_s5.fill_color = 'yellow'
window.add(poly_s5)
poly_s6 = GPolygon()
poly_s6.add_vertex((145 + x2, 95 + y2))
poly_s6.add_vertex((141 + x2, 97 + y2))
poly_s6.add_vertex((139 + x2, 100 + y2))
poly_s6.add_vertex((137 + x2, 103 + y2))
poly_s6.add_vertex((137 + x2, 105 + y2))
poly_s6.add_vertex((140 + x2, 109 + y2))
poly_s6.add_vertex((160 + x2, 136 + y2))
poly_s6.add_vertex((162 + x2, 139 + y2))
poly_s6.add_vertex((162 + x2, 143 + y2))
poly_s6.add_vertex((161 + x2, 149 + y2))
poly_s6.add_vertex((160 + x2, 155 + y2))
poly_s6.add_vertex((158 + x2, 160 + y2))
poly_s6.add_vertex((153 + x2, 165 + y2))
poly_s6.add_vertex((146 + x2, 170 + y2))
poly_s6.add_vertex((137 + x2, 171 + y2))
poly_s6.add_vertex((101 + x2, 173 + y2))
poly_s6.add_vertex((101 + x2, 180 + y2))
poly_s6.add_vertex((134 + x2, 178 + y2))
poly_s6.add_vertex((143 + x2, 178 + y2))
poly_s6.add_vertex((149 + x2, 176 + y2))
poly_s6.add_vertex((153 + x2, 174 + y2))
poly_s6.add_vertex((157 + x2, 171 + y2))
poly_s6.add_vertex((161 + x2, 168 + y2))
poly_s6.add_vertex((164 + x2, 164 + y2))
poly_s6.add_vertex((167 + x2, 158 + y2))
poly_s6.add_vertex((169 + x2, 153 + y2))
poly_s6.add_vertex((170 + x2, 146 + y2))
poly_s6.add_vertex((169 + x2, 140 + y2))
poly_s6.add_vertex((168 + x2, 135 + y2))
poly_s6.add_vertex((165 + x2, 130 + y2))
poly_s6.add_vertex((145 + x2, 102 + y2))
poly_s6.filled = True
poly_s6.color = 'yellow'
poly_s6.fill_color = 'yellow'
window.add(poly_s6)
poly_s7 = GPolygon()
poly_s7.add_vertex((123 + x2, 65 + y2))
poly_s7.add_vertex((574, 65 + y2))
poly_s7.add_vertex((573, 222))
poly_s7.add_vertex((520, 222))
poly_s7.add_vertex((520, 213))
poly_s7.add_vertex((567, 213))
poly_s7.add_vertex((567, 189))
poly_s7.add_vertex((123 + x2, 71 + y2))
poly_s7.filled = True
poly_s7.color = 'yellow'
poly_s7.fill_color = 'yellow'
window.add(poly_s7)
# Drawing first A
poly_a1 = GPolygon()
poly_a1.add_vertex((311, 65))
poly_a1.add_vertex((368, 65))
poly_a1.add_vertex((408, 178))
poly_a1.add_vertex((363, 178))
poly_a1.add_vertex((360, 163))
poly_a1.add_vertex((320, 163))
poly_a1.add_vertex((316, 178))
poly_a1.add_vertex((272, 178))
poly_a1.add_vertex((311, 66))
poly_a1.add_vertex((316, 71))
poly_a1.add_vertex((282, 171))
poly_a1.add_vertex((311, 171))
poly_a1.add_vertex((315, 158))
poly_a1.add_vertex((366, 158))
poly_a1.add_vertex((368, 171))
poly_a1.add_vertex((399, 171))
poly_a1.add_vertex((363, 71))
poly_a1.add_vertex((316, 71))
poly_a1.filled = True
poly_a1.color = 'yellow'
poly_a1.fill_color = 'yellow'
window.add(poly_a1)
poly_a2 = GPolygon()
poly_a2.add_vertex((340, 86))
poly_a2.add_vertex((323, 135))
poly_a2.add_vertex((357, 135))
poly_a2.add_vertex((347, 128))
poly_a2.add_vertex((333, 128))
poly_a2.add_vertex((340, 107))
poly_a2.add_vertex((347, 128))
poly_a2.add_vertex((357, 135))
poly_a2.add_vertex((340, 86))
poly_a2.filled = True
poly_a2.color = 'yellow'
poly_a2.fill_color = 'yellow'
window.add(poly_a2)
# Drawing second A starting at x=219,y=184 (x-92 y+119)
x1 = -92
y1 = 119
poly_a3 = GPolygon()
poly_a3.add_vertex((311+x1, 65+y1))
poly_a3.add_vertex((368+x1, 65+y1))
poly_a3.add_vertex((408+x1, 178+y1))
poly_a3.add_vertex((363+x1, 178+y1))
poly_a3.add_vertex((360+x1, 163+y1))
poly_a3.add_vertex((320+x1, 163+y1))
poly_a3.add_vertex((316+x1, 178+y1))
poly_a3.add_vertex((272+x1, 178+y1))
poly_a3.add_vertex((311+x1, 66+y1))
poly_a3.add_vertex((316+x1, 71+y1))
poly_a3.add_vertex((282+x1, 171+y1))
poly_a3.add_vertex((311+x1, 171+y1))
poly_a3.add_vertex((315+x1, 158+y1))
poly_a3.add_vertex((366+x1, 158+y1))
poly_a3.add_vertex((368+x1, 171+y1))
poly_a3.add_vertex((399+x1, 171+y1))
poly_a3.add_vertex((363+x1, 71+y1))
poly_a3.add_vertex((316+x1, 71+y1))
poly_a3.filled = True
poly_a3.color = 'yellow'
poly_a3.fill_color = 'yellow'
window.add(poly_a3)
poly_a4 = GPolygon()
poly_a4.add_vertex((340+x1, 86+y1))
poly_a4.add_vertex((323+x1, 135+y1))
poly_a4.add_vertex((357+x1, 135+y1))
poly_a4.add_vertex((347+x1, 128+y1))
poly_a4.add_vertex((333+x1, 128+y1))
poly_a4.add_vertex((340+x1, 107+y1))
poly_a4.add_vertex((347+x1, 128+y1))
poly_a4.add_vertex((357+x1, 135+y1))
poly_a4.add_vertex((340+x1, 86+y1))
poly_a4.filled = True
poly_a4.color = 'yellow'
poly_a4.fill_color = 'yellow'
window.add(poly_a4)
# Drawing first R
poly_r1 = GPolygon()
poly_r1.add_vertex((418, 65))
poly_r1.add_vertex((494, 65))
poly_r1.add_vertex((501, 66))
poly_r1.add_vertex((506, 67))
poly_r1.add_vertex((510, 68))
poly_r1.add_vertex((515, 70))
poly_r1.add_vertex((521, 75))
poly_r1.add_vertex((526, 80))
poly_r1.add_vertex((529, 85))
poly_r1.add_vertex((531, 89))
poly_r1.add_vertex((533, 94))
poly_r1.add_vertex((533, 100))
poly_r1.add_vertex((532, 106))
poly_r1.add_vertex((531, 111))
poly_r1.add_vertex((530, 118))
poly_r1.add_vertex((526, 124))
poly_r1.add_vertex((521, 129))
poly_r1.add_vertex((514, 134))
poly_r1.add_vertex((512, 136))
poly_r1.add_vertex((574, 138))
poly_r1.add_vertex((575, 178))
poly_r1.add_vertex((512, 178))
poly_r1.add_vertex((500, 177))
poly_r1.add_vertex((496, 176))
poly_r1.add_vertex((493, 175))
poly_r1.add_vertex((488, 173))
poly_r1.add_vertex((464, 150))
poly_r1.add_vertex((464, 178))
poly_r1.add_vertex((418, 178))
poly_r1.add_vertex((418, 65))
poly_r1.add_vertex((425, 72))
poly_r1.add_vertex((496, 72))
poly_r1.add_vertex((501, 73))
poly_r1.add_vertex((506, 74))
poly_r1.add_vertex((511, 76))
poly_r1.add_vertex((515, 80))
poly_r1.add_vertex((519, 83))
poly_r1.add_vertex((522, 87))
poly_r1.add_vertex((525, 92))
poly_r1.add_vertex((526, 98))
poly_r1.add_vertex((526, 105))
poly_r1.add_vertex((524, 111))
poly_r1.add_vertex((522, 117))
poly_r1.add_vertex((518, 122))
poly_r1.add_vertex((513, 126))
poly_r1.add_vertex((505, 131))
poly_r1.add_vertex((503, 132))
poly_r1.add_vertex((505, 136))
poly_r1.add_vertex((508, 139))
poly_r1.add_vertex((512, 142))
poly_r1.add_vertex((514, 143))
poly_r1.add_vertex((567, 144))
poly_r1.add_vertex((567, 171))
poly_r1.add_vertex((503, 171))
poly_r1.add_vertex((499, 170))
poly_r1.add_vertex((495, 167))
poly_r1.add_vertex((491, 164))
poly_r1.add_vertex((456, 134))
poly_r1.add_vertex((456, 171))
poly_r1.add_vertex((425, 170))
poly_r1.add_vertex((425, 72))
poly_r1.filled = True
poly_r1.color = 'yellow'
poly_r1.fill_color = 'yellow'
window.add(poly_r1)
poly_r2 = GPolygon()
poly_r2.add_vertex((456, 92))
poly_r2.add_vertex((482, 92))
poly_r2.add_vertex((487, 93))
poly_r2.add_vertex((491, 94))
poly_r2.add_vertex((495, 96))
poly_r2.add_vertex((497, 99))
poly_r2.add_vertex((498, 104))
poly_r2.add_vertex((497, 109))
poly_r2.add_vertex((495, 113))
poly_r2.add_vertex((491, 115))
poly_r2.add_vertex((487, 116))
poly_r2.add_vertex((456, 116))
poly_r2.add_vertex((456, 92))
poly_r2.add_vertex((463, 98))
poly_r2.add_vertex((482, 98))
poly_r2.add_vertex((488, 100))
poly_r2.add_vertex((490, 103))
poly_r2.add_vertex((491, 105))
poly_r2.add_vertex((488, 109))
poly_r2.add_vertex((482, 110))
poly_r2.add_vertex((463, 110))
poly_r2.add_vertex((463, 98))
poly_r2.filled = True
poly_r2.color = 'yellow'
poly_r2.fill_color = 'yellow'
window.add(poly_r2)
# Drawing second R starting at x=327,y=185 (x-91 y+120)
x3 = -91
y3 = 120
poly_r3 = GPolygon()
poly_r3.add_vertex((418+x3, 65+y3))
poly_r3.add_vertex((494+x3, 65+y3))
poly_r3.add_vertex((501+x3, 66+y3))
poly_r3.add_vertex((506+x3, 67+y3))
poly_r3.add_vertex((510+x3, 68+y3))
poly_r3.add_vertex((515+x3, 70+y3))
poly_r3.add_vertex((521+x3, 75+y3))
poly_r3.add_vertex((526+x3, 80+y3))
poly_r3.add_vertex((529+x3, 85+y3))
poly_r3.add_vertex((531+x3, 89+y3))
poly_r3.add_vertex((533+x3, 94+y3))
poly_r3.add_vertex((533+x3, 100+y3))
poly_r3.add_vertex((532+x3, 106+y3))
poly_r3.add_vertex((531+x3, 111+y3))
poly_r3.add_vertex((530+x3, 118+y3))
poly_r3.add_vertex((526+x3, 124+y3))
poly_r3.add_vertex((521+x3, 129+y3))
poly_r3.add_vertex((514+x3, 134+y3))
poly_r3.add_vertex((512+x3, 136+y3))
poly_r3.add_vertex((574+x3, 138+y3))
poly_r3.add_vertex((575+x3, 178+y3))
poly_r3.add_vertex((512+x3, 178+y3))
poly_r3.add_vertex((500+x3, 177+y3))
poly_r3.add_vertex((496+x3, 176+y3))
poly_r3.add_vertex((493+x3, 175+y3))
poly_r3.add_vertex((488+x3, 173+y3))
poly_r3.add_vertex((464+x3, 150+y3))
poly_r3.add_vertex((464+x3, 178+y3))
poly_r3.add_vertex((418+x3, 178+y3))
poly_r3.add_vertex((418+x3, 65+y3))
poly_r3.add_vertex((425+x3, 72+y3))
poly_r3.add_vertex((496+x3, 72+y3))
poly_r3.add_vertex((501+x3, 73+y3))
poly_r3.add_vertex((506+x3, 74+y3))
poly_r3.add_vertex((511+x3, 76+y3))
poly_r3.add_vertex((515+x3, 80+y3))
poly_r3.add_vertex((519+x3, 83+y3))
poly_r3.add_vertex((522+x3, 87+y3))
poly_r3.add_vertex((525+x3, 92+y3))
poly_r3.add_vertex((526+x3, 98+y3))
poly_r3.add_vertex((526+x3, 105+y3))
poly_r3.add_vertex((524+x3, 111+y3))
poly_r3.add_vertex((522+x3, 117+y3))
poly_r3.add_vertex((518+x3, 122+y3))
poly_r3.add_vertex((513+x3, 126+y3))
poly_r3.add_vertex((505+x3, 131+y3))
poly_r3.add_vertex((503+x3, 132+y3))
poly_r3.add_vertex((505+x3, 136+y3))
poly_r3.add_vertex((508+x3, 139+y3))
poly_r3.add_vertex((512+x3, 142+y3))
poly_r3.add_vertex((514+x3, 143+y3))
poly_r3.add_vertex((567+x3, 144+y3))
poly_r3.add_vertex((567+x3, 171+y3))
poly_r3.add_vertex((503+x3, 171+y3))
poly_r3.add_vertex((499+x3, 170+y3))
poly_r3.add_vertex((495+x3, 167+y3))
poly_r3.add_vertex((491+x3, 164+y3))
poly_r3.add_vertex((456+x3, 134+y3))
poly_r3.add_vertex((456+x3, 171+y3))
poly_r3.add_vertex((425+x3, 170+y3))
poly_r3.add_vertex((425+x3, 72+y3))
poly_r3.filled = True
poly_r3.color = 'yellow'
poly_r3.fill_color = 'yellow'
window.add(poly_r3)
poly_r4 = GPolygon()
poly_r4.add_vertex((456+x3, 92+y3))
poly_r4.add_vertex((482+x3, 92+y3))
poly_r4.add_vertex((487+x3, 93+y3))
poly_r4.add_vertex((491+x3, 94+y3))
poly_r4.add_vertex((495+x3, 96+y3))
poly_r4.add_vertex((497+x3, 99+y3))
poly_r4.add_vertex((498+x3, 104+y3))
poly_r4.add_vertex((497+x3, 109+y3))
poly_r4.add_vertex((495+x3, 113+y3))
poly_r4.add_vertex((491+x3, 115+y3))
poly_r4.add_vertex((487+x3, 116+y3))
poly_r4.add_vertex((456+x3, 116+y3))
poly_r4.add_vertex((456+x3, 92+y3))
poly_r4.add_vertex((463+x3, 98+y3))
poly_r4.add_vertex((482+x3, 98+y3))
poly_r4.add_vertex((488+x3, 100+y3))
poly_r4.add_vertex((490+x3, 103+y3))
poly_r4.add_vertex((491+x3, 105+y3))
poly_r4.add_vertex((488+x3, 109+y3))
poly_r4.add_vertex((482+x3, 110+y3))
poly_r4.add_vertex((463+x3, 110+y3))
poly_r4.add_vertex((463+x3, 98+y3))
poly_r4.filled = True
poly_r4.color = 'yellow'
poly_r4.fill_color = 'yellow'
window.add(poly_r4)
"""
`Cargo SQL shortcut functions for creating tables, models, role, etc`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
The MIT License (MIT) © 2015 <NAME>
http://github.com/jaredlunde/cargo-orm
"""
import sys
from cargo.builder.casts import Cast
from cargo.builder.comments import Comment
from cargo.builder.databases import Database
from cargo.builder.domains import Domain
from cargo.builder.extensions import Extension
from cargo.builder.functions import Function
from cargo.builder.indexes import Index
from cargo.builder.operators import Operator
from cargo.builder.roles import Role, User
from cargo.builder.rules import Rule
from cargo.builder.schemas import Schema
from cargo.builder.sequences import Sequence
from cargo.builder.tables import Table
from cargo.builder.tablespaces import Tablespace
from cargo.builder.triggers import Trigger
from cargo.builder.types import Type, EnumType, RangeType
from cargo.builder.utils import *
from cargo.builder.views import View
__all__ = (
'create_cast',
'comment_on',
'create_database',
'create_domain',
'create_operator',
'create_extension',
'create_schema',
'create_index',
'create_sequence',
'create_function',
'create_table',
'create_type',
'create_range_type',
'create_enum_type',
'create_role',
'create_rule',
'create_tablespace',
'create_trigger',
'create_user',
'create_view'
)
def _cast_return(q, dry=False):
if not dry:
return q.execute()
return q
def comment_on(*args, dry=False, **kwargs):
""" `Create a Comment`
@orm: (:class:ORM)
@name: (#str) name of the table
@type: (#str) |COLUMN|, |TABLE|, |SCHEMA|, etc...
@identifier: (#str) if you're commenting on a cast for instance,
the identifier is |(source_type AS target_type)|, and on a
column |relation_name.column_name|.
See http://www.postgresql.org/docs/9.5/static/sql-comment.html
@comment: (#str) the comment content
"""
comment = Comment(*args, **kwargs)
return _cast_return(comment, dry)
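# Example sketch (assumes a connected :class:ORM instance named `orm`; names are illustrative):
#   comment_on(orm, name='users', type='TABLE',
#              comment='Registered application users', dry=True)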
def create_table(*args, dry=False, **kwargs):
""" `Create a Table`
@orm: (:class:ORM)
@name: (#str) name of the table
@*column: (:class:Field or :class:cargo.builders.tables.Column)
columns to add to the table with all constraints defined
within the object itself
@local: (#bool) |True| sets the |LOCAL| flag in
the |CREATE| clause
@temporary: (#bool) |True| sets the |TEMPORARY| flag in
the |CREATE| clause
@unlogged: (#bool) |True| sets the |UNLOGGED| flag in
the |CREATE| clause
@not_exists: (#bool) |True| sets the |IF NOT EXISTS| flag in
the |CREATE| clause
@storage_parameters: (#dict) |param_name: value|
@on_commit: (#str or :class:Clause) one of |PRESERVE ROWS|,
|DELETE ROWS|, |DROP|
@inherits: (#str or #tuple(#str)) name or names of the tables to
inherit from
@tablespace: (#str) name of the tablespace
@type_name: (#str) creates a typed table, which takes its
structure from the specified composite type
@like: (#str or :class:BaseExpression) specifies a table from which
the new table automatically copies all column names, their data
types, and their not-null constraints
@constraints: (#dict of {#str: #str or :class:BaseExpression})
pairs of |constraint_name: constraint_value| to add to the
table, e.g. |{check: (fielda > 10)}|
@**columns: (#list or #tuple) of |column_name=((opt_name, opt_val),)|
option #tuple pairs
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
table = Table(*args, **kwargs)
return _cast_return(table, dry)
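# Example sketch (assumes a connected ORM `orm`; the column option names below are
# illustrative placeholders for the |column_name=option-tuples| form described above):
#   create_table(orm, 'users', not_exists=True, dry=True,
#                id=(('type', 'serial'),), email=(('type', 'text'),))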
def create_extension(*args, dry=False, **kwargs):
""" @orm: (:class:ORM)
@name: (#str) name of the extension
@schema: (#str) schema name
@version: (#str) value for |VERSION| clause
@old_version: (#str) value for |FROM| clause
@not_exists: (#bool) True to include |IF NOT EXISTS| clause
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
ext = Extension(*args, **kwargs)
return _cast_return(ext, dry)
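# Example sketch (assumes a connected ORM instance `orm`):
#   create_extension(orm, 'hstore', not_exists=True, dry=True)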
def create_schema(orm, name, authorization=None, not_exists=True, dry=False):
""" @orm: (:class:ORM)
@name: (#str) name of the schema
@authorization: (#str) username to create a schema for
@not_exists: (#bool) adds |IF NOT EXISTS| clause to the statement
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
schema = Schema(orm, name, authorization, not_exists)
return _cast_return(schema, dry)
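# Example sketch: build the CREATE SCHEMA statement without executing it (dry run).
#   create_schema(orm, 'analytics', authorization='report_user', dry=True)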
def create_index(*args, dry=False, **kwargs):
""" @orm: (:class:ORM)
@field: (#str or :class:Field) the field to create an index on
@method: (#str) type of index to create
@name: (#str) name of the index, one will be autogenerated if not
given
@table: (#str) table to create the index on
@collate: (#str) collation for |COLLATE| clause
@order: (#str or :class:Clause) |ASC| or |DESC|
@nulls: (#str or :class:Clause) |FIRST| or |LAST|
@unique: (#bool) True to create a unique index
@concurrent: (#bool) True to concurrently create the index
@buffering: (#bool) False to set |BUFFERING OFF| for
gist indexes
@fastupdate: (#bool) True to implement |FASTUPDATE ON| for
gin indexes
@fillfactor: (#int [10-100]) fillfactor for an index is a percentage
that determines how full the index method will try to pack
index pages
@tablespace: (#str) name of the tablespace to create the index in
@partial: (#str or :class:BaseExpression) sets the |WHERE| clause
for partial indexes
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
index = Index(*args, **kwargs)
return _cast_return(index, dry)
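# Example sketch (assumes a connected ORM instance `orm`; table and field names are illustrative):
#   create_index(orm, 'email', table='users', method='btree', unique=True, dry=True)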
def create_sequence(*args, dry=False, **kwargs):
""" @orm: (:class:ORM)
@name: (#str) name of the sequence
@incr: (#int) specifies which value is added to the current
sequence value to create a new value. A positive value will
make an ascending sequence, a negative one a descending sequence.
The default value is 1.
@minval: (#int) The optional clause MINVALUE minvalue determines the
minimum value a sequence can generate. If this clause is not
supplied or NO MINVALUE is specified, then defaults will be used.
The defaults are 1 and -2^63-1 for ascending and descending
sequences, respectively.
@start: (#int) allows the sequence to begin anywhere. The default
starting value is minvalue for ascending sequences and maxvalue
for descending ones.
@cache: (#int) specifies how many sequence numbers are to be
preallocated and stored in memory for faster access.
The minimum value is 1 (only one value can be generated at a
time, i.e., no cache), and this is also the default.
@cycle: (#bool) The CYCLE option allows the sequence to wrap around
when the maxvalue or minvalue has been reached by an ascending or
descending sequence respectively. If the limit is reached, the
next number generated will be the minvalue or maxvalue,
respectively.
@owned_by: (#str or :class:Field) The OWNED BY option causes the
sequence to be associated with a specific table column, such
that if that column (or its whole table) is dropped, the
sequence will be automatically dropped as well. The specified
table must have the same owner and be in the same schema as
the sequence. OWNED BY NONE, the default, specifies that there
is no such association.
@not_exists: (#bool) |True| to add |IF NOT EXISTS| clause
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
sequence = Sequence(*args, **kwargs)
return _cast_return(sequence, dry)
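# Example sketch (assumes a connected ORM instance `orm`; keyword names follow the docstring above):
#   create_sequence(orm, 'order_id_seq', incr=1, start=1000, cache=50, dry=True)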
def create_function(*args, dry=False, **kwargs):
""" @orm: (:class:ORM)
@function: (#str or :class:Function) name of the function with
arguments e.g. |func(arg1 int, arg2 text)| or
|Function('func', safe('arg1 int'), safe('arg2 text'))|
@arg: (#str or :class:BaseExpression) function arguments in the form of
|[argmode ] [ argname ] argtype [ { DEFAULT | = } default_expr|
@expression: (#str or :class:BaseExpression or :class:Query)
the function context. This is the expression that gets executed
when the function is called.
@returns: (#str) data type name
@language: (#str) name of the language that the function is
implemented in
@*opt: (#str) option flags to implement in the query after |LANGUAGE|
e.g. |WINDOW|
@**opts: |option_name=option_value| pairs to implement in the query
after |LANGUAGE| e.g. |cost=0.0025|
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
function = Function(*args, **kwargs)
return _cast_return(function, dry)
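# Example sketch (keyword names taken from the docstring above; assumes a connected ORM `orm`):
#   create_function(orm, 'add_ints(a int, b int)', expression='SELECT a + b',
#                   returns='int', language='sql', dry=True)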
def create_type(*args, dry=False, **kwargs):
""" @orm: (:class:ORM)
@name: (#str) name of the type
@*opt: (#str or :class:Clause) type options
* |PASSEDBYVALUE|
@attrs: (#tuple(#tuple)) #tuple pairs of |(attr_name, data_type, *opt)|
- creates types from attributes e.g.
|CREATE TYPE compfoo AS (f1 int, f2 text);|
@**opts: (#str or :class:Function) type options |name=value|
* INPUT = input_function,
* OUTPUT = output_function
* RECEIVE = receive_function
* SEND = send_function
* TYPMOD_IN = type_modifier_input_function
* TYPMOD_OUT = type_modifier_output_function
* ANALYZE = analyze_function
* INTERNALLENGTH = { internallength | VARIABLE }
* ALIGNMENT = alignment
* STORAGE = storage
* LIKE = like_type
* CATEGORY = category
* PREFERRED = preferred
* DEFAULT = default
* ELEMENT = element
* DELIMITER = delimiter
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
type = Type(*args, **kwargs)
return _cast_return(type, dry)
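# Example sketch, based on the composite-type form shown in the docstring above:
#   create_type(orm, 'compfoo', attrs=(('f1', 'int'), ('f2', 'text')), dry=True)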
def create_enum_type(orm, name, *types, dry=False):
""" @orm: (:class:ORM)
@name: (#str) the name of the enum type
@*types: (#str) the member values of the enum type
@dry: (#bool) |True| to execute the query before returning
-> :class:BaseCreator if dry is |False|, otherwise the client cursor is
returned
"""
# Assumed completion: mirrors the pattern of the other create_* helpers above
enum = EnumType(orm, name, *types)
return _cast_return(enum, dry)
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
Edited by <NAME>
------------------------------------------------------------
Usage: import the module, or run from the command line as such:
# Train a new model from scratch
train --dataset=../Master_Thesis_GvA_project/data/4_external --model=none
# Train a new model starting from pre-trained COCO weights
TODO: fix COCO .pth to match dimensions of this network
CURRENTLY NOT WORKING: train --dataset=../Master_Thesis_GvA_project/data/4_external --model=coco
# Train a new model starting from ImageNet weights
train --dataset=../Master_Thesis_GvA_project/data/4_external --model=imagenet
# Continue training a model that you had trained earlier
train --dataset=../Master_Thesis_GvA_project/data/4_external --model=/path/to/weights.h5
# Continue training the last model you trained
train --dataset=../Master_Thesis_GvA_project/data/4_external --model=last
# Run COCO evaluation with validation set on last trained model
evaluate --dataset=../Master_Thesis_GvA_project/data/4_external --model=last --val_test=validation
# Run COCO evaluation with test set
evaluate --dataset=../Master_Thesis_GvA_project/data/4_external --model=last --val_test=test
# Close to deterministic behaviour by setting seed for both train and evaluate
--random=1
"""
import datetime
from distutils.util import strtobool
import os
import numpy as np
import pandas as pd
import pickle
import random
import sys
import time
import torch
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from config import Config
import utils
import model as modellib
import visualize
# Root directory of the project
ROOT_DIR = os.getcwd()
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "models/mask_rcnn_coco.pth")
IMAGENET_MODEL_PATH = os.path.join(ROOT_DIR, "models/resnet50_imagenet.pth")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2019"
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, class_ids=None, return_coco=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (training, validation, test)
year: XX Not implemented. What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
class_map: XX Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
"""
if subset == "training":
coco_file = COCO("{}/Extension_75_{}.json".format(dataset_dir, subset))
else:
coco_file = COCO("{}/GT_{}_(new-split).json".format(dataset_dir, subset))
# coco_file = COCO("{}/GT_{}_(new-split).json".format(dataset_dir, subset))
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco_file.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for i in class_ids:
image_ids.extend(list(coco_file.getImgIds(catIds=[i])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco_file.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("PanorAMS", i, coco_file.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"PanorAMS", image_id=i,
path=coco_file.imgs[i]['file_name'],
width=coco_file.imgs[i]["width"],
height=coco_file.imgs[i]["height"],
annotations=coco_file.loadAnns(coco_file.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco_file
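# Example sketch (illustrative names): building the validation dataset for evaluation.
# `prepare()` is assumed to come from the utils.Dataset base class.
#   dataset_val = CocoDataset()
#   coco = dataset_val.load_coco(args.dataset, "validation", return_coco=True)
#   dataset_val.prepare()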
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores):
"""Arrange results to match COCO specs in http://cocodataset.org/#format
rois: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
image_ids: [num_instances]
class_ids: [num_instances]
scores: (optional) confidence scores for each box
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "PanorAMS"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score
}
results.append(result)
return results
def evaluate_coco(model, dataset, coco, display, iou_thresh, val_test, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
print("Running COCO evaluation on {} images.".format(len(image_ids)))
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[i]["id"] for i in image_ids]
t_prediction = 0
t_start = time.time()
# Retrieve noisy dataset
if display == "GT+noise":
noisy_dataset = utils.retrieve_boxes_csv("../Master_Thesis_GvA_project/data/4_external/"
"PanorAMS_boxes_lichtmast.csv", coco_image_ids)
results = []
columns = ["Image id", "Bbox", "Score"]
csv_results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image])
t_prediction += (time.time() - t)
if r:
r = r[0] # Unpack results
# Visualize result
if display == "True":
visualize.display_instances(image, r['rois'], r['class_ids'], dataset.class_names, r['scores'],
title="Detections in "+str(coco_image_ids[image_id]))
elif display == "GT":
gt_boxes, _ = utils.extract_bboxes_coco(dataset.image_info[image_id])
visualize.display_instances_gt(image, gt_boxes, r['rois'], r['class_ids'], r['scores'],
save_dir=model.log_dir.split('_')[1],
title="Detections + GT in "+str(coco_image_ids[image_id]))
elif display == "GT+noise":
gt_boxes, _ = utils.extract_bboxes_coco(dataset.image_info[image_id])
noisy_boxes = noisy_dataset.get(coco_image_ids[image_id])
noisy_boxes = noisy_boxes if noisy_boxes is not None else np.array([])
visualize.display_instances_gt_noise(image, gt_boxes, noisy_boxes, r['rois'], r['class_ids'],
r['scores'], save_dir=model.log_dir.split('_')[1],
title="Detections + GT + noise in "+str(coco_image_ids[image_id]))
# Convert results to COCO format
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"])
results.extend(image_results)
# Save in csv format
detections = r['rois'].tolist()
for j in range(len(detections)):
csv_results.append([coco_image_ids[i], detections[j], r["scores"][j]])
if len(results) == 0:
raise ValueError("No objects were detected...")
# Save csv
df = pd.DataFrame(csv_results, columns=columns)
df.to_csv(os.path.join(model.log_dir, "results-{}-IoU{},{}.csv".format(val_test, iou_thresh[0], iou_thresh[1])))
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.params.iouThrs = np.linspace(iou_thresh[0], iou_thresh[1],
int(np.round((iou_thresh[1] - iou_thresh[0]) / .05)) + 1, endpoint=True)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}s. Average {}s/image".format(
round(t_prediction, 2), round(t_prediction / len(image_ids), 2)))
print("Total time: ", round(time.time() - t_start, 2), "s\n")
############################################################
# Training
############################################################
def find_last(models_dir):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(models_dir))[1]
# key = self.config.NAME.lower()
# dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names, key=lambda f: f[-13:])
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(models_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
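# Example sketch: locate the most recent checkpoint under the default logs directory.
#   model_dir, checkpoint_path = find_last(DEFAULT_LOGS_DIR)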
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on a MS-COCO format dataset.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate'")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO format dataset')
parser.add_argument('--model', required=False,
metavar="/path/to/weights.pth",
help="Path to weights .pth file, 'none', 'last', or 'imagenet'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--val_test', required=False,
default='validation',
metavar='"validation" or "test"',
help="Evaluate with test or validation set (default=validation)")
parser.add_argument('--random', required=False,
default=None,
metavar='Any integer or leave empty',
help='Set random seed for consistent results. For randomness set to 0, leave empty or remove '
'argument (default=None)')
parser.add_argument('--schedule', required=False,
default='example',
metavar='"example", "all", "3+", "4+", "heads"',
help='specify training schedule (default=example)')
parser.add_argument('--display', required=False,
default=False,
metavar='"True" or anything else',
help='Whether to display detection results per image. Only works when "True".')
parser.add_argument('--iou', required=False,
default='[0.5,0.95]',
metavar='[min,max]',
help='List of two values with the minimum and maximum threshold for Intersection over Union '
'(IoU) scoring. Step size is 0.05')
args = parser.parse_args()
exec("args.iou=" + args.iou) # convert string to actual list
del parser
# Set random seed
if args.random:
args.random = int(args.random)
random.seed(args.random)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.random)
torch.cuda.manual_seed_all(args.random)
np.random.seed(args.random)
print("Random seed PyTorch, NumPy, and random set to {}".format(args.random))
# Select weights file to load
if isinstance(args.model, str):
model_command = args.model.lower()
if model_command == "last":
# Find last trained weights
model_dir, model_path = find_last(args.logs)
elif model_command[-3:] == 'pth':
model_path = args.model
model_dir = model_path.split(os.path.basename(model_path))[0]
elif model_command == "coco":
# Start from COCO trained weights - not working yet
model_path = COCO_MODEL_PATH
model_dir = os.path.join(model_path.split(os.path.basename(model_path))[0], model_command)
elif model_command == "imagenet":
# Start from ImageNet trained weights
model_path = IMAGENET_MODEL_PATH
model_dir = os.path.join(model_path.split(os.path.basename(model_path))[0], model_command)
else:
model_path = args.model
else:
model_path = ""
# Configurations
if args.command == "train" and args.model != "last":
config = Config()
# Add starting model name to log folder
if isinstance(args.model, str):
self.extrapolation_method = None
self.final_energy_index = None
self.geography = None
self.geography_map_key = None
self.input_type = None
self.interpolation_method = None
self.is_stock_dependent = None
self.other_index_1 = None
self.other_index_2 = None
self.subsector = None
self.unit = None
def set_args(self, scenario, demand_technology_index=None, driver_1=None, driver_2=None, driver_3=None,
driver_denominator_1=None, driver_denominator_2=None, extrapolation_growth=None,
extrapolation_method=None, final_energy_index=None, geography=None,
geography_map_key=None, input_type=None, interpolation_method=None,
is_stock_dependent=None, other_index_1=None, other_index_2=None, subsector=None,
unit=None):
self.check_scenario(scenario)
self.demand_technology_index = demand_technology_index
self.driver_1 = driver_1
self.driver_2 = driver_2
self.driver_3 = driver_3
self.driver_denominator_1 = driver_denominator_1
self.driver_denominator_2 = driver_denominator_2
self.extrapolation_growth = extrapolation_growth
self.extrapolation_method = extrapolation_method
self.final_energy_index = final_energy_index
self.geography = geography
self.geography_map_key = geography_map_key
self.input_type = input_type
self.interpolation_method = interpolation_method
self.is_stock_dependent = is_stock_dependent
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.subsector = subsector
self.unit = unit
def init_from_tuple(self, tup, scenario, **kwargs):
(subsector, is_stock_dependent, input_type, unit, driver_denominator_1,
driver_denominator_2, driver_1, driver_2, driver_3, geography, final_energy_index,
demand_technology_index, other_index_1, other_index_2, interpolation_method,
extrapolation_method, extrapolation_growth, geography_map_key,) = tup
self.set_args(scenario, demand_technology_index=demand_technology_index, driver_1=driver_1, driver_2=driver_2,
driver_3=driver_3, driver_denominator_1=driver_denominator_1,
driver_denominator_2=driver_denominator_2, extrapolation_growth=extrapolation_growth,
extrapolation_method=extrapolation_method, final_energy_index=final_energy_index,
geography=geography, geography_map_key=geography_map_key, input_type=input_type,
interpolation_method=interpolation_method, is_stock_dependent=is_stock_dependent,
other_index_1=other_index_1, other_index_2=other_index_2, subsector=subsector, unit=unit)
class DemandServiceEfficiency(DataObject):
_instances_by_key = {}
_table_name = "DemandServiceEfficiency"
_key_col = "subsector"
_cols = ["denominator_unit", "energy_unit", "extrapolation_growth", "extrapolation_method",
"geography", "geography_map_key", "interpolation_method", "other_index_1",
"other_index_2", "sensitivity", "subsector"]
_df_cols = ["gau", "value", "oth_2", "oth_1", "year", "final_energy"]
_df_filters = []
_data_table_name = None
def __init__(self, subsector, scenario):
DataObject.__init__(self, subsector, scenario)
DemandServiceEfficiency._instances_by_key[self._key] = self
self.denominator_unit = None
self.energy_unit = None
self.extrapolation_growth = None
self.extrapolation_method = None
self.geography = None
self.geography_map_key = None
self.interpolation_method = None
self.other_index_1 = None
self.other_index_2 = None
self.sensitivity = None
self.subsector = None
def set_args(self, scenario, denominator_unit=None, energy_unit=None, extrapolation_growth=None,
extrapolation_method=None, geography=None, geography_map_key=None,
interpolation_method=None, other_index_1=None, other_index_2=None, sensitivity=None,
subsector=None):
self.check_scenario(scenario)
self.denominator_unit = denominator_unit
self.energy_unit = energy_unit
self.extrapolation_growth = extrapolation_growth
self.extrapolation_method = extrapolation_method
self.geography = geography
self.geography_map_key = geography_map_key
self.interpolation_method = interpolation_method
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.sensitivity = sensitivity
self.subsector = subsector
def init_from_tuple(self, tup, scenario, **kwargs):
(subsector, energy_unit, denominator_unit, geography, other_index_1, other_index_2,
interpolation_method, extrapolation_method, extrapolation_growth, geography_map_key,
sensitivity,) = tup
self.set_args(scenario, denominator_unit=denominator_unit, energy_unit=energy_unit,
extrapolation_growth=extrapolation_growth, extrapolation_method=extrapolation_method,
geography=geography, geography_map_key=geography_map_key,
interpolation_method=interpolation_method, other_index_1=other_index_1,
other_index_2=other_index_2, sensitivity=sensitivity, subsector=subsector)
class DemandServiceLink(DataObject):
_instances_by_key = {}
_table_name = "DemandServiceLink"
_key_col = "name"
_cols = ["linked_subsector", "name", "service_demand_share", "subsector", "year"]
_df_cols = []
_df_filters = []
_data_table_name = None
def __init__(self, name, scenario):
DataObject.__init__(self, name, scenario)
DemandServiceLink._instances_by_key[self._key] = self
self.linked_subsector = None
self.name = None
self.service_demand_share = None
self.subsector = None
self.year = None
def set_args(self, scenario, linked_subsector=None, name=None, service_demand_share=None, subsector=None, year=None):
self.check_scenario(scenario)
self.linked_subsector = linked_subsector
self.name = name
self.service_demand_share = service_demand_share
self.subsector = subsector
self.year = year
def init_from_tuple(self, tup, scenario, **kwargs):
(name, subsector, linked_subsector, service_demand_share, year,) = tup
self.set_args(scenario, linked_subsector=linked_subsector, name=name, service_demand_share=service_demand_share,
subsector=subsector, year=year)
class DemandStock(DataObject):
_instances_by_key = {}
_table_name = "DemandStock"
_key_col = "subsector"
_cols = ["demand_stock_unit_type", "driver_1", "driver_2", "driver_3", "driver_denominator_1",
"driver_denominator_2", "extrapolation_growth", "extrapolation_method", "geography",
"geography_map_key", "input_type", "interpolation_method", "is_service_demand_dependent",
"other_index_1", "other_index_2", "specify_stocks_past_current_year", "subsector",
"time_unit", "unit"]
_df_cols = ["gau", "demand_technology", "value", "oth_2", "oth_1", "year", "sensitivity"]
_df_filters = []
_data_table_name = None
def __init__(self, subsector, scenario):
DataObject.__init__(self, subsector, scenario)
DemandStock._instances_by_key[self._key] = self
self.demand_stock_unit_type = None
self.driver_1 = None
self.driver_2 = None
self.driver_3 = None
self.driver_denominator_1 = None
self.driver_denominator_2 = None
self.extrapolation_growth = None
self.extrapolation_method = None
self.geography = None
self.geography_map_key = None
self.input_type = None
self.interpolation_method = None
self.is_service_demand_dependent = None
self.other_index_1 = None
self.other_index_2 = None
self.specify_stocks_past_current_year = None
self.subsector = None
self.time_unit = None
self.unit = None
def set_args(self, scenario, demand_stock_unit_type=None, driver_1=None, driver_2=None, driver_3=None,
driver_denominator_1=None, driver_denominator_2=None, extrapolation_growth=None,
extrapolation_method=None, geography=None, geography_map_key=None, input_type=None,
interpolation_method=None, is_service_demand_dependent=None, other_index_1=None,
other_index_2=None, specify_stocks_past_current_year=None, subsector=None,
time_unit=None, unit=None):
self.check_scenario(scenario)
self.demand_stock_unit_type = demand_stock_unit_type
self.driver_1 = driver_1
self.driver_2 = driver_2
self.driver_3 = driver_3
self.driver_denominator_1 = driver_denominator_1
self.driver_denominator_2 = driver_denominator_2
self.extrapolation_growth = extrapolation_growth
self.extrapolation_method = extrapolation_method
self.geography = geography
self.geography_map_key = geography_map_key
self.input_type = input_type
self.interpolation_method = interpolation_method
self.is_service_demand_dependent = is_service_demand_dependent
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.specify_stocks_past_current_year = specify_stocks_past_current_year
self.subsector = subsector
self.time_unit = time_unit
self.unit = unit
def init_from_tuple(self, tup, scenario, **kwargs):
(subsector, is_service_demand_dependent, driver_denominator_1, driver_denominator_2,
driver_1, driver_2, driver_3, geography, other_index_1, other_index_2, geography_map_key,
input_type, demand_stock_unit_type, unit, time_unit, interpolation_method,
extrapolation_method, extrapolation_growth, specify_stocks_past_current_year,) = tup
self.set_args(scenario, demand_stock_unit_type=demand_stock_unit_type, driver_1=driver_1, driver_2=driver_2,
driver_3=driver_3, driver_denominator_1=driver_denominator_1,
driver_denominator_2=driver_denominator_2, extrapolation_growth=extrapolation_growth,
extrapolation_method=extrapolation_method, geography=geography,
geography_map_key=geography_map_key, input_type=input_type,
interpolation_method=interpolation_method,
is_service_demand_dependent=is_service_demand_dependent, other_index_1=other_index_1,
other_index_2=other_index_2,
specify_stocks_past_current_year=specify_stocks_past_current_year, subsector=subsector,
time_unit=time_unit, unit=unit)
class DemandStockMeasures(DataObject):
_instances_by_key = {}
_table_name = "DemandStockMeasures"
_key_col = "name"
_cols = ["demand_technology", "extrapolation_growth", "extrapolation_method", "geography",
"interpolation_method", "name", "other_index_1", "subsector"]
_df_cols = ["gau", "oth_1", "value", "year"]
_df_filters = []
_data_table_name = None
def __init__(self, name, scenario):
DataObject.__init__(self, name, scenario)
DemandStockMeasures._instances_by_key[self._key] = self
self.demand_technology = None
self.extrapolation_growth = None
self.extrapolation_method = None
self.geography = None
self.interpolation_method = None
self.name = None
self.other_index_1 = None
self.subsector = None
def set_args(self, scenario, demand_technology=None, extrapolation_growth=None, extrapolation_method=None,
geography=None, interpolation_method=None, name=None, other_index_1=None, subsector=None):
self.check_scenario(scenario)
self.demand_technology = demand_technology
self.extrapolation_growth = extrapolation_growth
self.extrapolation_method = extrapolation_method
self.geography = geography
self.interpolation_method = interpolation_method
self.name = name
self.other_index_1 = other_index_1
self.subsector = subsector
def init_from_tuple(self, tup, scenario, **kwargs):
(name, subsector, geography, other_index_1, demand_technology, interpolation_method,
extrapolation_method, extrapolation_growth,) = tup
self.set_args(scenario, demand_technology=demand_technology, extrapolation_growth=extrapolation_growth,
extrapolation_method=extrapolation_method, geography=geography,
interpolation_method=interpolation_method, name=name, other_index_1=other_index_1,
subsector=subsector)
class DemandSubsectors(DataObject):
_instances_by_key = {}
_table_name = "DemandSubsectors"
_key_col = "name"
_cols = ["cost_of_capital", "is_active", "max_lag_hours", "max_lead_hours", "name", "sector",
"shape"]
_df_cols = []
_df_filters = []
_data_table_name = None
def __init__(self, name, scenario):
DataObject.__init__(self, name, scenario)
DemandSubsectors._instances_by_key[self._key] = self
self.cost_of_capital = None
self.is_active = None
self.max_lag_hours = None
self.max_lead_hours = None
self.name = None
self.sector = None
self.shape = None
def set_args(self, scenario, cost_of_capital=None, is_active=None, max_lag_hours=None, max_lead_hours=None, name=None,
sector=None, shape=None):
self.check_scenario(scenario)
self.cost_of_capital = cost_of_capital
self.is_active = is_active
self.max_lag_hours = max_lag_hours
self.max_lead_hours = max_lead_hours
self.name = name
self.sector = sector
self.shape = shape
def init_from_tuple(self, tup, scenario, **kwargs):
(name, sector, cost_of_capital, is_active, shape, max_lead_hours, max_lag_hours,) = tup
self.set_args(scenario, cost_of_capital=cost_of_capital, is_active=is_active, max_lag_hours=max_lag_hours,
max_lead_hours=max_lead_hours, name=name, sector=sector, shape=shape)
class DemandTechs(DataObject):
_instances_by_key = {}
_table_name = "DemandTechs"
_key_col = "name"
_cols = ["additional_description", "cost_of_capital", "demand_tech_unit_type", "lifetime_variance",
"linked", "max_lag_hours", "max_lead_hours", "max_lifetime", "mean_lifetime",
"min_lifetime", "name", "shape", "source", "stock_decay_function", "stock_link_ratio",
"subsector", "time_unit", "unit"]
_df_cols = []
_df_filters = []
_data_table_name = None
def __init__(self, name, scenario):
DataObject.__init__(self, name, scenario)
DemandTechs._instances_by_key[self._key] = self
self.additional_description = None
self.cost_of_capital = None
self.demand_tech_unit_type = None
self.lifetime_variance = None
self.linked = None
self.max_lag_hours = None
self.max_lead_hours = None
self.max_lifetime = None
self.mean_lifetime = None
self.min_lifetime = None
self.name = None
self.shape = None
self.source = None
self.stock_decay_function = None
self.stock_link_ratio = None
self.subsector = None
self.time_unit = None
self.unit = None
def set_args(self, scenario, additional_description=None, cost_of_capital=None, demand_tech_unit_type=None,
lifetime_variance=None, linked=None, max_lag_hours=None, max_lead_hours=None,
max_lifetime=None, mean_lifetime=None, min_lifetime=None, name=None, shape=None,
source=None, stock_decay_function=None, stock_link_ratio=None, subsector=None,
time_unit=None, unit=None):
self.check_scenario(scenario)
self.additional_description = additional_description
self.cost_of_capital = cost_of_capital
self.demand_tech_unit_type = demand_tech_unit_type
self.lifetime_variance = lifetime_variance
self.linked = linked
self.max_lag_hours = max_lag_hours
self.max_lead_hours = max_lead_hours
self.max_lifetime = max_lifetime
self.mean_lifetime = mean_lifetime
self.min_lifetime = min_lifetime
self.name = name
self.shape = shape
self.source = source
self.stock_decay_function = stock_decay_function
self.stock_link_ratio = stock_link_ratio
self.subsector = subsector
self.time_unit = time_unit
self.unit = unit
def init_from_tuple(self, tup, scenario, **kwargs):
(name, linked, stock_link_ratio, subsector, min_lifetime, max_lifetime, source,
additional_description, demand_tech_unit_type, unit, time_unit, cost_of_capital,
stock_decay_function, mean_lifetime, lifetime_variance, shape, max_lead_hours,
max_lag_hours,) = tup
self.set_args(scenario, additional_description=additional_description, cost_of_capital=cost_of_capital,
demand_tech_unit_type=demand_tech_unit_type, lifetime_variance=lifetime_variance,
linked=linked, max_lag_hours=max_lag_hours, max_lead_hours=max_lead_hours,
max_lifetime=max_lifetime, mean_lifetime=mean_lifetime, min_lifetime=min_lifetime,
name=name, shape=shape, source=source, stock_decay_function=stock_decay_function,
stock_link_ratio=stock_link_ratio, subsector=subsector, time_unit=time_unit, unit=unit)
class DemandTechsAuxEfficiency(DataObject):
_instances_by_key = {}
_table_name = "DemandTechsAuxEfficiency"
_key_col = "demand_technology"
_cols = ["age_growth_or_decay", "age_growth_or_decay_type", "definition",
"demand_tech_efficiency_types", "demand_technology", "denominator_unit",
"extrapolation_growth", "extrapolation_method", "final_energy", "geography",
"interpolation_method", "is_numerator_service", "numerator_unit", "other_index_1",
"other_index_2", "reference_tech", "shape"]
_df_cols = ["vintage", "gau", "value", "oth_2", "oth_1", "sensitivity"]
_df_filters = []
_data_table_name = None
def __init__(self, demand_technology, scenario):
DataObject.__init__(self, demand_technology, scenario)
DemandTechsAuxEfficiency._instances_by_key[self._key] = self
self.age_growth_or_decay = None
self.age_growth_or_decay_type = None
self.definition = None
self.demand_tech_efficiency_types = None
self.demand_technology = None
self.denominator_unit = None
self.extrapolation_growth = None
self.extrapolation_method = None
self.final_energy = None
self.geography = None
self.interpolation_method = None
self.is_numerator_service = None
self.numerator_unit = None
self.other_index_1 = None
self.other_index_2 = None
self.reference_tech = None
self.shape = None
def set_args(self, scenario, age_growth_or_decay=None, age_growth_or_decay_type=None, definition=None,
demand_tech_efficiency_types=None, demand_technology=None, denominator_unit=None,
extrapolation_growth=None, extrapolation_method=None, final_energy=None, geography=None,
interpolation_method=None, is_numerator_service=None, numerator_unit=None,
other_index_1=None, other_index_2=None, reference_tech=None, shape=None):
self.check_scenario(scenario)
self.age_growth_or_decay = age_growth_or_decay
self.age_growth_or_decay_type = age_growth_or_decay_type
self.definition = definition
self.demand_tech_efficiency_types = demand_tech_efficiency_types
self.demand_technology = demand_technology
self.denominator_unit = denominator_unit
self.extrapolation_growth = extrapolation_growth
self.extrapolation_method = extrapolation_method
self.final_energy = final_energy
self.geography = geography
self.interpolation_method = interpolation_method
self.is_numerator_service = is_numerator_service
self.numerator_unit = numerator_unit
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.reference_tech = reference_tech
self.shape = shape
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BackendAddressPoolArgs', 'BackendAddressPool']
@pulumi.input_type
class BackendAddressPoolArgs:
def __init__(__self__, *,
loadbalancer_id: pulumi.Input[str],
backend_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BackendAddressPool resource.
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Backend Address Pool.
:param pulumi.Input[str] name: Specifies the name of the Backend Address Pool.
"""
pulumi.set(__self__, "loadbalancer_id", loadbalancer_id)
if backend_addresses is not None:
warnings.warn("""This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""", DeprecationWarning)
pulumi.log.warn("""backend_addresses is deprecated: This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""")
if backend_addresses is not None:
pulumi.set(__self__, "backend_addresses", backend_addresses)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
warnings.warn("""This field is no longer used and will be removed in the next major version of the Azure Provider""", DeprecationWarning)
pulumi.log.warn("""resource_group_name is deprecated: This field is no longer used and will be removed in the next major version of the Azure Provider""")
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> pulumi.Input[str]:
"""
The ID of the Load Balancer in which to create the Backend Address Pool.
"""
return pulumi.get(self, "loadbalancer_id")
@loadbalancer_id.setter
def loadbalancer_id(self, value: pulumi.Input[str]):
pulumi.set(self, "loadbalancer_id", value)
@property
@pulumi.getter(name="backendAddresses")
def backend_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]]:
return pulumi.get(self, "backend_addresses")
@backend_addresses.setter
def backend_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]]):
pulumi.set(self, "backend_addresses", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Backend Address Pool.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class _BackendAddressPoolState:
def __init__(__self__, *,
backend_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]] = None,
backend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outbound_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BackendAddressPool resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] backend_ip_configurations: The Backend IP Configurations associated with this Backend Address Pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancing_rules: The Load Balancing Rules associated with this Backend Address Pool.
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Backend Address Pool.
:param pulumi.Input[str] name: Specifies the name of the Backend Address Pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] outbound_rules: An array of the Load Balancing Outbound Rules associated with this Backend Address Pool.
"""
if backend_addresses is not None:
warnings.warn("""This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""", DeprecationWarning)
pulumi.log.warn("""backend_addresses is deprecated: This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""")
if backend_addresses is not None:
pulumi.set(__self__, "backend_addresses", backend_addresses)
if backend_ip_configurations is not None:
pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
if load_balancing_rules is not None:
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if loadbalancer_id is not None:
pulumi.set(__self__, "loadbalancer_id", loadbalancer_id)
if name is not None:
pulumi.set(__self__, "name", name)
if outbound_rules is not None:
pulumi.set(__self__, "outbound_rules", outbound_rules)
if resource_group_name is not None:
warnings.warn("""This field is no longer used and will be removed in the next major version of the Azure Provider""", DeprecationWarning)
pulumi.log.warn("""resource_group_name is deprecated: This field is no longer used and will be removed in the next major version of the Azure Provider""")
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="backendAddresses")
def backend_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]]:
return pulumi.get(self, "backend_addresses")
@backend_addresses.setter
def backend_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendAddressPoolBackendAddressArgs']]]]):
pulumi.set(self, "backend_addresses", value)
@property
@pulumi.getter(name="backendIpConfigurations")
def backend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The Backend IP Configurations associated with this Backend Address Pool.
"""
return pulumi.get(self, "backend_ip_configurations")
@backend_ip_configurations.setter
def backend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "backend_ip_configurations", value)
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The Load Balancing Rules associated with this Backend Address Pool.
"""
return pulumi.get(self, "load_balancing_rules")
@load_balancing_rules.setter
def load_balancing_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "load_balancing_rules", value)
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Load Balancer in which to create the Backend Address Pool.
"""
return pulumi.get(self, "loadbalancer_id")
@loadbalancer_id.setter
def loadbalancer_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "loadbalancer_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Backend Address Pool.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="outboundRules")
def outbound_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of the Load Balancing Outbound Rules associated with this Backend Address Pool.
"""
return pulumi.get(self, "outbound_rules")
@outbound_rules.setter
def outbound_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "outbound_rules", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
class BackendAddressPool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolBackendAddressArgs']]]]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Load Balancer Backend Address Pool.
> **NOTE:** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration Attached
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool", loadbalancer_id=example_load_balancer.id)
```
## Import
Load Balancer Backend Address Pools can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/backendAddressPool:BackendAddressPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Backend Address Pool.
:param pulumi.Input[str] name: Specifies the name of the Backend Address Pool.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackendAddressPoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Load Balancer Backend Address Pool.
> **NOTE:** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration Attached
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool", loadbalancer_id=example_load_balancer.id)
```
## Import
Load Balancer Backend Address Pools can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/backendAddressPool:BackendAddressPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1
```
:param str resource_name: The name of the resource.
:param BackendAddressPoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackendAddressPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackendAddressPoolBackendAddressArgs']]]]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackendAddressPoolArgs.__new__(BackendAddressPoolArgs)
if backend_addresses is not None and not opts.urn:
warnings.warn("""This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""", DeprecationWarning)
pulumi.log.warn("""backend_addresses is deprecated: This field is non-functional and will be removed in version 3.0 of the Azure Provider - use the separate `azurerm_lb_backend_address_pool_address` resource instead.""")
__props__.__dict__["backend_addresses"] = backend_addresses
if loadbalancer_id is None and not opts.urn:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__.__dict__["loadbalancer_id"] = loadbalancer_id
__props__.__dict__["name"] = name
if resource_group_name is not None and not opts.urn:
warnings.warn("""This field is no longer used and will be removed in the next major version of the Azure Provider""", DeprecationWarning)
pulumi.log.warn("""resource_group_name | |
<filename>app/caffe_detector.py
#!/usr/bin/env python
import argparse
import os
import sys
import time
import glob
import datetime
import numpy as np
import json
import caffe
from caffe.proto import caffe_pb2
import multiprocessing as mp
import gdal
from gdalconst import *
import osr
import logging as log
import re
from string import Template
from functools import partial
import zipfile
import traceback
import ast
import scipy.misc
# For profiling the code
#from profilehooks import profile
# log.getLogger().setLevel(log.DEBUG)
# Suppress most caffe output
os.environ['GLOG_minloglevel'] = '2'
POLYGON_TEMPLATE = Template("POLYGON (($left $bottom, $left $top, $right $top, $right $bottom, $left $bottom))")
LABEL_ID_REGEX = re.compile(r'^n\d+\s*')
BASE_DIR='/mnt/work/'
INPUT_DIR_PATH = BASE_DIR+'input'
DEFAULT_STATUS_JSON_PATH = BASE_DIR+'status.json'
OUTPUT_VECTORS_DIR_PATH = BASE_DIR+'output/result/detections'
OUTPUT_VECTORS_FILE = 'detection-results.json'
OUTPUT_VECTORS_ZIP_PATH = BASE_DIR+'output/result/detections.zip'
RASTER_DIM = 252 + 28
RASTER_STRIDE = RASTER_DIM - 28
DEFAULT_THRESHOLD = 80.0
DEFAULT_WIN_SIZE = 150
DEFAULT_STEP_SIZE = 30
DEFAULT_MIN_PYRAMID_SIZE = 30
DEFAULT_PYRAMID_SCALE_FACTOR = 1.5
DEFAULT_GPU_FLAG = 'False'
DEFAULT_NUM_PROCESSES = 1
STATUS_SUCCESS = "success"
STATUS_FAILED = "failed"
status_dict = {
'status': STATUS_SUCCESS,
'reason': "Detection succeeded"
}
TIF_SUFFIX = ".tif"
CAFFEMODEL_SUFFIX = ".caffemodel"
DEPLOY_FILE_SUFFIX = "deploy.prototxt"
MEAN_FILE_SUFFIX = "mean.binaryproto"
LABELS_FILE_SUFFIX = "labels.txt"
CAFFE_GPU_BATCH_SIZE= 80
CAFFE_CPU_BATCH_SIZE= 40
def caffe_ms_band_window_transform(window, transformed_size, transformed_window, mean):
"""
@param window a (num_channels, height, width) numpy array, dtype=int or float
@param transformed_size A tuple (trans_width, trans_height) representing the size of the transformed image
@param transformed_window a (num_channels, trans_width, trans_height) numpy array, dtype must be float (32 or 64)
@param mean per-band mean values subtracted after resizing
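Example (illustrative shapes and mean values, not taken from the original pipeline):
window = np.zeros((3, 300, 300), dtype=np.float32) # a (bands, height, width) tile
out = np.zeros((3, 227, 227), dtype=np.float32)
caffe_ms_band_window_transform(window, (227, 227), out, mean=np.array([104., 117., 123.]))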
"""
for i_band in range(window.shape[0]):
transformed_window[i_band,:,:] = scipy.misc.imresize(window[i_band,:,:], transformed_size, interp='bilinear')
transformed_window[i_band,:,:] -= mean[i_band]
def caffe_window_transform_bw(window, transformed_size, transformed_window_bw, mean):
"""
@param window a (num_channels, height, width) numpy array, dtype=int or float
@param transformed_size A tuple (trans_width, trans_height) representing the size of the transformed image
@param transformed_window_bw a (1, trans_width, trans_height) numpy array, dtype must be float (32 or 64)
@param mean per-band mean values; mean[0] is subtracted from the single output band
"""
# Convert RGB to black and white
# ITU-R 601-2 luma transform:
# R*299./1000 + G*587./1000 + B*114./1000 (to match PIL.Image)
luma_coefs = [.299, .587, .114]
num_channels = window.shape[0]
if num_channels == 3:
transformed_window_bw[0,:,:] = 0.
for i_band in range(num_channels):
transformed_window_bw[0,:,:] += luma_coefs[i_band]*scipy.misc.imresize(window[i_band,:,:], transformed_size, interp='bilinear')
else:
transformed_window_bw[0,:,:] = scipy.misc.imresize(window[0,:,:], transformed_size, interp='bilinear')
# Subtract the mean
transformed_window_bw -= mean[0]
class GDALImage:
def __init__(self, imagefile, tilewidth=256, tileheight=256, strideX=None, strideY=None, bands=None, padWithZeros=False):
self.imagefile = imagefile
self.tilewidth = tilewidth
self.tileheight = tileheight
# Open dataset
self.dataset = gdal.Open(self.imagefile, gdal.GA_ReadOnly)
self.nbands = self.dataset.RasterCount
self.width = self.dataset.RasterXSize
self.height = self.dataset.RasterYSize
self.geoTransform = self.dataset.GetGeoTransform()
self.projRef = self.dataset.GetProjectionRef()
self.datatype = self.dataset.GetRasterBand(1).DataType
self.isByteImage = ( self.datatype == GDT_Byte )
self.padWithZeros = padWithZeros
self.strideX = strideX
if strideX == None:
self.strideX = self.tilewidth
self.strideY = strideY
if strideY == None:
self.strideY = self.tileheight
# Set up projections
self.spr = osr.SpatialReference( self.projRef )
self.geospr = self.spr.CloneGeogCS()
self.coordTfProjToGeo = osr.CoordinateTransformation( self.spr, self.geospr )
self.coordTfGeoToProj = osr.CoordinateTransformation( self.geospr, self.spr )
# Set up boundingBox
self.bb_x0 = 0
self.bb_y0 = 0
self.bb_x1 = self.width
self.bb_y1 = self.height
self.bands = []
if not bands is None:
# Verify that the bands are all less than the number of bands
for i_band in bands:
if i_band < self.nbands:
self.bands.append(i_band)
else:
error_msg = "Error: band {} not in image {}".format(str(i_band), imagefile)
log.error(error_msg)
raise RuntimeError(error_msg)
else:
self.bands = [i for i in range(self.nbands)]
# Convert to immutable tuple
self.bands = tuple(self.bands)
def setBoundingBox(self,x0,y0,x1,y1):
self.bb_x0 = int ( max( 0, x0 ) )
self.bb_y0 = int ( max( 0, y0 ) )
self.bb_x1 = int ( min( x1, self.width ) )
self.bb_y1 = int ( min( y1, self.height ) )
def setGeoBoundingBox(self,lon_ul,lat_ul,lon_lr,lat_lr):
x0,y0 = self.tfGeoToRaster(lon_ul, lat_ul)
x1,y1 = self.tfGeoToRaster(lon_lr, lat_lr)
self.setBoundingBox( min(x0,x1), min(y0,y1), max(x0,x1), max (y0,y1) )
def __str__(self):
return "%d,%d,%d" % ( self.width, self.height, self.nbands)
def nextTile(self):
y0 = self.bb_y0
while y0 < self.bb_y1:
x0 = self.bb_x0
y1 = min ( y0+self.tileheight, self.bb_y1 )
while x0 < self.bb_x1:
x1 = min ( x0+self.tilewidth, self.bb_x1 )
yield x0, y0, x1, y1
x0 = x0 + self.strideX
y0 = y0 + self.strideY
def nextDataTile(self):
for x0, y0, x1, y1 in self.nextTile():
yield self.readTile(x0, y0, x1, y1), x0, y0
def readTile(self, x0, y0, x1, y1):
data = self.dataset.ReadAsArray(x0, y0, x1-x0, y1-y0)
if len(data.shape) == 2: # only one band - extend to 3-dim
data = np.reshape(data, (1, data.shape[0], data.shape[1]))
else:
data = data[self.bands,:,:]
if self.padWithZeros:
if ( data.shape[1] < self.tileheight or data.shape[2] < self.tilewidth ):
tile = np.zeros( ( data.shape[0], self.tileheight, self.tilewidth), dtype=data.dtype )
tile[:,0:data.shape[1],0:data.shape[2]] = data[:,:,:]
data = tile
return data
def tfRasterToProj(self, x,y):
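# GDAL affine geotransform: geoX = GT[0] + x*GT[1] + y*GT[2]; geoY = GT[3] + x*GT[4] + y*GT[5]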
dfGeoX = self.geoTransform[0] + self.geoTransform[1] * x + self.geoTransform[2] * y;
dfGeoY = self.geoTransform[3] + self.geoTransform[4] * x + self.geoTransform[5] * y;
return dfGeoX, dfGeoY
def tfProjToRaster(self, projX, projY):
x = ( self.geoTransform[5] * ( projX - self.geoTransform[0] ) - self.geoTransform[2] * ( projY - self.geoTransform[3] ) ) / ( self.geoTransform[5] * self.geoTransform[1] + self.geoTransform[4] * self.geoTransform[2] )
y = (projY - self.geoTransform[3] - x*self.geoTransform[4] ) / self.geoTransform[5]
return x,y
def tfProjToGeo(self, projx, projy):
return self.coordTfProjToGeo.TransformPoint(projx, projy)
def tfGeoToProj(self, longitude, latitude):
return self.coordTfGeoToProj.TransformPoint(longitude, latitude)
def tfGeoToRaster(self, longitude, latitude):
proj = self.tfGeoToProj(longitude, latitude)
return self.tfProjToRaster(proj[0], proj[1])
def tfRasterToGeo(self,x,y):
proj = self.tfRasterToProj(x, y)
return self.tfProjToGeo( proj[0], proj[1] )
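# Illustrative sketch (hypothetical file path): iterate a raster in padded,
# overlapping tiles and map a tile origin back to lon/lat.
#
# img = GDALImage("/path/to/scene.tif", tilewidth=RASTER_DIM, tileheight=RASTER_DIM,
#                 strideX=RASTER_STRIDE, strideY=RASTER_STRIDE, padWithZeros=True)
# for tile, x0, y0 in img.nextDataTile():
#     lon, lat, _ = img.tfRasterToGeo(x0, y0)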
class PyramidWindowBatcher(object):
def __init__(self, pyramid, num_channels, window_shape, num_windows, max_batch_size=4096, mult_size=256, transform=caffe_ms_band_window_transform):
assert isinstance(pyramid, (Pyramid,))
self.pyramid = pyramid
self.mult_size = mult_size
# floor
num_mini_batches_max = int(max_batch_size/mult_size)
num_mini_batches_all = int(num_windows/mult_size)
# If num_windows isn't a multiple of mult_size
if num_windows % mult_size > 0:
num_mini_batches_all += 1
self.batch_size = min(num_mini_batches_max, num_mini_batches_all)*mult_size
self.num_channels = num_channels
self.window_shape = window_shape
self.window_batch = np.zeros((self.batch_size, self.num_channels) + self.window_shape, dtype=np.float32)
self.x_vals = np.zeros((self.batch_size), dtype=np.int)
self.y_vals = np.zeros((self.batch_size), dtype=np.int)
self.window_sizes = np.zeros((self.batch_size), dtype=np.int)
# for iterator
self.current_batch_num = 0
# for transformer
self.transform = transform
def window_iteration(self, image):
window_counter = 0
for win_size, win_step in self.pyramid:
for (x, y, window) in self.sliding_window(image, window_size=(win_size, win_size), step_size=win_step):
# Apply the transform to resize and swap axis of the windowed image
self.transform(window, self.window_shape, self.window_batch[window_counter,:,:,:])
self.x_vals[window_counter] = x
self.y_vals[window_counter] = y
self.window_sizes[window_counter] = win_size
window_counter += 1
if window_counter == self.get_batch_size():
window_counter = 0
yield self.window_batch, self.x_vals, self.y_vals, self.window_sizes, self.get_batch_size()
if window_counter > 0:
# batching finished return current state of window_batch
yield self.window_batch, self.x_vals, self.y_vals, self.window_sizes, window_counter
def get_batch_size(self):
return self.batch_size
def sliding_window(self, image, window_size, step_size):
for y in xrange(0, image.shape[1] - window_size[0] + 1, step_size):
for x in xrange(0, image.shape[2] - window_size[1] + 1, step_size):
yield (x, y, image[:, y:y + window_size[1], x:x + window_size[0]])
def iter_batches(self, image):
return self.window_iteration(image)
class CaffeBatchClassifier(object):
def __init__(self, caffe_models, deploy_files, label_files, mean_files, gpu_flag='False'):
self.caffe_net = None
self.caffe_net_models = []
self.caffe_models = caffe_models
self.deploy_files = deploy_files
self.label_files = label_files
self.mean_files = mean_files
# Perform checks on these arguments
self.check_valid()
self.num_models = len(self.caffe_models)
if gpu_flag.lower() == 'true':
self.caffe_batch_size = CAFFE_GPU_BATCH_SIZE
else:
self.caffe_batch_size = CAFFE_CPU_BATCH_SIZE
# Set gpu flags
if gpu_flag.lower() == 'true':
self.gpu_flag = True
caffe.set_mode_gpu()
else:
self.gpu_flag = False
caffe.set_mode_cpu()
self.loaded_model = -1
self.load_all_models()
self.setup_transformer()
#keep top 5 classes if training model contains more than 5 classes. Otherwise keep all classes.
self.top_n = 5 if len(self.labels) >5 else len(self.labels)
def check_valid(self):
pass
def get_num_models(self):
return self.num_models
def get_caffe_model(self, model_num):
return self.caffe_models[model_num]
def get_deploy_file(self, model_num):
return self.deploy_files[model_num]
def get_label_file(self, model_num):
return self.label_files[model_num]
def get_mean_file(self, model_num):
return self.mean_files[model_num]
def get_loaded_model_num(self):
return self.loaded_model
def set_loaded_model_num(self, model_num):
self.loaded_model = model_num
def get_transformer(self,):
return self.transformer
def setup_transformer(self,):
# Function pointer
with open(self.get_mean_file(0)) as infile:
blob = caffe_pb2.BlobProto()
data = infile.read()
blob.ParseFromString(data)
arr = np.array(caffe.io.blobproto_to_array(blob))
mean = arr[0].mean(1).mean(1)
if self.get_caffe_num_channels() == 1:
self.transformer = partial(caffe_window_transform_bw, mean=mean)
else:
# Use specified bands
self.transformer = partial(caffe_ms_band_window_transform, mean=mean)
def load_all_models(self):
for model_num in range(self.get_num_models()):
caffe_net = caffe.Net(self.get_deploy_file(model_num),
self.get_caffe_model(model_num),
caffe.TEST)
if model_num == 0:
# Size of output
self.caffe_input_shape = caffe_net.blobs['data'].data.shape
self.set_caffe_num_channels(self.caffe_input_shape[1])
self.set_caffe_window_size((self.caffe_input_shape[2],
self.caffe_input_shape[3]))
caffe_output_shape = caffe_net.blobs['prob'].data.shape
self.set_caffe_output_size(caffe_output_shape[-1])
self.labels = read_labels(self.get_label_file(model_num))
self.caffe_start_ind = 0
self.caffe_end_ind = len(caffe_net.layers) - 1
caffe_net.blobs['data'].reshape(self.get_caffe_batch_size(),
self.get_caffe_num_channels(),
self.get_caffe_window_size()[0],
self.get_caffe_window_size()[1])
caffe_net.reshape()
self.caffe_net_models.append(caffe_net)
def get_caffe_batch_size(self):
return self.caffe_batch_size
def get_caffe_num_channels(self):
return self.num_channels
def get_caffe_window_size(self):
return self.window_size
def set_caffe_batch_size(self, batch_size):
self.caffe_batch_size = batch_size
def set_caffe_num_channels(self, num_channels):
self.num_channels = num_channels
def set_caffe_window_size(self, size_tuple):
self.window_size = size_tuple
def set_caffe_output_size(self, output_size):
self.caffe_output_size = output_size
def get_caffe_output_size(self):
return self.caffe_output_size
def get_scores_model(self, window_batch, model_num):
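# Only the first model receives the raw batch; subsequent models reuse the first
# model's preprocessed 'data' blob so the batch is not copied in again.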
if model_num == 0:
self.caffe_net_models[model_num].blobs['data'].data[...] = window_batch
else:
self.caffe_net_models[model_num].blobs['data'].data[...] = self.caffe_net_models[0].blobs['data'].data[...]
out = self.caffe_net_models[model_num].forward()
return out['prob']
def classify_batch_all_models(self, window_batch, num_windows):
# Classify the image
batch_scores | |
<filename>uos_statphys/isingModel.py
__all__ = ['IsingModel','IsingMultiAnalyzer']
from .ImportManager import Import_Manager
mm = Import_Manager()
mm.requireAs('ctypes', 'os','gc', np = 'numpy', plt = 'matplotlib.pyplot', globs = globals())
import ctypes, os, gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from .C_ext import internal_Library
class IsingModelSingle:
'''class for 2D square lattice ising model simulator (only one size).
for now, there are two methods, metropolis and wolff algorithm.
Parameters
------------
L : `int`
A size of system. total number of spin is N (L*L).
algorithm : `str`
specifying the Monte Carlo algorithm. 'wolff' and 'metropolis' are accepted. Default is 'wolff'.
'''
_cdll = internal_Library('isingmonte')
metropolis = _cdll.monteCarlo
metropolis.argtypes = [np.ctypeslib.ndpointer(dtype=np.int32),
np.ctypeslib.ndpointer(dtype=np.int32),
ctypes.c_double,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
wolff = _cdll.wolff
wolff.argtypes = [np.ctypeslib.ndpointer(dtype=np.int32),
np.ctypeslib.ndpointer(dtype=np.int32),
ctypes.c_double,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
_modules = {'np':'numpy'}
def __init__(self, L, algorithm = 'wolff'):
self.L = L
self.T_range = []
self.relax = []
self.simulate_time = []
if not algorithm in ['wolff', 'metropolis']:
raise ValueError("Wrong algorithm, only 'wolff' and 'metropolis' are allowed. ")
self.algorithm = algorithm
def __repr__(self):
return f"<Single ising model simulator>\nL\t\t : {self.L}\n"+f"T\t\t : {self.T_range}\n"+f"MC_steps\t : {self.simulate_time}"
def __gt__(self, other):
return self.L > other.L
def __ge__(self, other):
return self.L >= other.L
def add_set(self, temp, relaxation, MC_step):
''' add simulation setting for single size simulator.
Parameters
------------
temp : `list`
Temperatures which will be simulated.
relaxation : `int`
Relaxation iterations. Iteration number of pre-steps.
MC_step : `int`
MC iterations. Iteration number for simulation.
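Example (illustrative values; `sim` denotes an IsingModelSingle instance).
A scalar `relaxation`/`MC_step` is broadcast over every temperature in `temp`:
sim.add_set(temp=[2.0, 2.27, 2.5], relaxation=1000, MC_step=5000)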
'''
if isinstance(temp, int) or isinstance(temp, float):
self.T_range.append(temp)
self.relax.append(relaxation)
self.simulate_time.append(MC_step)
else:
temp = list(temp)
if isinstance(relaxation, list):
if len(temp)!=len(relaxation):
raise ValueError("'relaxation' must be `int` or have same length with `temp`.")
else:
relaxation = [int(relaxation) for i in temp]
if isinstance(MC_step, list):
if len(temp)!=len(MC_step):
raise ValueError("'MC_step' must be `int` or have same length with `temp`.")
else:
MC_step = [int(MC_step) for i in temp]
self.T_range+=temp
self.relax+=relaxation
self.simulate_time+=MC_step
def _sort(self):
# sort by temperature while keeping relaxation and MC-step settings aligned
order = np.argsort(self.T_range)
self.T_range = list(np.array(self.T_range)[order])
self.relax = list(np.array(self.relax, dtype=np.int32)[order])
self.simulate_time = list(np.array(self.simulate_time, dtype=np.int32)[order])
def simulate(self, ensemble, thr_num = 1):
global mm
if mm.check('tqdm'):
tqdm = mm.require_func(tqdm = 'tqdm', globs = globals())
else:
tqdm = lambda x:x
if not self.T_range:
raise ValueError("no target temperature. please `add_set` to add target temperature.")
self.E, self.M = [],[]
self.ensemble = ensemble
if (not isinstance(ensemble, int)) or (ensemble <= 0 ):
raise ValueError("the value of 'ensemble' is invalid.")
self._sort()
for T, rel, nsteps in tqdm(list(zip(self.T_range,self.relax, self.simulate_time))):
energy = np.zeros([self.ensemble*nsteps],dtype = np.int32)
mag = np.zeros([self.ensemble*nsteps],dtype = np.int32)
vars(self.__class__)[self.algorithm](energy, mag, T, self.ensemble, self.L, rel, nsteps, thr_num)
self.E.append(energy.reshape([self.ensemble, -1]))
self.M.append(mag.reshape([self.ensemble, -1]))
self.E = np.array(self.E)
self.M = np.array(self.M)
def save(self, path):
if os.path.isdir(path):
pass
def get_analyzer(self):
temp = IsingSingleAnalyzer(self.L, np.array(self.T_range), self.E, self.M)
temp.total_ensemble = self.ensemble
temp.sim_time = self.simulate_time
return temp
class IsingModel:
'''class for 2D square lattice ising model simulator.
for now, there are two methods, metropolis and wolff algorithm.
Parameters
------------
algorithm : `str`
specifying the Monte Carlo algorithm. 'wolff' and 'metropolis' are accepted. Default is 'wolff'.
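Example (illustrative parameters; assumes `numpy` is imported as `np`):
model = IsingModel(algorithm='metropolis')
model.add_set(L=[8, 16], temp=list(np.linspace(1.5, 3.0, 16)), relaxation=1000, MC_step=5000)
model.simulate(ensemble=10)
analyzer = model.get_analyzer()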
'''
_modules = {'np':'numpy'}
def __init__(self, algorithm = 'wolff'):
self.entry = []
self.algorithm = algorithm
@property
def algorithm(self):
return self.__algorithm
@algorithm.getter
def algorithm(self):
return self.__algorithm
@algorithm.setter
def algorithm(self, value):
if not value in ['wolff', 'metropolis']:
raise ValueError("Wrong algorithm, only 'wolff' and 'metropolis' are allowed. ")
for ISim in self.entry:
ISim.algorithm = value
self.__algorithm = value
def __repr__(self):
return "<Ising Model Simulator>"
def show_setting(self):
for i in self.entry:
print(i)
def add_set(self, L, temp, relaxation, MC_step):
''' add simulation setting for single size simulator.
Parameters
------------
L : `int`
size of system.
temp : `list`
Temperatures which will be simulated.
relaxation : `int`
Relaxation iterations. Iteration number of pre-steps.
MC_step : `int`
MC iterations. Iteration number for simulation.
'''
if isinstance(L, int):
L = [L]
for l in L:
ch = False
for ISim in self.entry:
if ISim.L == l:
ch = True
ISim.add_set(temp, relaxation, MC_step)
if not ch:
ims = IsingModelSingle(l, self.algorithm)
ims.add_set(temp, relaxation, MC_step)
self.entry.append(ims)
def _sort(self):
self.entry.sort()
for ISim in self.entry:
ISim._sort()
def simulate(self, ensemble, thr_num = 1):
self.ensemble = ensemble
if (not isinstance(ensemble, int)) or (ensemble <= 0 ):
raise ValueError("the value of 'ensemble' is invalid.")
for ISim in self.entry:
ISim.simulate(ensemble, thr_num)
def __getitem__(self, value):
for i in self.entry:
if i.L == value:
return i
raise KeyError(f'{value}')
def get_analyzer(self):
temp = IsingMultiAnalyzer.new()
for Isim in self.entry:
temp.append(Isim.get_analyzer())
return temp
class IsingSingleAnalyzer:
_modules = {'np':'numpy'}
def __init__(self, L, T, E, M):
self.L = L
self.T = T
self.E = E
self.M = np.abs(M)
self.total_ensemble = None
self.sim_time = None
if isinstance(E, np.ndarray):
assert E.shape == M.shape
assert E.shape[0]==T.shape[0]
assert M.shape[0]==T.shape[0]
self.total_ensemble = E.shape[1]
self.sim_time = E.shape[2]
self.__analyzed = False
def analyze(self, reduced = False):
if self.__analyzed: return
self.average = Container()
self.var = Container()
self.second = Container()
self.forth = Container()
#self.meaned = Container()
for key in vars(self).copy():
if isinstance(vars(self)[key], np.ndarray) and len(vars(self)[key].shape)==3:
vars(self.average)[key] = np.average(vars(self)[key], axis =2)
vars(self.var)[key] = np.var(vars(self)[key], axis =2)
vars(self.second)[key] = np.average(vars(self)[key].astype(np.float64)**2, axis =2)
vars(self.forth)[key] = np.average(vars(self)[key].astype(np.float64)**4, axis =2)
if isinstance(vars(self)[key], list) and len(vars(self)[key][0].shape)==2:
temp = [[],[],[],[]]
for i in range(len(vars(self)[key])):
temp[0].append(np.average(vars(self)[key][i], axis =1))
temp[1].append(np.var(vars(self)[key][i], axis =1))
temp[2].append(np.average(vars(self)[key][i].astype(np.float64)**2, axis =1))
temp[3].append(np.average(vars(self)[key][i].astype(np.float64)**4, axis =1))
vars(self.average)[key], vars(self.var)[key], vars(self.second)[key], vars(self.forth)[key] = np.array(temp)
if reduced:
del self.E, self.M
self.__analyzed = True
class Container(object):
pass
class Observable:
def __init__(self, obj):
self.__raw = obj
self.__aver, self.__var = None, None
@property
def average(self):
return self.__aver
@average.getter
def average(self):
if self.__aver is None:
self.__aver = np.average(self.__raw, axis=2)
return self.__aver
class IsingSingleAnalyzer:
def __init__(self, L, T, E, M):
self.L = L
self.T = T
self.E = E
self.M = np.abs(M)
assert E.shape == M.shape
assert E.shape[0]==T.shape[0]
assert M.shape[0]==T.shape[0]
self.total_ensemble = E.shape[1]
self.sim_time = E.shape[2]
self.__analyzed = False
def analyze(self, reduced = False):
if self.__analyzed: return
self.average = Container()
self.var = Container()
self.second = Container()
self.forth = Container()
for key in vars(self).copy():
if isinstance(vars(self)[key], np.ndarray) and len(vars(self)[key].shape)==3:
vars(self.average)[key] = np.average(vars(self)[key], axis =2)
vars(self.var)[key] = np.var(vars(self)[key], axis =2)
vars(self.second)[key] = np.average(vars(self)[key].astype(np.float64)**2, axis =2)
vars(self.forth)[key] = np.average(vars(self)[key].astype(np.float64)**4, axis =2)
if reduced:
del self.E, self.M
self.__analyzed = True
def reduced_T(self, t_c):
return (self.T - t_c)/t_c
def observable(func):
def plots(self, return_errors = False, return_argmax = False):
if not self.__analyzed:
self.analyze()
raw = func(self) #calculation
ret = [np.mean(raw, axis = 1)]
if return_errors:
ret.append(np.std(raw, axis = 1))
if return_argmax:
ret.append(np.argmax(raw, axis = 0))
return ret
return plots
@observable
def susceptibility(self):
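# magnetic susceptibility per spin: chi = Var(M) / (N * T), with N = L*L and k_B = 1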
return self.var.M/self.L/self.L/self.T
@observable
def heat_capacity(self):
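# heat capacity per spin: C = Var(E) / (N * T^2), with N = L*L and k_B = 1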
return self.var.E/self.L/self.L/self.T/self.T
@observable
def binder_cumulant(self):
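# Binder cumulant: U = 1 - <M^4> / (3 <M^2>^2)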
forth = self.forth.M
second = self.second.M
return 1 - forth/3/second**2
class IsingMultiAnalyzer:
def __init__(self,L,T,E,M, title = ""):
self.entry = []
self.L = L
for l,t,e,m in zip(L,T,E,M):
vars(self)[f"_{l}"] = IsingSingleAnalyzer(l,t,e,m)
self.entry.append(vars(self)[f"_{l}"])
self.title = title
self.__analyzed = False
def new(isa = None, title =""):
temp = IsingMultiAnalyzer([],[],[],[],title)
if isa is not None:
temp.append(isa)
return temp
def append(self, value):
if isinstance(value, IsingSingleAnalyzer):
self.L.append(value.L)
self.entry.append(value)
vars(self)[f"_{l}"] = value
else:
raise ValueError
def analyze(self):
for isa in self.entry:
isa.analyze()
self.__analyzed = True
@property
def average(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.average.E, isa.average.M
@property
def variance(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.var.E, isa.var.M
@property
def second(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.second.E, isa.second.M
@property
def forth(self):
if not self.__analyzed:
self.analyze()
for l, isa in zip(self.L,self.entry):
yield l, isa.T, isa.forth.E, isa.forth.M
def line_fitting(self, x, y, y_err, line_range= None, logscale = False , label = ""):
popt , pcov = curve_fit(lambda xhat,a,b:a*xhat+b, x, y, sigma =y_err )
perr = np.sqrt(np.diag(pcov))
if line_range is not None:
pred_x = np.array(line_range)
if logscale:
pred_x = np.power(10,pred_x)
predict = 10**popt[1]*np.power(pred_x,popt[0])
else:
predict = popt[0]*np.array(line_range)+popt[1]
if | |
#!/usr/bin/env python
"""
Greynir: Natural language processing for Icelandic
Evaluation of spelling and grammar correction
Copyright (C) 2021 <NAME>.
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This program uses an Icelandic spelling & grammar error corpus
(https://github.com/antonkarl/iceErrorCorpus) to evaluate the
performance of the GreynirCorrect package.
The program reads a development set of hand-annotated texts in
TEI XML format and automatically annotates errors using GreynirCorrect.
The machine-generated annotations are then compared with the hand-annotated
gold reference.
This program uses Python's multiprocessing.Pool() to perform
the evaluation using all available CPU cores, simultaneously.
A normal way to configure this program is to clone the iceErrorCorpus
repository (from the above path) into a separate directory, and
then place a symlink to it to the /eval directory. For example:
$ cd github
$ git clone https://github.com/antonkarl/iceErrorCorpus
$ cd GreynirCorrect/eval
$ ln -s ../../iceErrorCorpus/ .
$ python eval.py
An alternate method is to specify a glob path to the error corpus as an
argument to eval.py:
$ python eval.py ~/github/iceErrorCorpus/data/**/*.xml
To measure GreynirCorrect's performance on the test set
(by default located in ./iceErrorCorpus/testCorpus/):
$ python eval.py -m
To measure GreynirCorrect's performance on the test set
excluding malformed sentences:
$ python eval.py -m -x
To run GreynirCorrect on the entire development corpus
(by default located in ./iceErrorCorpus/data):
$ python eval.py
To run GreynirCorrect on 10 files in the development corpus:
$ python eval.py -n 10
To run GreynirCorrect on a randomly chosen subset of 10 files
in the development corpus:
$ python eval.py -n 10 -r
To get an analysis report of token comparisons:
$ python eval.py -a
"""
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Set,
Union,
Tuple,
Iterable,
cast,
Any,
DefaultDict,
Counter,
)
import os
from collections import defaultdict
from datetime import datetime
import glob
import random
import argparse
import xml.etree.ElementTree as ET
if TYPE_CHECKING:
# For some reason, types seem to be missing from the multiprocessing module
# but not from multiprocessing.dummy
import multiprocessing.dummy as multiprocessing
else:
import multiprocessing
from reynir import _Sentence
from tokenizer import detokenize, Tok, TOK
from reynir_correct.annotation import Annotation
from reynir_correct.checker import AnnotatedSentence, check as gc_check
# Disable Pylint warnings arising from Pylint not understanding the typing module
# pylint: disable=no-member
# pylint: disable=unsubscriptable-object
# The type of a single error descriptor, extracted from a TEI XML file
ErrorDict = Dict[str, Union[str, int, bool]]
# The type of the dict that holds statistical information about sentences
# within a particular content category
SentenceStatsDict = DefaultDict[str, Union[float, int]]
# The type of the dict that holds statistical information about
# content categories
CategoryStatsDict = DefaultDict[str, SentenceStatsDict]
# This tuple should agree with the parameters of the add_sentence() function
StatsTuple = Tuple[
str, int, bool, bool, int, int, int, int, int, int, int, int, int, int, int, int
]
# Counter of tp, tn, right_corr, wrong_corr, right_span, wrong_span
TypeFreqs = Counter[str]
# Stats for each error type for each content category
# tp, fn, right_corr, wrong_corr, right_span, wrong_span
ErrTypeStatsDict = DefaultDict[str, TypeFreqs]
CatResultDict = Dict[str, Union[int, float, str]]
# Create a lock to ensure that only one process outputs at a time
OUTPUT_LOCK = multiprocessing.Lock()
# Content categories in iceErrorCorpus, embedded within the file paths
GENRES = (
"essays",
"onlineNews",
"wikipedia",
)
# Error codes in iceErrorCorpus that are considered out of scope
# for GreynirCorrect, at this stage at least
OUT_OF_SCOPE = {
"act4mid",
"act4pass",
"adj4noun",
"adjective-inflection",
"agreement-pro", # samræmi fornafns við undanfara grammar ...vöðvahólf sem sé um dælinguna. Hann dælir blóðinu > Það dælir blóðinu
"aux", # meðferð vera og verða, hjálparsagna wording mun verða eftirminnilegt > mun vera eftirminnilegt
"bad-contraction",
"bracket4square", # svigi fyrir hornklofa punctuation (Portúgal) > [Portúgal]
"caps4low",
"case-verb",
"case-prep",
"case-adj",
"case-collocation",
# "collocation-idiom", # fast orðasamband með ógagnsæja merkingu collocation hélt hvorki vindi né vatni > hélt hvorki vatni né vindi
# "collocation", # fast orðasamband collocation fram á þennan dag > fram til þessa dags
"comma4conjunction", # komma fyrir samtengingu punctuation ...fara með vald Guðs, öll löggjöf byggir... > ...fara með vald Guðs og öll löggjöf byggir...
"comma4dash", # komma fyrir bandstrik punctuation , > -
"comma4ex", # komma fyrir upphrópun punctuation Viti menn, almúginn... > Viti menn! Almúginn...
"comma4period", # komma fyrir punkt punctuation ...kynnast nýju fólki, er á þrítugsaldri > ...kynnast nýju fólki. Hann er á þrítugsaldri
"comma4qm", # komma fyrir spurningarmerki punctuation Höfum við réttinn, eins og að... > Höfum við réttinn? Eins og að...
"conjunction",
"conjunction4comma", # samtenging fyrir kommu punctuation ...geta orðið þröngvandi og erfitt getur verið... > ...geta orðið þröngvandi, erfitt getur verið...
"conjunction4period", # samtenging fyrir punkt punctuation ...tónlist ár hvert og tónlistarstefnurnar eru orðnar... > ...tónlist ár hvert. Tónlistarstefnurnar eru orðnar...
"context", # rangt orð í samhengi other
"dash4semicolon", # bandstrik fyrir semíkommu punctuation núna - þetta > núna; þetta
"def4ind", # ákveðið fyrir óákveðið grammar skákinni > skák
"dem-pro", # hinn í stað fyrir sá; sá ekki til eða ofnotað grammar hinn > sá
"dem4noun", # ábendingarfornafn í stað nafnorðs grammar hinn > maðurinn
"dem4pers", # ábendingarfornafn í stað persónufornafns grammar þessi > hún
"extra-comma", # auka komma punctuation stríð, við náttúruna > stríð við náttúruna
"extra-dem-pro",
"extra-number", # tölustöfum ofaukið other 139,0 > 139
"extra-period", # auka punktur punctuation á morgun. Og ... > á morgun og...
"extra-punctuation", # auka greinarmerki punctuation ... að > að
"extra-space", # bili ofaukið spacing 4 . > 4.
"extra-sub",
"extra-symbol", # tákn ofaukið other Dalvík + gaf... > Dalvík gaf...
"extra-word", # orði ofaukið insertion augun á mótherja > augu mótherja
"extra-words", # orðum ofaukið insertion ...ég fer að hugsa... > ...ég hugsa...
"foreign-error", # villa í útlendu orði foreign Supurbowl > Super Bowl
"foreign-name", # villa í erlendu nafni foreign Warwixk > Warwick
"fw4ice", # erlent orð þýtt yfir á íslensku style Elba > Saxelfur
"gendered", # kynjað mál, menn fyrir fólk exclusion menn hugsa oft > fólk hugsar oft
"genitive",
"geta",
"have",
"ice4fw", # íslenskt orð notað í stað erlends Demókrata öldungarþings herferðarnefndina > Democratic Senatorial Campaign Committee
"ind4def", # óákveðið fyrir ákveðið grammar gítartakta > gítartaktana
"ind4sub", # framsöguháttur fyrir vh. grammar Þrátt fyrir að konfúsíanismi er upprunninn > Þrátt fyrir að konfúsíanismi sé upprunninn
"indef-pro", # óákveðið fornafn grammar enginn > ekki neinn
"interr-pro",
"it4nonit", # skáletrað fyrir óskáletrað Studdi Isma'il > Studdi Isma'il
"loan-syntax", # lánuð setningagerð style ég vaknaði upp > ég vaknaði
"low4caps",
"marked4unmarked",
"mid4act",
"mid4pass",
"missing-commas", # kommur vantar utan um innskot punctuation Hún er jafn verðmæt ef ekki verðmætari en háskólapróf > Hún er verðmæt, ef ekki verðmætari, en háskólapróf
"missing-conjunction", # samtengingu vantar punctuation í Noregi suður að Gíbraltarsundi > í Noregi og suður að Gíbraltarsundi
"missing-dem-pro",
"missing-ex", # vantar upphrópunarmerki punctuation Viti menn ég komst af > Viti menn! Ég komst af
"missing-fin-verb",
"missing-obj",
"missing-quot", # gæsalöpp vantar punctuation „I'm winning > „I'm winning“
"missing-quots", # gæsalappir vantar punctuation I'm winning > „I'm winning“
"missing-semicolon", # vantar semíkommu punctuation Haukar Björgvin Páll > Haukar; Björgvin Páll
"missing-square", # vantar hornklofi punctuation þeir > [þeir]
"missing-sub",
"missing-symbol", # tákn vantar punctuation 0 > 0%
"missing-word", # orð vantar omission | |
return hash(
(
id(self),
)
)
Callback = Callable[[Message, Channel], Union[None, Awaitable[None]]]
class Subscriber(_ExchangeChildModel):
"""A Subscriber consumes relevant Messages published to an Exchange.
Subscribers can invoke a callback when a new Message is published and can be used
as an asynchronous iterator to process Messages. The `cancel` method
will halt message processing and stop any attached async iterators.
Subscribers are asynchronously callable for notification of the publication of new Messages.
Attributes:
exchange: The pub/sub exchange that the Subscriber belongs to.
subscription: A descriptor of the types of Messages that the Subscriber is interested in.
callback: An optional callable to be invoked when the Subscriber is notified of new Messages.
Usage:
```
# Processing via a callback
async def _my_callback(message: Message, channel: Channel) -> None:
print(f"Notified of a new Message: {message}, {channel}")
subscriber = Subscriber(exchange=exchange, subscription=subscription, callback=callback)
# Processing via async iteration
subscriber = Subscriber(exchange=exchange, subscription=subscription)
async for message, channel in subscriber:
print(f"Notified of a new Message: {message}, {channel}")
# Break out of processing
subscriber.cancel()
```
"""
subscription: Subscription
callback: Optional[Callback]
_event: asyncio.Event = pydantic.PrivateAttr(default_factory=asyncio.Event)
_iterators: List[_Iterator] = pydantic.PrivateAttr([])
def stop(self) -> None:
"""Stop the current async iterator.
The iterator to be stopped is determined by the current iteration scope.
Calling stop on a parent iterator scope will trigger a `RuntimeError`.
Raises:
RuntimeError: Raised if there is not an active iterator or the receiver
is not being iterated in the local scope.
"""
iterator = _current_iterator()
if iterator is not None:
if iterator.subscriber != self:
raise RuntimeError(f"Attempted to stop an inactive iterator")
iterator.stop()
else:
raise RuntimeError("Attempted to stop outside of an iterator")
def cancel(self) -> None:
"""Cancel the subscriber from receiving any further Messages.
Any objects waiting on the Subscriber and any async iterators are released.
Raises:
RuntimeError: Raised if the Subscriber has already been cancelled.
"""
if self.cancelled:
raise RuntimeError(f"Subscriber is already cancelled")
self._event.set()
# Stop any attached iterators
for iterator in self._iterators:
iterator.stop()
self._iterators.clear()
@property
def cancelled(self) -> bool:
"""Return True if the subscriber has been cancelled."""
return self._event.is_set()
async def wait(self) -> None:
"""Wait for the subscriber to be cancelled.
The caller will block until the Subscriber is cancelled.
"""
await self._event.wait()
async def __call__(self, message: Message, channel: Channel) -> None:
if self.cancelled:
servo.logger.warning(f"ignoring call to cancelled Subscriber: {self}")
return
if self.subscription.matches(channel, message):
if self.callback:
# NOTE: Yield message or message, channel based on callable arity
signature = inspect.Signature.from_callable(self.callback)
if len(signature.parameters) == 1:
if asyncio.iscoroutinefunction(self.callback):
await self.callback(message)
else:
self.callback(message)
elif len(signature.parameters) == 2:
if asyncio.iscoroutinefunction(self.callback):
await self.callback(message, channel)
else:
self.callback(message, channel)
else:
raise TypeError(f"Incorrect callback")
for _, iterator in enumerate(self._iterators):
if iterator.stopped:
self._iterators.remove(iterator)
else:
await iterator(message, channel)
def __aiter__(self): # noqa: D105
iterator = _Iterator(self)
self._iterators.append(iterator)
return iterator
def __eq__(self, other) -> bool:
# compare exchanges by object identity rather than fields
if isinstance(other, Subscriber):
return id(self) == id(other)
return False
class Config:
arbitrary_types_allowed = True
class Publisher(_ExchangeChildModel):
"""A Publisher broadcasts Messages to Channels in an Exchange.
Publishers are asynchronously callable to publish a Message.
Attributes:
exchange: The pub/sub Exchange that the Publisher belongs to.
channels: The Channels that the Publisher publishes Messages to.
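Usage:
```
# An illustrative sketch; `exchange` and `channel` are assumed to exist already.
publisher = Publisher(exchange=exchange, channels=[channel])
await publisher(Message(text="An example payload"))
```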
"""
channels: pydantic.conlist(Channel, min_items=1)
async def __call__(self, message: Message, *channels: List[Union[Channel, str]]) -> None:
for channel in (channels or self.channels):
if channel_ := self._find_channel(channel):
await self.exchange.publish(message, channel_)
else:
raise ValueError(f"Publisher is not bound to Channel: '{channel}'")
def _find_channel(self, channel: Union[Channel, str]) -> Optional[Channel]:
return next(filter(lambda c: c == channel, self.channels), None)
TransformerCallback = Callable[[Message, Channel], Union[Optional[Message], Awaitable[Optional[Message]]]]
class Transformer(abc.ABC, pydantic.BaseModel):
"""A Transformer intercepts Messages published to an Exchange and transforms
them before delivery.
Transformers are callable objects that accept a Message and a Channel as positional
arguments and return an optional Message. Returning None cancels propagation of the
Message to the downstream Transformers and Subscribers.
"""
def cancel(self) -> None:
"""Cancel the transformer, cleaning up any state.
The default implementation does nothing as in most cases transformers are
stateless.
"""
...
@abc.abstractmethod
async def __call__(self, message: Message, channel: Channel) -> Optional[Message]:
"""Transforms a published Message before delivery to Subscribers.
"""
class Filter(Transformer):
"""A Filter intercepts Messages before delivery to Subscribers and cancels or
modifies the Message.
Filters utilize a callback that takes a Message and Channel input arguments
and return an optional Message. When None is returned, the Message is
cancelled and is not delivered to Subscribers. When a Message object is
returned, it is passed as the input into subsequent transformers and the
final transformed Message is delivered to Subscribers.
Attributes:
callback: A callback that performs the filtering. Must accept Message and
Channel positional arguments and return an optional Message.
Usage:
```
# Cancel any Message with a text/xml MIME Type
async def _filter_xml_messages(message: Message, channel: Channel) -> Optional[Message]:
if message.content_type == 'text/xml':
return None
else:
return Message
xml_filter = Filter(_filter_xml_messages)
exchange.add_transformer(xml_filter)
# Uppercase the text of all Message text
async def _uppercase_message_text(message: Message, channel: Channel) -> Optional[Message]:
return Message(text=message.text.upper(), content_type=message.content_type)
upper_filter = Filter(_uppercase_message_text)
exchange.add_transformer(upper_filter)
```
"""
callback: TransformerCallback
def __init__(self, callback: TransformerCallback, *args, **kwargs) -> None:
super().__init__(callback=callback, *args, **kwargs)
async def __call__(self, message: Message, channel: Channel) -> Optional[Message]:
"""Called to transform Message
"""
if asyncio.iscoroutinefunction(self.callback):
return await self.callback(message, channel)
else:
return self.callback(message, channel)
class Config:
arbitrary_types_allowed = True
SplitterCallback = Callable[['Splitter', Message, Channel], Awaitable[None]]
class Splitter(Transformer):
"""A Splitter intercepts Messages before delivery to Subscribers and splits
the Message content across a number of other channels.
Splitters are useful for decomposing aggregated Messages into more specific
Messages. For example, given a message reporting a number of metrics
retrieved from a system such as Prometheus, it may be useful to extract a
subset of the metrics and report them on a more specific channel.
Spliters utilize a callback that takes a Splitter, Message, and Channel
input arguments and return None.
Attributes:
callback: A callback that performs the splitting. Must accept the Splitter
instance, Message, and Channel positional arguments and return None.
channels: The Channels that the Splitter publishes split Messages to.
Usage:
```
# Split a large message into smaller messages
async def _split_message(splitter: Splitter, message: Message, channel: Channel) -> None:
partition_len = 128
if message.content_type == 'text/plain' and len(message.text) > partition_len:
for index in range(0, len(message.text), partition_len):
substring = message.text[index : index + partition_len]
await splitter.channels[0].publish(text=substring)
splitter = Splitter(_split_message, target_channel)
exchange.add_transformer(splitter)
```
"""
callback: SplitterCallback
channels: pydantic.conlist(Channel, min_items=1)
def __init__(self, callback: SplitterCallback, *channels: List[Channel], **kwargs) -> None:
super().__init__(callback=callback, channels=channels, **kwargs)
async def __call__(self, message: Message, channel: Channel) -> Optional[Message]:
"""Called to transform Message
"""
await self.callback(self, message, channel)
return message
def get_channel(self, name: str) -> Channel:
"""Return a Channel by name."""
return next(filter(lambda m: m.name == name, self.channels))
AggregatorCallback = Callable[['Aggregator', Message, Channel], Awaitable[None]]
class Aggregator(Transformer):
"""An Aggregator accumulates Messages sent to a set of Channels and publishes
a new aggregate Message combining the data.
Aggregators can be used to pull data from multiple sources together into a new
canonical format, abstracting away the underlying source details and normalizing
the data format.
Aggregator publication can be triggered programmatically, automatically once all
source channels have published one or more messages, or on a fixed time interval.
Fixed time window publication trades off consistency for availability and requires
care to be taken when designing the aggregate data format as it may be incomplete
in arbitrary ways.
Attributes:
from_channels: The list of Channels to aggregate.
to_channel: The Channel to publish the aggregated Message to.
callback: A callback that performs the aggregation. Must accept
the Aggregator instance, Message, and Channel positional arguments and
return None.
every: An optional time interval specifying how often to publish regardless
of whether or not all source Channels have sent a Message.
message: The current aggregate Message state. Modified by the callback as
new Messages are processed.
Usage:
```
# Aggregate text messages by concatenating the text, publishing every 30s
async def _aggregate_text(aggregator: Aggregator, message: Message, channel: Channel) -> None:
if aggregator.message is None:
aggregator.message = message.copy()
else:
text = "\n".join([aggregator.message.text, message.text])
aggregator.message = servo.pubsub.Message(text=text)
aggregator = Aggregator(from_channels=[cbs, abc, fox, msnbc], to_channel=output_channel, | |
"""
Module: Sampler
The sampler module provides methods for exploring the potential functions.
Stochastic Integrators
"""
import numpy as np
import scipy.constants as const
from ensembler.samplers._basicSamplers import _samplerCls
from ensembler.util.ensemblerTypes import Union, List, Tuple, Number
from ensembler.util.ensemblerTypes import systemCls as systemType
class stochasticSampler(_samplerCls):
'''
This class is the parent class for all stochastic samplers. The pre-implemented
stochastic type samplers currently comprise various Monte-Carlo and Langevin Methods.
'''
# Params
minStepSize: Number = None
step_size_coefficient: Number = 1
spaceRange: Tuple[Number, Number] = None
resolution: float = 0.01 # controls the number of distinct positions: between 0 and 10 there are 10/0.01 = 1000 possible positions; only used with space_range
fixedStepSize: (Number or List[Number])
# calculation
posShift: float = 0
# Limits:
_critInSpaceRange = lambda self, pos: self.spaceRange is None or (
self.spaceRange != None and pos >= min(self.spaceRange) and pos <= max(self.spaceRange))
def random_shift(self, nDimensions: int) -> Union[float, np.array]:
"""
randomShift
This function calculates the shift for the current position.
Parameters
----------
nDimensions : int
gives the dimensionality of the position, defining the number of shift components.
Returns
-------
Union[float, List[float]]
returns the Shifts
"""
# which sign will the shift have?
sign = np.array([-1 if (x < 50) else 1 for x in np.random.randint(low=0, high=100, size=nDimensions)])
# Check if there is a space restriction? - converges faster
if (not isinstance(self.fixedStepSize, type(None))):
shift = np.array(np.full(shape=nDimensions, fill_value=self.fixedStepSize), ndmin=1)
elif (not isinstance(self.spaceRange, type(None))):
shift = np.array(np.multiply(np.abs(np.random.randint(low=np.min(self.spaceRange) / self.resolution,
high=np.max(self.spaceRange) / self.resolution,
size=nDimensions)), self.resolution), ndmin=1)
else:
shift = self.step_size_coefficient * np.array(np.abs(np.random.rand(nDimensions)), ndmin=1)
# Is the step shift in the allowed area?
if (self.minStepSize != None and any([s < self.minStepSize for s in shift])):
self.posShift = np.multiply(sign, np.array([s if (s > self.minStepSize) else self.minStepSize for s in shift]))
else:
self.posShift = sign * shift
return np.squeeze(self.posShift)
class monteCarloIntegrator(stochasticSampler):
"""
monteCarloIntegrator
This class implements the classic Monte Carlo samplers.
It chooses its moves purely randomly. Therefore, the distributions generated by this integrator do not
resemble the (micro/grand) canonical ensemble. Additionally, no kinetic information can be obtained from
Monte Carlo samplers.
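Example (illustrative sketch; `system` stands for any ensembler system object):
sampler = monteCarloIntegrator(space_range=(-5, 5), step_size_coefficient=2)
new_position, _, shift = sampler.step(system)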
"""
name = "Monte Carlo Integrator"
def __init__(self, space_range: Tuple[Number, Number] = None,
step_size_coefficient: Number = 5, minimal_step_size: Number = None,
fixed_step_size: Number = None):
"""
__init__
This is the Constructor of the MonteCarlo samplers.
Parameters
----------
space_range : Tuple[Number, Number], optional
maximal and minimal allowed position for after an integration step.
If not fulfilled, the step is rejected. By default None
step_size_coefficient: Number, optional
scales the magnitude of the random step in each dimension. (Default: 5)
minimal_step_size : Number, optional
minimal size of an integration step in any direction, by default None
fixed_step_size : Number, optional
this option restrains each integration step to a certain size in each dimension, by default None
"""
super().__init__()
self.fixedStepSize = None if (isinstance(fixed_step_size, type(None))) else np.array(fixed_step_size)
self.minStepSize = minimal_step_size
self.step_size_coefficient = step_size_coefficient
self.spaceRange = space_range
def step(self, system: systemType) -> Tuple[float, None, float]:
"""
step
This function is performing an integration step in MonteCarlo fashion.
Parameters
----------
system : systemType
A system, that should be integrated.
Returns
-------
Tuple[float, None, float]
This Tuple contains the new: (new Position, None, position Shift/ force)
"""
# integrate
# while no value in spaceRange was found, terminates in first run if no spaceRange
current_state = system.current_state
self.oldpos = current_state.position
while (True):
self.random_shift(system.nDimensions)
self.newPos = np.add(self.oldpos, self.posShift)
# only get positions in certain range or accept if no range
if (self._critInSpaceRange(self.newPos)):
break
if (self.verbose):
print(str(self.__name__) + ": current position\t ", self.oldpos)
print(str(self.__name__) + ": shift\t ", self.posShift)
print(str(self.__name__) + ": newPosition\t ", self.newPos)
print("\n")
return np.squeeze(self.newPos), np.nan, np.squeeze(self.posShift)
class metropolisMonteCarloIntegrator(stochasticSampler):
"""
metropolisMonteCarloIntegrator
This class is implementing a metropolis monte carlo Integrator.
In contrast to the Monte Carlo Integrator, that has completely random steps, this sampler has
limitations to the randomness. This limitation is expressed in the Metropolis Criterion and ensures
that the microcanonical ensemble is sampled.
There is a standard Metropolis Criterion implemented, but it can also be exchanged with a different one.
Default Metropolis Criterion:
$ decision = (E_{t} < E_{t-1}) || ( rand <= e^{(-1/(R/T*1000))*(E_t-E_{t-1})}$
with:
- $R$ as universal gas constant
The original Metropolis Criterion (N. Metropolis et al.; J. Chem. Phys.; 1953; doi: https://doi.org/10.1063/1.1699114):
$ p_A(E_{t}, E_{t-1}, T) = min(1, e^{-1/(k_b*T) * (E_{t} - E_{t-1})})
$ decision: True if( 0.5 < p_A(E_{t}, E_{t-1}, T)) else False
with:
- $k_b$ as Boltzmann Constant
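Worked example (illustrative numbers): with the default criterion above, an energy
increase of E_t - E_{t-1} = 1 kJ/mol at T = 300 K is accepted with probability
exp(-1 / (R/1000 * 300)) = exp(-1 / 2.494) ~ 0.67.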
"""
name = "Metropolis Monte Carlo Integrator"
# Parameters:
maxIterationTillAccept: float = np.inf # how many iterations the sampler performs before it forcefully accepts a step
convergence_limit: int = np.inf # after reaching a certain limit abort iteration
# METROPOLIS CRITERION
##random part of Metropolis Criterion:
_default_randomness = lambda self, ene_new, current_state: (
self._randomness_factor * np.random.rand() <= np.exp(
-1.0 / (const.gas_constant / 1000.0 * current_state.temperature) * (
ene_new - current_state.total_potential_energy)))
def __init__(self, space_range: tuple = None,
step_size_coefficient: Number = 5, minimal_step_size: float = None,
fixed_step_size=None,
randomness_increase_factor=1.25, max_iteration_tillAccept: int = 10000):
"""
__init__
This is the Constructor of the Metropolis-MonteCarlo samplers.
Parameters
----------
minimal_step_size : Number, optional
minimal size of an integration step in any direction, by default None
space_range : Tuple[Number, Number], optional
maximal and minimal allowed position after an integration step.
If the condition is not fulfilled, the step is rejected. By default None
fixed_step_size : Number, optional
this option constrains each integration step to a fixed size in each dimension, by default None
step_size_coefficient : Number, optional
gives the range of the random numbers; with a coefficient of 5, values between -5 and 5 are drawn. By default 5
randomness_increase_factor : int, optional
arbitrary factor controlling the amount of randomness (the bigger, the more random steps), by default 1.25
max_iteration_tillAccept : int, optional
number of iterations after which a step is accepted regardless of its likelihood (turned off if np.inf). By default 10000
"""
super().__init__()
# Integration Step Constrains
self.fixedStepSize = None if (isinstance(fixed_step_size, type(None))) else np.array(fixed_step_size)
self.minStepSize = minimal_step_size
self.step_size_coefficient = step_size_coefficient
self.spaceRange = space_range
# Metropolis Criterions
self._randomness_factor = randomness_increase_factor
self.maxIterationTillAccept = max_iteration_tillAccept
##default Metropolis Criterion
def metropolis_criterion(self, ene_new, current_state):
"""
metropolisCriterion
The metropolis criterion decides if a step is accepted.
Parameters
----------
ene_new: float
new energy in case the step is accepted
current_state: stateType
state of the current step
Returns
-------
bool
defines if the step is accepted or not
"""
return (ene_new < current_state.total_potential_energy or self._default_randomness(ene_new, current_state))
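# Worked example (illustrative numbers; energies assumed in kJ/mol): with
# ene_new = 12.0, E_old = 10.0 and T = 300 K the exponent is
# -(12.0 - 10.0) / (8.314/1000 * 300) ≈ -0.80, so exp(...) ≈ 0.45 and an uphill
# step is accepted whenever randomness_factor * rand() <= 0.45,
# i.e. roughly 36 % of the time for the default factor of 1.25.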
def step(self, system: systemType) -> Tuple[float, None, float]:
"""
step
This function performs a Metropolis Monte Carlo integration step.
Parameters
----------
system : systemType
The system that should be integrated.
Returns
-------
Tuple[float, None, float]
This tuple contains (new position, None, position shift / force).
"""
current_iteration = 0
current_state = system.current_state
self.oldpos = current_state.position
nDimensions = system.nDimensions
# integrate position
while (current_iteration <= self.convergence_limit and current_iteration <= self.maxIterationTillAccept): # while no value in spaceRange was found, terminates in first run if no spaceRange
self.random_shift(nDimensions)
# eval new Energy
system._currentPosition = self.oldpos + self.posShift
system._currentForce = self.posShift
new_ene = system.potential.ene(system._currentPosition)
#print(system._currentPosition)
# MetropolisCriterion
if (self.maxIterationTillAccept <= current_iteration or ((self._critInSpaceRange(system._currentPosition) and
self.metropolis_criterion(new_ene, current_state)))):
break
else: # not accepted
current_iteration += 1
if (current_iteration >= self.convergence_limit):
raise ValueError(
"Metropolis-MonteCarlo samplers did not converge! Think about the maxIterationTillAccept")
self.newPos = self.oldpos
if (self.verbose):
print(str(self.__name__) + ": current position\t ", self.oldpos)
print(str(self.__name__) + ": shift\t ", self.posShift)
print(str(self.__name__) + ": newPosition\t ", self.newPos)
print(str(self.__name__) + ": iteration " + str(current_iteration) + "/" + str(self.convergence_limit))
print("\n")
return np.squeeze(system._currentPosition), np.nan, np.squeeze(self.posShift)
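# Illustrative standalone sketch of the default Metropolis decision used above, written out
# explicitly. Assumes numpy as np and scipy.constants as const (both used elsewhere in this
# module) and energies given in kJ/mol.
def _metropolis_accept(ene_new, ene_old, temperature, randomness_factor=1.25):
    if ene_new < ene_old:
        return True  # downhill moves are always accepted
    # uphill moves are accepted with a Boltzmann-weighted probability, damped by randomness_factor
    beta = 1.0 / (const.gas_constant / 1000.0 * temperature)
    return randomness_factor * np.random.rand() <= np.exp(-beta * (ene_new - ene_old))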
'''
Langevin stochastic integration
'''
class langevinIntegrator(stochasticSampler):
"""
This class implements the Position Langevin sampler. In contrast to the Monte Carlo methods,
Langevin integrators provide information on the kinetics of the system. The Position Langevin
Integrator does not calculate velocities. Therefore, the kinetic energy is undefined.
"""
name = "Langevin Integrator"
def __init__(self, dt: float = 0.005, gamma: float = 50, old_position: float = None):
"""
__init__
This is the Constructor of the Langevin samplers.
Parameters
----------
dt : Number, optional
time step of an integration, by default 0.005
gamma : Number, optional
Friction constant of the system, by default 50
old_position : Iterable[Number, Number] of size nDim, optional
the current matrix until it is in reduced row
echelon form (RREF). A matrix and its transpose have the same rank.
Note: This doesn't change the matrix internally, you will have to assign the
return value to your matrix variable if you want to change it.
:return: reduced row echelon form of current matrix
:rtype: Matrix
"""
new_matrix = self.row_echelon().comp # get in row echelon form first
pivots = {} # store pivot indexes key-value for use later
# store pivots as col : row pairs
for r, row in enumerate(new_matrix):
# identify pivot
i = 0
while i < self.cols and row[i] == 0:
i += 1
if i < self.cols:
pivots[i] = r
# apply only 0s above pivot (bottom part is done since already in row echelon form)
offset = 0 # how far ahead the first pivot is (e.g. there may be zero columns before the first pivot)
for c in range(self.cols):
if c in pivots:
pivot_row = pivots[c] # row the pivot is in
for r in range(pivot_row): # top part, don't loop past location of pivot
while new_matrix[r][c] != 0: # stay in same column and fix parts above pivot
other_row = c-offset # when no offset, col c can be cleared using row c since there are c zeros
new_matrix = self._clear_pos(new_matrix, r, c, other_row)
else:
offset += 1
new_matrix = self._clean_matrix(new_matrix) # this function also changes floats to perfect ints based on gcd
# now, apply "each pivot is 1" rule, floats inevitable, but preserve as much ints as possible
for r, row in enumerate(new_matrix):
# identify pivot
i = 0
while i < self.cols and row[i] == 0:
i += 1
# divide row by proper amount to get a 1 on pivot
if i < self.cols:
pivot = row[i]
new_matrix[r] = [elem // pivot if elem % pivot == 0 else elem / pivot for elem in row]
return Matrix(sorted(new_matrix, reverse=True)) # ensure ordering is still valid
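# Worked example (illustrative): an invertible matrix reduces to the identity,
#   Matrix([[1, 2], [3, 4]]).row_reduce()  ->  Matrix([[1, 0], [0, 1]])
# while a rank-deficient matrix keeps a zero row,
#   Matrix([[1, 2], [2, 4]]).row_reduce()  ->  Matrix([[1, 2], [0, 0]])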
def inverse(self):
"""
Gets the inverse A^-1 of the current matrix A.
:return: inverse matrix of current matrix, or None if not invertible (singular)
:rtype: Matrix
:raises: value error if current matrix is not nxn
"""
n = self.cols
identity = Matrix.identity(n)
if self.rows != n:
raise ValueError("Need an nxn matrix to calculate inverse.")
# create combined matrix
with_identity = Matrix.combine(self, identity).row_reduce()
# if left side is identity, then right side is inverse
if Matrix([row[:n] for row in with_identity.comp]) != identity:
return None # no inverse, singular
else:
return Matrix([row[-n:] for row in with_identity.comp])
def __add__(self, other):
"""
Adds two matrices and returns a matrix with the respective components
added together as expected.
:param other: the other matrix to be added to current instance matrix
:type other: Matrix
:return: a matrix with the resulting added components
:rtype: Matrix
:raises: ValueError when matrices do not have same dimensions
"""
new_comp = []
if self.rows == other.rows and self.cols == other.cols:
for x, y in zip(self.comp, other.comp):
new_comp.append([a + b for a, b in zip(x, y)]) # adding done in list comprehension
return Matrix(new_comp)
else:
raise ValueError("Size mismatch, both matrices must have the same number of rows and columns.")
def __sub__(self, other):
"""
Subtracting two matrices returns a matrix with the respective components
subtracted. "current - other" is formatting.
:param other: the other matrix which is subtracting from the current matrix
:type other: Matrix
:return: a matrix with the resulting subtracted components
:rtype: Matrix
:raises: ValueError when matrices do not have same dimensions
"""
new_comp = []
if self.rows == other.rows and self.cols == other.cols:
for x, y in zip(self.comp, other.comp):
new_comp.append([a - b for a, b in zip(x, y)]) # subtracting done in list comprehension
return Matrix(new_comp)
else:
raise ValueError("Size mismatch, both matrices must have the same number of rows and columns.")
def __mul__(self, other):
"""
Multiplies the two matrices together; aka Matrix Multiplication.
Matrix-Vector product is also possible using the Vector class, though
this method works for a mx1 matrix as well. Also configured to work with
normal application of multiplying a scalar to a matrix.
Notes: Approach is to take the dot product of each row of current matrix
with each column of other matrix/vector. Since you typically write
"Ax" where A is the matrix and x is the vector, this syntax should
be adhered to when attempting matrix multiplication with these classes.
:param other: the other matrix or vector, could also be an int or float for scaling
:type other: Matrix, int, float
:return: the resulting matrix
:rtype: Matrix
:raises: ValueError when there's a matrix multiplication size mismatch ([mxn]*[nxp]=[mxp])
"""
new_matrix = []
if isinstance(other, int) or isinstance(other, float):
for row in self.comp:
new_matrix.append([elem * other for elem in row])
return Matrix(new_matrix)
elif self.cols == other.rows: # [m x n] * [n x p] = [m x p] i.e. [self.rows x other.cols] matrix
other_cols = []
for i in range(other.cols): # extract columns from rows
other_cols.append([row[i] if isinstance(other, Matrix) else row for row in other.comp])
for row_me in self.comp:
new_row = []
for col_other in other_cols:
new_row.append(Vector(row_me) * Vector(col_other)) # Dot product of vectors
new_matrix.append(new_row)
return Vector([row[0] for row in new_matrix]) if other.cols == 1 else Matrix(new_matrix)
else:
raise ValueError("Size mismatch; [m x n] * [n x p] = [m x p] matrix")
def __eq__(self, other):
"""
If two matrices have the same components, then they are equal. If the
lists are not the same length, will always be False with no error thrown.
Have to compare each component due to necessity of using math.isclose()
on floats in order to deal with floating point errors.
:param other: other matrix being tested for equality
:type other: Matrix
:return: True or False based on equality
:rtype: bool
"""
if self.rows != other.rows or self.cols != other.cols:
return False
for my_row, other_row in zip(self, other):
for my_val, other_val in zip(my_row, other_row):
if not isclose(my_val, other_val):
return False
return True  # all components matched within tolerance
def __pow__(self, power, modulo=None):
"""
Allows you to raise a matrix to a power, that is, each of the
components of the current matrix is raised to a power. Can use
power 0 to fill the current matrix with all 1s.
:param power: value to raise each component to
:param modulo: optional parameter; the modulus is applied to each component before it is raised to the power
:type power: int, float
:type modulo: int, float
:return: a matrix containing the appropriately scaled components
:rtype: Matrix
"""
new_comp = []
for row in self.comp:
new_row = []
for elem in row:
if modulo:
elem = elem % modulo
new_row.append(pow(elem, power))
new_comp.append(new_row)
return Matrix(new_comp)
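# Worked examples (illustrative):
#   Matrix([[1, 2], [3, 4]]) ** 2   ->  Matrix([[1, 4], [9, 16]])  (component-wise)
#   pow(Matrix([[3]]), 2, 2)        ->  Matrix([[1]])  (3 % 2 = 1 is taken before squaring)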
def __str__(self):
"""
String representation of matrix is each row separated by new line
characters. This is done so that when printed it resembles a normal
matrix as closely as possible.
:return: string representation of current matrix
:rtype: str
"""
# joins each row of matrix with a new line character and a space,
# floats are converted to visual fractions, need to get rid of quotes around them
return "[" + '\n '\
.join([str([str(Fraction(elem).limit_denominator()) if isinstance(elem, float) else elem for elem in row])
.replace('\'', '') for row in self.comp])\
+ "]"
def __len__(self):
"""
:return: returns tuple formatted as (row, col)
:rtype: tuple
"""
return self.rows, self.cols
def __getitem__(self, index):
"""
Allows user to access internal self.comp without doing
my_matrix.comp[i][j] and instead doing my_matrix[i][j]
Note: the first [] calls this function, which returns row,
that row is a list, which supports [] in the same way
that this function does.
:param index: index of row
:type index: int
:return: list or value for row or row+col value
:rtype: list, value
"""
return self.comp[index]
def __setitem__(self, key, value):
"""
Allows the user to set a value using brackets.
Note: behavior undefined if user
import json
import itertools
from datetime import datetime, timedelta
import pandas as pd
from sqlalchemy import select, and_, join
from sqlalchemy.exc import IntegrityError
import copy
from ecoreleve_server.core import RootCore
from ecoreleve_server.core.base_resource import DynamicObjectResource, DynamicObjectCollectionResource
from .station_model import Station, Station_FieldWorker
from ..monitored_sites.monitored_site_model import MonitoredSite, MonitoredSitePosition
from ..users.user_model import User
from ..field_activities import fieldActivity
from ..observations.observation_resource import ObservationsResource
from .station_collection import StationCollection
from ..permissions import context_permissions
from ..sensors.sensor_data import CamTrap
from ...utils.datetime import parse
class StationResource(DynamicObjectResource):
model = Station
children = [('observations', ObservationsResource)]
__acl__ = context_permissions['stations']
def delete(self):
if self.objectDB:
id_ = self.objectDB.ID
DynamicObjectResource.delete(self)
else:
id_ = None
response = {'id': id_}
return response
class StationsResource(DynamicObjectCollectionResource):
Collection = StationCollection
model = Station
moduleFormName = 'StationForm'
moduleGridName = 'StationGrid'
children = [('{int}', StationResource)]
__acl__ = context_permissions['stations']
def __init__(self, ref, parent):
DynamicObjectCollectionResource.__init__(self, ref, parent)
self.__acl__ = context_permissions[ref]
def insertWithCamTrap(self):
session = self.request.dbsession
data = {}
for items, value in self.request.json_body.items():
data[items] = value
if data['camtrapId'] is None:
self.request.response.status_code = 502
raise KeyError("no camtrapId submitted")
else:
idCreated = -1
camtrapItem = session.query(CamTrap).get(data['camtrapId'])
self.objectDB.values = data
try:
session.begin_nested()
try:
session.add(self.objectDB)
session.flush()
except Exception as e:
# error when trying to insert a station that already exists on the server
# hack to handle the error raised by the business rules
# need to find a cleaner way
self.request.response.status_code = 409
self.request.response.text = e.value
session.rollback()
pass
session.commit()
# session.refresh(self.objectDB)
idCreated = self.objectDB.ID
camtrapItem.stationId = idCreated
camtrapItem.validated = 2
session.add(camtrapItem)
session.flush()
except Exception as e:
self.request.response.status_code = 502
if self.request.response.status_code == 409 :
return self.request.response.text
else:
return {'ID': idCreated}
def insertAllWithCamTrap(self):
session = self.request.dbsession
session.autoflush = False
data = self.request.json_body
result = []
collectionItem = []
for row in data:
try:
self.newobjectDB = Station()
self.newobjectDB.values = row
session.begin_nested()
try:
session.add(self.newobjectDB)
session.flush()
camtrapItem = session.query(CamTrap).get(row['camtrapId'])
if self.newobjectDB.ID:
camtrapItem.stationId = self.newobjectDB.ID
camtrapItem.validated = 2
session.add(camtrapItem)
session.flush()
result.append({ row['camtrapId'] : self.newobjectDB.ID })
except Exception as e:
# error when trying to insert a station that already exists on the server
# hack to handle the error raised by the business rules
# need to find a cleaner way
result.append({ row['camtrapId'] : e.value })
self.request.response.status_code = 202
self.newobjectDB.ID = None
session.rollback()
pass
session.commit()
except Exception as e:
self.request.response.status_code = 502
raise e
return result
def deleteStationWithCamTrap(self):
session = self.request.dbsession
data = self.request.json_body
result = []
for row in data:
camTrapItem = session.query(CamTrap).get(row['id'])
stationItem = session.query(self.model).get(row['stationId'])
try:
if stationItem:
session.delete(stationItem)
camTrapItem.stationId = None
session.add(camTrapItem)
result.append({camTrapItem.pk_id : 'station deleted'})
except Exception as e:
self.request.response.status_code = 502
raise e
return result
def insertAll(self) :
session = self.request.dbsession
data = self.request.json_body
result = []
collectionItem = []
for row in data:
self.newobjectDB = Station()
collectionItem.append(self.newobjectDB)
row = self.handleDataBeforeInsert(row)
self.newobjectDB.values = row
self.session.add(self.newobjectDB)
self.session.flush()
for item in collectionItem:
if item.ID :
result.append({ ''+str(item.Name)+'' : item.ID})
else :
result.append({ ''+str(item.Name)+'' : None})
return result
def handleDataBeforeInsert(self, data):
user_id = self.request.authenticated_userid['iss']
data['creator'] = user_id
return data
def updateMonitoredSite(self):
session = self.request.dbsession
data = self.request.params.mixed()
if "FK_MonitoredSite" not in data or data['FK_MonitoredSite'] == '':
return 'Station is not monitored'
try:
data['StartDate'] = data['StationDate']
data['Precision'] = data['precision']
if data.get('Name', None):
del data['Name']
currentMonitoredSite = session.query(MonitoredSite).get(data['FK_MonitoredSite'])
tmpVal = copy.deepcopy(currentMonitoredSite.values)
# tmpVal = currentMonitoredSite.values
tmpVal['LAT'] = data['LAT']
tmpVal['LON'] = data['LON']
tmpVal['ELE'] = data['ELE']
tmpVal['Comments'] = data['Comments']
tmpVal['StartDate'] = data['StationDate']
if tmpVal['creationDate'] > parse(data['StationDate'] ) :
tmpVal['creationDate'] = data['StationDate']
# print("on a fetch le site monitoré",currentMonitoredSite.values)
# print("on va mettre les valeurs",data)
currentMonitoredSite.values = tmpVal
# currentMonitoredSite.updateFromJSON(data)
return 'Monitored site position was updated'
except IntegrityError as e:
session.rollback()
return 'This location already exists'
except Exception as e:
print(e)
def getFormImportGPX(self):
return self.getForm(objectType=1, moduleName='ImportFileForm')
def lastImported(self, obj, params):
'''
adds all of these criteria when this parameter is applied
'''
user = self.request.authenticated_userid['iss']
dateFrom = datetime.today() - timedelta(days=2)
dateFrom = dateFrom.replace(
hour=0,
minute=0,
second=0,
microsecond=0
)
obj['Operator'] = '='
obj['Value'] = True
criteria = [
{
'Column': 'creator',
'Operator': '=',
'Value': user
},
{
'Column': 'FK_StationType',
'Operator': '=',
'Value': 4 # => TypeID of GPX station
},
{
"Column": "creationDate",
"Operator": ">=",
"Value": dateFrom.strftime("%Y-%m-%dT%H:%M:%SZ")
}
]
params['criteria'].extend(criteria)
def handleCriteria(self, params):
if 'criteria' in params:
lastImported = False
for obj in params['criteria']:
if obj['Column'] == 'LastImported':
self.lastImported(obj, params)
lastImported = True
if not lastImported:
map(lambda x: obj['Column'] != 'FK_StationType', params['criteria'])
removePending = [
{
'Column': 'FK_StationType',
'Operator': 'Is not',
'Value': 6 # => TypeID of pending stations
}
]
params['criteria'].extend(removePending)
if 'geo' in self.request.params.mixed():
self.getGeoJsonParams(params)
return params
def handleResult(self, result):
if 'geo' in self.request.params.mixed():
data = self.getGeoJsonResult(result)
else:
data = self.getFieldWorkers(result)
# data = result
return data
def handleCount(self, count, callback, params):
if 'geo' in self.request.params.mixed() and count > 50000:
return []
else:
return callback(**params)
def retrieve(self):
if 'geo' in self.request.params.mixed():
paging = False
else:
paging = True
return self.search(paging=paging)
def deleteMany(self):
error = False
data = {}
if len(self.request.json_body) > 0 :
session = self.request.dbsession
stas = session.query(Station).filter(Station.ID.in_(self.request.json_body)).all()
for sta in stas:
data[str(sta.ID)] = 'not deleted'
try :
session.delete(sta)
data[str(sta.ID)] = 'deleted'
except :
self.request.response.status_code = 502
return data
def deleteManyWithCamTrap(self):
error = False
data = {}
if len(self.request.json_body) > 0 :
session = self.request.dbsession
stas = session.query(Station).filter(Station.ID.in_(self.request.json_body)).all()
camtraps = session.query(CamTrap).filter(CamTrap.stationId.in_(self.request.json_body)).all()
if len(camtraps):
for cam in camtraps:
data[str(cam.stationId)] = 'not exist'
flagNotFound = True
for sta in stas:
if sta.ID == cam.stationId:
flagNotFound = False
data[str(cam.stationId)] = 'not deleted'
try:
session.delete(sta)
cam.stationId = None
session.add(cam)
data[str(cam.stationId)] = 'deleted'
except:
self.request.response.status_code = 502
if flagNotFound:
try:
cam.stationId = None
session.add(cam)
except:
self.request.response.status_code = 502
return data
def getFieldActivityList(self):
query = select([fieldActivity.ID.label('value'),
fieldActivity.Name.label('label')])
result = self.session.execute(query).fetchall()
res = []
for row in result:
res.append({'label': row['label'], 'value': row['value']})
return sorted(res, key=lambda x: x['label'])
def getFieldWorkers(self, data):
params, history, startDate = self.formatParams({}, paging=True)
# params = {'selectable': ['ID'],
# 'filters':params.get('criteria', [])#,
# #'offset':params.get('offset'),
# #'limit':params.get('per_page')#,
# #'order_by':params.get('order_by')
# }
params = {
'selectable': [a.get('Column') for a in params.get('criteria')],
'filters': params.get('criteria', [])
}
queryTmp = self.collection.build_query(**params)
queryTmp = queryTmp.with_only_columns([getattr(self.model, 'ID')])
queryCTE = queryTmp.cte()
# queryCTE = self.collection.build_query(**params).cte()
joinFW = join(
Station_FieldWorker,
User,
Station_FieldWorker.FK_FieldWorker == User.id
)
joinTable = join(
queryCTE,
joinFW,
queryCTE.c['ID'] == Station_FieldWorker.FK_Station
)
query = select([
Station_FieldWorker.FK_Station,
User.Login
]).select_from(joinTable)
FieldWorkers = self.session.execute(query).fetchall()
list_ = {}
for x, y in FieldWorkers:
list_.setdefault(x, []).append(y)
for row in data[1]:
try:
row['FK_FieldWorker_FieldWorkers'] = list_[row['ID']]
except Exception as e:
print(e)
pass
return data
def getGeoJsonParams(self, params):
params['order_by'] = []
criteria = [{'Column': 'LAT',
'Operator': 'Is not',
'Value': None
},
{'Column': 'LON',
'Operator': 'Is not',
'Value': None
}]
params['criteria'].extend(criteria)
def getGeoJsonResult(self, data):
geoJson = []
exceed = True
countResult = data[0]['total_entries']
result = data[1]
if countResult < 50000:
exceed = False
for row in result:
geoJson.append({
'type': 'Feature',
'properties': {
'name': row['Name'],
'date': row['StationDate']},
'geometry': {
'type': 'Point',
'coordinates': [row['LAT'], row['LON']]}
})
data = {'type': 'FeatureCollection',
'features': geoJson,
'exceed': exceed}
return data
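# Illustrative shape of the returned payload (values are hypothetical):
# {'type': 'FeatureCollection',
#  'exceed': False,
#  'features': [{'type': 'Feature',
#                'properties': {'name': 'STA-001', 'date': '2016-05-12'},
#                'geometry': {'type': 'Point', 'coordinates': [lat, lon]}}]}
# Note that the coordinates are emitted as [LAT, LON] here, mirroring the loop above.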
def insertMany(self):
### deprecated ???
session = self.request.dbsession
data = self.request.json_body
data_to_insert = []
format_dt = '%d/%m/%Y %H:%M'
dateNow = datetime.now()
model = self.model
# Rename field and convert date
# TODO
for row in data:
newRow = {}
newRow['LAT'] = row['latitude']
newRow['LON'] = row['longitude']
newRow['ELE'] = row['elevation']
newRow['precision'] = row['precision']
newRow['Name'] = row['name']
newRow['fieldActivityId'] = row['fieldActivity']
newRow['precision'] = 10 # row['Precision']
newRow['creationDate'] = dateNow
newRow['creator'] = self.request.authenticated_userid['iss']
newRow['FK_StationType'] = 4
newRow['id'] = row['id']
newRow['NbFieldWorker'] = row['NbFieldWorker']
newRow['StationDate'] = datetime.strptime(
row['waypointTime'], format_dt)
if 'fieldActivity' in row:
newRow['fieldActivityId'] = row['fieldActivity']
if 'NbFieldWorker' in row:
newRow['NbFieldWorker'] = row['NbFieldWorker']
data_to_insert.append(newRow)
# Load date into pandas DataFrame then round LAT,LON into decimal(5)
DF_to_check = pd.DataFrame(data_to_insert)
DF_to_check['LAT'] = DF_to_check['LAT'].round(5)
DF_to_check['LON'] = DF_to_check['LON'].round(5)
maxDate = DF_to_check['StationDate'].max()
minDate = DF_to_check['StationDate'].min()
maxLon = DF_to_check['LON'].max()
minLon = DF_to_check['LON'].min()
maxLat = DF_to_check['LAT'].max()
minLat = DF_to_check['LAT'].min()
# Retrieve potential duplicated stations from Database
query = select([model]).where(
and_(
model.StationDate.between(minDate, maxDate),
model.LAT.between(minLat, maxLat)
)).where(model.LON.between(minLon, maxLon))
data_to_insert = []
result_to_check = pd.read_sql_query(query, session.get_bind())
if result_to_check.shape[0] > 0:
# IF potential duplicated stations, load them into pandas DataFrame
result_to_check['LAT'] = result_to_check['LAT'].round(5)
result_to_check['LON'] = result_to_check['LON'].round(5)
merge_check = pd.merge(DF_to_check, result_to_check, on=[
'LAT', 'LON', 'StationDate'])
# Get only non existing data to insert
DF_to_insert = DF_to_check[~DF_to_check['id'].isin(merge_check['id'])]
DF_to_insert = DF_to_insert.drop(['id'], 1)
data_to_insert = json.loads(DF_to_insert.to_json(
orient='records', date_format='iso'))
else:
data_to_insert = json.loads(DF_to_check.to_json(
orient='records', date_format='iso'))
staListID = []
nbExc = 0
if
cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(a2) * sin(b2) + sin(a1) * sin(b1) * cos(a2)) * sin(
a3) * cos(b3),
- (1 - cos(a3)) * (
-(1 - cos(a1)) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(b1) * cos(b1) - (1 - cos(a2)) * (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(a2) * sin(b1) * cos(
b2)) * sin(b3) * cos(b3) + (-(1 - cos(a3)) * sin(b3) ** 2 + 1) * (
(1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) - sin(a1) * sin(
a2) * sin(b1) * sin(b2)) - (-(1 - cos(a1)) * sin(a2) * sin(b1) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(a2) * sin(b2) + sin(a1) * sin(b1) * cos(a2)) * sin(
a3) * sin(b3),
(-(1 - cos(a1)) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(b1) * cos(b1) - (1 - cos(a2)) * (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(a2) * sin(b1) * cos(
b2)) * sin(a3) * cos(b3) + (-(1 - cos(a1)) * sin(a2) * sin(b1) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(a2) * sin(b2) + sin(a1) * sin(b1) * cos(a2)) * cos(
a3) + ((1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) - sin(a1) * sin(
a2) * sin(b1) * sin(b2)) * sin(a3) * sin(b3)],
[-(1 - cos(a3)) * ((1 - cos(a2)) * sin(a1) * sin(b2) * cos(b1) * cos(b2) - (
-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(a1) * sin(b1) - sin(a2) * sin(b2) * cos(a1)) * sin(
b3) * cos(b3) + (-(1 - cos(a3)) * cos(b3) ** 2 + 1) * (
(1 - cos(a2)) * sin(a1) * sin(b1) * sin(b2) * cos(b2) - (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(
a1) * cos(b1) - sin(a2) * cos(a1) * cos(b2)) - (
-sin(a1) * sin(a2) * sin(b1) * sin(b2) - sin(a1) * sin(a2) * cos(b1) * cos(b2) + cos(a1) * cos(
a2)) * sin(a3) * cos(b3),
- (1 - cos(a3)) * (
(1 - cos(a2)) * sin(a1) * sin(b1) * sin(b2) * cos(b2) - (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(
a1) * cos(b1) - sin(a2) * cos(a1) * cos(b2)) * sin(b3) * cos(b3) + (
-(1 - cos(a3)) * sin(b3) ** 2 + 1) * (
(1 - cos(a2)) * sin(a1) * sin(b2) * cos(b1) * cos(b2) - (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(
a1) * sin(b1) - sin(a2) * sin(b2) * cos(a1)) - (
-sin(a1) * sin(a2) * sin(b1) * sin(b2) - sin(a1) * sin(a2) * cos(b1) * cos(b2) + cos(a1) * cos(
a2)) * sin(a3) * sin(b3),
(-sin(a1) * sin(a2) * sin(b1) * sin(b2) - sin(a1) * sin(a2) * cos(b1) * cos(b2) + cos(a1) * cos(
a2)) * cos(
a3) + (
(1 - cos(a2)) * sin(a1) * sin(b1) * sin(b2) * cos(b2) - (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(
a1) * cos(b1) - sin(a2) * cos(a1) * cos(b2)) * sin(a3) * cos(b3) + (
(1 - cos(a2)) * sin(a1) * sin(b2) * cos(b1) * cos(b2) - (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(
a1) * sin(b1) - sin(a2) * sin(b2) * cos(a1)) * sin(a3) * sin(b3)]]
)
self.n = [R[0][0], -R[1][0], -R[2][0]]
self.o = [R[0][1], -R[1][1], -R[2][1]]
self.a = [R[0][2], -R[1][2], -R[2][2]]
#self.euler_alpha, self.euler_beta, self.euler_gamma = self.Arms.desired_orientation_euler()
self.euler_beta = atan2(sqrt(float(R[2][0] ** 2 + R[2][1] ** 2)), R[2][2])
if -pi/180<self.euler_beta<pi/180:
self.euler_beta=0
if sin(self.euler_beta) != 0 :
self.euler_alpha = atan2(R[1][2] / sin(self.euler_beta), R[0][2] / sin(self.euler_beta ))
self.euler_gamma = atan2(R[2][1] / sin(self.euler_beta), -R[2][0] / sin(self.euler_beta ))
elif self.euler_beta == 0:
self.euler_alpha = 0
self.euler_gamma = atan2(-R[0][1], R[0][0])
elif 99*pi/100<self.euler_beta <= pi:
self.euler_gamma = atan2(R[0][1], -R[0][0])
self.euler_alpha = 0
def create_circle(self):
def create(L, r):
num_res = 200
pos3 = np.zeros((num_res, num_res, 3))
pos3[:, :, :2] = np.mgrid[:num_res, :num_res].transpose(1, 2, 0) * [-0.1, 0.1]
pos3 = pos3.reshape(num_res**2, 3)
d3 = (pos3 ** 2).sum(axis=1) ** 0.5
area = L  # region in which candidate points are generated
ring_res = 0.15  # ring thickness
for i in range(num_res):
pos3[i * num_res:num_res * (i + 1), 0] = -area + 2*area*i/num_res
pos3[i * num_res:num_res * (i + 1), 1] = np.linspace(-area, area, num_res)
pos3[i * num_res:num_res * (i + 1), 2] = 0
count = 0
list1 = list()
rad_ring = r  # distance between ring centres
ring = 0.06*10  # ring radius
for i in range(num_res**2):
if (ring - ring_res) ** 2 < ((pos3[i, 1]) ** 2 + (pos3[i, 0]-rad_ring) ** 2 )< ring**2 or\
(ring - ring_res) ** 2 < ((pos3[i, 1]+rad_ring*0.866) ** 2 + (pos3[i, 0]-rad_ring/2) ** 2)<ring**2 or\
(ring - ring_res) ** 2 < ((pos3[i, 1]+rad_ring*0.866) ** 2 + (pos3[i, 0]+rad_ring/2) ** 2)< ring**2 or\
(ring - ring_res) ** 2 < ((pos3[i, 1]-rad_ring*0.866) ** 2 + (pos3[i, 0]-rad_ring/2) ** 2)< ring**2 or\
(ring - ring_res) ** 2 < ((pos3[i, 1]-rad_ring*0.866) ** 2 + (pos3[i, 0]+rad_ring/2) ** 2)< ring**2 or\
(ring - ring_res) ** 2 < ((pos3[i, 1]) ** 2 + (pos3[i, 0]+rad_ring) ** 2)< ring**2 :
list1.append(i)
backup = list()
for i in list1:
backup.append(pos3[i])
return backup
self.backup = create(L = 3, r = 0.0615*self.multi)
self.sp = list()
self.base = list()
color = {0:pg.glColor(40,20,5),1:pg.glColor(40,20,5),2:pg.glColor(40,20,5),3:pg.glColor(40,40,0),4:pg.glColor(40,40,0),5:pg.glColor(40,40,0),6:pg.glColor(0,40,40),7:pg.glColor(0,40,40),8:pg.glColor(0,40,40)}
for i in range(self.num_seg+1):
self.sp.append(gl.GLScatterPlotItem(pos=self.backup, size=0.08, pxMode=False, color = color[1]))
self.w.addItem(self.sp[i])
def vector_display(self, vector, pos, num, multipy=0, rgb=0):
color = [0,pg.glColor(255,0,0),pg.glColor(0,255,0),pg.glColor(0,0,255)]
if not num in self.pointer.keys():
if not rgb:
self.pointer[num] = gl.GLLinePlotItem(color=pg.glColor((40*num, 50)),
width=2, antialias=True)
else:
self.pointer[num] = gl.GLLinePlotItem(color=color[rgb],
width=2, antialias=True)
self.w.addItem(self.pointer[num])
length = 1
if multipy:
length = multipy
x = np.linspace(0, float(vector[0])*length, 10)
y = np.linspace(0, float(vector[1])*length, 10)
z = np.linspace(0, float(vector[2])*length, 10)
pts = np.vstack([x, y, z]).transpose() + pos
self.pointer[num].setData(pos=pts)
def update_circle(self, seg):
#for seg in range(self.num_seg):
if seg == self.num_seg:
# template (parent) point set
data = self.backup
self.sp[seg].setData(pos=np.add(data, self.pts[0][0][:]))
else:
vector1 = np.subtract(self.pts[seg][1], self.pts[seg][0])
vector2 = np.subtract(self.pts[seg][2], self.pts[seg][0])
result = -np.cross(vector1, vector2)
# normalize to a unit vector
mod = np.sqrt(np.square(result[0])+np.square(result[1])+np.square(result[2]))
if mod:
result = np.divide(result, mod)
# rotation axis
if not seg:
data = self.backup
else:
data = np.subtract(self.sp[seg - 1].pos, self.pts[seg - 1][18])
spin = -np.array(linalg.expm(np.multiply(self.alpha[seg], self.hat(result))))
self.sp[seg].setData(pos=np.add(np.dot(data, spin), self.pts[seg][18][:]))
def hat(self, vector):
hat = np.array([
[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]
])
return hat
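# Worked example (illustrative): hat([0, 0, 1]) is the skew-symmetric matrix
#   [[0, -1, 0],
#    [1,  0, 0],
#    [0,  0, 0]]
# so that np.dot(hat(v), u) equals np.cross(v, u), and linalg.expm(theta * hat(axis))
# is the rotation matrix about the unit axis by the angle theta (Rodrigues' formula),
# which is how update_circle() and transfer() build their spin matrices.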
def transfer(self) :
# two rotations per segment
for seg in range(1, self.num_seg):
#print(seg)
angle_x = acos(np.dot(self.nx[seg], self.nx[0]))
# cross product of the previous node's x-axis and the next node's x-axis
axis_x = np.cross(self.nx[seg], self.nx[0])
mod = np.sqrt(axis_x[0]**2+axis_x[1]**2+axis_x[2]**2)
if mod:
axis_x = np.divide(axis_x, mod)
spin_x = np.array(linalg.expm(np.multiply(angle_x, self.hat(axis_x))))
nz = np.dot(self.nz[0], spin_x)
angle_z = arccos(np.clip(np.dot(nz, self.nz[seg]), -1.0, 1.0))
# compare the rotated result; if it does not match, flip the rotation sign
right = 1
while right:
spin_z = np.array(linalg.expm(np.multiply(angle_z, self.hat(self.nx[seg]))))
check = np.dot(nz, spin_z) - self.nz[seg]
if -0.005<check[0] <0.005 and -0.005<check[1] <0.005 and -0.005<check[2] <0.005:
right = 0
else:
angle_z = -angle_z
self.pts[seg] = np.dot(np.dot(self.pts[seg], spin_x), spin_z)
self.pts[seg] += self.pts[seg-1][18]
self.nx[seg+1] = np.dot(np.dot(self.nx[seg+1], spin_x), spin_z)
self.nz[seg+1] = np.dot(np.dot(self.nz[seg+1], spin_x), spin_z)
self.angle[seg] = self.nz[seg+1]
for i in range(self.num_seg):
for j in range(19): # flip the y and z coordinates
self.pts[i][j][1] = -self.pts[i][j][1]
self.pts[i][j][2] = -self.pts[i][j][2]
self.traces[i].setData(pos=self.pts[i])
if self.circle_show:
pass
# self.update_circle()
# 基于向量
def transfer_line(self):
def transA(alpha):
result = np.array(
[[cos(alpha), 0, -sin(alpha)],
[0, 1, 0],
[sin(alpha), 0, cos(alpha)]]
)
return result
def transB(beta):
result = np.array(
[[cos(beta), sin(beta), 0],
[-sin(beta), cos(beta), 0],
[0, 0, 1]]
)
return result
def transA_(alpha, base):
result = np.array(
[[cos(alpha), 0, -sin(alpha), 0],
[0, 1, 0, 0],
[sin(alpha), 0, cos(alpha), 0],
[base[0], base[1],
= wgb.point3DListtoMemorySafeCoordList(sb,coordinatesList)
#pg = gbx.BasicSerialization.makegbPlanarGeom(memsafelist)
#namebase = "su-"+str(uniquesurfcount)
namebase = "su-"+surface.name
pg,gbsurfname = wgb.makegbPlanarGeoms(coordinatesList,0,namebase)
#for all surfaces
if isinstance(pg,list) or isinstance(pg,tuple):
for pgcount,apg in enumerate(pg):
#normal surface
if len(coordinatesList) == 1:
sb = gbx.SpaceBoundary()
sb.surfaceIdRef = gbsurfname
sb.PlanarGeometry = apg
sblist.append(sb)
uniquesurfcount += 1
HBsurfaces[sb.surfaceIdRef] = surface
#meshed surface
else:
#tempsurface = copy.deepcopy(surface)
sb = gbx.SpaceBoundary()
sb.surfaceIdRef = gbsurfname+"_"+str(pgcount)
sb.PlanarGeometry = apg
sblist.append(sb)
uniquesurfcount += 1
HBsurfaces[sb.surfaceIdRef] = surface
else:
#it is an interior surface that has already been found
pass
#now add the space boundaries to the array
#write the space boundaries only for those that are unique
space.spbound = gbx.BasicSerialization.makeSBArray(len(sblist))
for sbcount,createdsb in enumerate(sblist):
space.spbound[sbcount] = createdsb
logging.info('Space boundaries created.')
return space,sharedint, uniquesurfcount,HBsurfaces
def writeSurfaces(self,cmp,hbsurfacetypes,uniquesurfcount,usedconstructions,usedopening,tsc):
logging.debug('Writing gb surfaces.')
cmp.Surface = gbx.BasicSerialization.defSurfaceArray(tsc)
print 'writing surfaces'
gbxmlSpaces = cmp.Buildings[0].Spaces
openingct = 0
surfnum = 0
for space in gbxmlSpaces:
sbcount = 0
while (sbcount < len(space.spbound)):
logging.info('Getting honeybee surface information to translate it.')
hbsurface = hbsurfacetypes[space.spbound[sbcount].surfaceIdRef]
coordinatesList = hbsurface.coordinates ##.extractPoints()
#do the work to map the coordinatesList appropriately,
#this will change as the honeybee object changes
#for now, here is the test for an unmeshed surface
#unmeshed surface
#make a new surface
logging.info("This honeybee surface is unmeshed.")
surface = gbx.Surface()
surface.id = space.spbound[sbcount].surfaceIdRef
#by convention, added by MR Jan 20 2014
LLeft = coordinatesList[0]
memsafelist = wgb.point3DListtoMemorySafeCoordList([coordinatesList])
normal = v.Vector.GetMemRHR(memsafelist[0])
#get tilt
tilt=gbx.prod.FindTilt(normal)
cp = gbx.CartesianPoint()
#this is a hardcoded value and should be changed; it can cause bugs
if tilt <= 30:
#roof LL is unique by gbXML convention
logging.debug("Found roof with tilt:"+str(tilt))
llr = gbx.BasicSerialization.GetLLForRoof(memsafelist[0])
cp = llr.cp
elif tilt == 180:
#floor LL is unique by gbXML convention
logging.debug("Found floor with tilt:"+str(tilt))
llf = gbx.BasicSerialization.GetLLForFloor(memsafelist[0])
cp = llf.cp
else:
logging.debug("Assumed a wall with tilt:"+str(tilt))
LLeft = coordinatesList[0]
cp = wgb.makegbCartesianPt(LLeft)
normarr = []
normarr.append(normal.X)
normarr.append(normal.Y)
normarr.append(normal.Z)
#get the azimuth and tilt and assign the construction yourself
#this is a hack to get around honeybee's nested surface
surface.surfaceType = wgb.mapSurfaceTypes(hbsurface.type)
surface.constructionIdRef = "OpenStudio_"+hbsurface.construction.replace(" ","_")
usedconstructions.append(hbsurface.construction)
surface.Name = hbsurface.name
#make adjacent space identifications, which depend on surf type
parentname = hbsurface.parent.name
try:
neighborparentname = hbsurface.BCObject.parent.name
adjSpaces = gbx.BasicSerialization.defAdjSpID(2)
adjSp1 = gbx.AdjacentSpaceId()
adjSp1.spaceIdRef = "Space_"+parentname
adjSpaces[0] = adjSp1
adjSp2 = gbx.AdjacentSpaceId()
adjSp2.spaceIdRef = "Space_"+neighborparentname
adjSpaces[1] = adjSp2
surface.AdjacentSpaceId = adjSpaces
if hbsurface.type == 0:
surface.surfaceType = gbx.surfaceTypeEnum.InteriorWall
except:
neighborparentname = str.Empty
adjSpaces = gbx.BasicSerialization.defAdjSpID(1)
adjSp = gbx.AdjacentSpaceId()
adjSp.spaceIdRef = "Space_"+parentname
adjSpaces[0] = adjSp
surface.AdjacentSpaceId = adjSpaces
rg = gbx.RectangularGeometry()
#get azimuth
#need to add something for CADModel Azimuth eventually
az = gbx.prod.FindAzimuth(normal)
rg.Azimuth = '%.6f' % az
#set the rg to the found cp
rg.CartesianPoint = cp
rg.Tilt = '%.6f' % tilt
#add code to check for normal quads before making this simplification
#get width
area = v.Vector.GetAreaofMemSafeCoords(memsafelist[0])
ht=0
width=0
try:
sqrnt,height,wid = wgb.isItSquare(memsafelist[0])
if (sqrnt):
ht = min(height,wid)
width= max(height,wid)
else:
ht = 10
width = area/ht
except:
ht = 10
width = area/ht
rg.Width = '%.6f' % width
#get height
rg.Height = '%.6f' % ht
surface.RectangularGeometry = rg
pg = gbx.BasicSerialization.makegbPlanarGeom(memsafelist)
surface.PlanarGeometry = pg
if hbsurface.hasChild:
logging.debug("Making glazing surfaces.")
hbwindows = hbsurface.childSrfs
gbopenings,usedopening = wgb.makegbOpening(hbwindows,hbsurface.name, usedopening,openingct)
openingct += 1
surface.Opening = gbopenings
CAD = gbx.CADObjectId()
#this should only occur if the surface is totaly new (bad idea)
CAD.id = str(uuid.uuid4())
surface.CADObjectId = CAD
cmp.Surface[surfnum] = surface
sbcount += 1
surfnum += 1
#write shading devices
cmp.Surface = wgb.makeShdSurface(HBContext,cmp.Surface, surfnum)
logging.info('Making surfaces completed.')
return cmp,usedconstructions,usedopening
def makeShdSurface(self, shades, surfaceList,index):
print 'making shades', shades
logging.debug('Making shading surfaces.')
#this has to be here because I may have to make both meshed and unmeshed shadings
totalcount = 0+index
for surfnum,shade in enumerate(shades):
surfnum = surfnum+index
#coordinateList contains all the shade points for all shades
coordinatesList = shade.extractPoints() # I haven't applied reEvaluate zones to shading surfaces
#print coordinatesList
try:
len(coordinatesList[0][0])
print 'meshed surface'
for subcount,ss in enumerate(coordinatesList):
surface = gbx.Surface()
shadeid = "su-"+str(surfnum)+"-"+str(subcount)
surface.id = shadeid
memsafelist = wgb.point3DListtoMemorySafeCoordList([coordinatesList[subcount]])
normal = v.Vector.GetMemRHR(memsafelist[0])
LLeft = memsafelist[0][0]
cp = gbx.CartesianPoint()
cp = wgb.makegbCartesianPt(LLeft)
normarr = []
normarr.append(normal.X)
normarr.append(normal.Y)
normarr.append(normal.Z)
#get the azimuth and tilt and assign the construction yourself
#this is a hack to get around honeybee's nested surface
surface.surfaceType = wgb.mapSurfaceTypes(shade.type)
surface.Name = shade.name
rg = gbx.RectangularGeometry()
#get azimuth
#need to add something for CADModel Azimuth eventually
az = gbx.prod.FindAzimuth(normal)
rg.Azimuth = '%.6f' % az
#get tilt
tilt=gbx.prod.FindTilt(normal)
#set the rg to the found cp
rg.CartesianPoint = cp
rg.Tilt = '%.6f' % tilt
#add code to check for normal quads before making this simplification
#get width
area = v.Vector.GetAreaofMemSafeCoords(memsafelist[0])
sqrnt,height,wid = wgb.isItSquare(memsafelist[0])
if (sqrnt):
ht = min(height,wid)
width= max(height,wid)
else:
ht = 10
width = area/ht
rg.Width = '%.6f' % width
#get height
rg.Height = '%.6f' % ht
surface.RectangularGeometry = rg
pg = gbx.BasicSerialization.makegbPlanarGeom(memsafelist)
surface.PlanarGeometry = pg
CAD = gbx.CADObjectId()
#this should only occur if the surface is totally new (bad idea)
CAD.id = str(uuid.uuid4())
surface.CADObjectId = CAD
surfaceList[totalcount] = surface
totalcount+=1
except:
surface = gbx.Surface()
surface.id = "su-"+str(surfnum)
#by convention, added by MR Jan 20 2014
LLeft = coordinatesList[0]
memsafelist = wgb.point3DListtoMemorySafeCoordList([coordinatesList])
normal = v.Vector.GetMemRHR(memsafelist[0])
#get tilt
tilt=gbx.prod.FindTilt(normal)
#assign lower left, no special intelligence
cp = gbx.CartesianPoint()
cp = wgb.makegbCartesianPt(LLeft)
normarr = []
normarr.append(normal.X)
normarr.append(normal.Y)
normarr.append(normal.Z)
#get the azimuth and tilt and assign the construction yourself
#this is a hack to get around honeybee's nested surface
surface.surfaceType = wgb.mapSurfaceTypes(shade.type)
surface.Name = shade.name
rg = gbx.RectangularGeometry()
#get azimuth
#need to add something for CADModel Azimuth eventually
az = gbx.prod.FindAzimuth(normal)
rg.Azimuth = '%.6f' % az
#set the rg to the found cp
rg.CartesianPoint = cp
rg.Tilt = '%.6f' % tilt
#get width
area = v.Vector.GetAreaofMemSafeCoords(memsafelist[0])
sqrnt,height,wid = wgb.isItSquare(memsafelist[0])
if (sqrnt):
ht = min(height,wid)
width= max(height,wid)
else:
ht = 10
width = area/ht
rg.Width = '%.6f' % width
#get height
rg.Height = '%.6f' % ht
surface.RectangularGeometry = rg
pg = gbx.BasicSerialization.makegbPlanarGeom(memsafelist)
surface.PlanarGeometry = pg
CAD = gbx.CADObjectId()
#this should only occur if the surface is totaly new (bad idea)
CAD.id = str(uuid.uuid4())
surface.CADObjectId = CAD
surfaceList[totalcount] = surface
totalcount+=1
return surfaceList
def makegbOpening(self,hbwindows,parentsurfacename,usedopening,openingct):
logging.debug('Making gb openings from hb openings.')
gbopenings = gbx.BasicSerialization.defOpeningsArr(len(hbwindows))
defaz = 0
deftilt = 0
defht = 10
defllx = 0
deflly = 0
defllz = 0
for count,window in enumerate(hbwindows):
gbopen = gbx.Opening()
#do all naming
gbopen.id = "OpenStudio_"+window.name.replace(' ','_')+str(openingct)
usedopening[window.name] = window.construction
opCoordsList = window.coordinates ##.extractPoints()
windowpts = list([opCoordsList])
wmemsafelist = wgb.point3DListtoMemorySafeCoordList(windowpts)
parent = window.parent
surfCoordsList = parent.coordinates ##extractPoints()
surfpts = list([surfCoordsList])
smemsafelist = wgb.point3DListtoMemorySafeCoordList(surfpts)
try:
logging.info('getting lower left corner of window.')
cp = gbx.BasicSerialization.GetLLForOpening(smemsafelist[0],wmemsafelist[0])
except:
logging.error('problem when getting lower left corner.' + str(sys.exc_info()[0]))
parentsurfacename = parent.name
#we will always have fixed windows until code can accommodate
gbopen.openingType = gbx.openingTypeEnum.FixedWindow
gbopen.Name = window.name ##parentsurfacename+" Window-"+str(count)
wrg = gbx.RectangularGeometry()
try:
wrg.CartesianPoint = cp.cp
except:
logging.error("problem making window Rectangular Geometry."+sys.exc_info()[0])
wrg.Azimuth = '%.6f' % defaz
#we think the default tilt will always be this way
wrg.Tilt = '%.6f' % deftilt
warea = window.getTotalArea()
logging.info("Window area is: " + str(warea))
sqrnt,height,wid = wgb.isItSquare(wmemsafelist[0])
if (sqrnt):
ht = min(height,wid)
width= max(height,wid)
else:
ht = 10
width = warea/ht
wrg.Height = '%.6f' % ht
wrg.Width = '%.6f' % width
gbopen.rg = wrg
#define planar geometry
wpg = gbx.BasicSerialization.makegbPlanarGeom(wmemsafelist)
gbopen.pg = wpg
#add the finished product to the array
gbopenings[count] = gbopen
logging.info('Openings for gb successfully made.')
return gbopenings,usedopening
def makegbXMLevels(self,rhinolevels, bldg):
print ('Making gbxml Levels.')
print rhinolevels
bldlevels = gbx.BasicSerialization.setLevelsArray(len(rhinolevels))
bldg.bldgStories = bldlevels
for lcount, level in enumerate(rhinolevels):
storey = gbx.BuildingStorey()
storey.id = 'bldg-story-'+str(lcount+1)
storey.Name = 'Level-'+str(lcount+1)
storey.Level = '%.6f' % level
coordinates = wgb.makeLevelCoords(level)
#make planar geometry
pg = gbx.PlanarGeometry()
pl = gbx.PolyLoop()
pg.PolyLoop = pl
pl.Points = gbx.BasicSerialization.makeCartesianPtArray(len(coordinates))
for count,pt in enumerate(coordinates):
cp = gbx.CartesianPoint()
| |
# coding: utf-8
import requests
try:
from sseclient import SSEClient
except ImportError:
SSEClient = None
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2
from urllib import urlencode
from .exceptions import HorizonError
HORIZON_LIVE = "https://horizon.stellar.org"
HORIZON_TEST = "https://horizon-testnet.stellar.org"
class Horizon(object):
def __init__(self, horizon=None, sse=False, timeout=20):
"""The :class:`Horizon` object, which represents the interface for
making requests to a Horizon server instance.
This class aims to be up to date with Horizon's API endpoints; however,
you can utilize the internal session via ``self.session`` (which is a
:class:`requests.Session` object) to make arbitrary requests to
a Horizon instance's API.
In general, on HTTP errors (non 2XX/3XX responses), no exception is
raised, and the return dictionary must be checked to see if it is an
error or a valid response. Any other errors however are raised by this
class.
:param str horizon: The horizon base URL
:param bool sse: Default to using server side events for streaming
responses when available.
:param int timeout: The timeout for all requests.
"""
if sse and SSEClient is None:
raise ValueError('SSE not supported, missing sseclient module')
if horizon is None:
self.horizon = HORIZON_TEST
else:
self.horizon = horizon
self.session = requests.Session()
self.sse = sse
self.timeout = timeout
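# Usage sketch (illustrative; the account ID is a placeholder and sse streaming
# requires the optional sseclient dependency):
#   horizon = Horizon()                    # defaults to the testnet instance
#   horizon.account('GA...PLACEHOLDER')    # -> dict with the account details
#   horizon.transactions(sse=True)         # -> SSEClient yielding streamed events
#   horizon.submit(te)                     # te is a base64-encoded transaction envelope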
def _request(self, verb, endpoint, **kwargs):
url = '{base}{endpoint}'.format(base=self.horizon, endpoint=endpoint)
if kwargs.get('sse', False):
if 'params' in kwargs and kwargs['params']:
url = '{}?{}'.format(url, urlencode(kwargs['params']))
messages = SSEClient(url)
return messages
else:
try:
# FIXME: We should really consider raising the HTTPError when
# it happens and wrapping its JSON response in a HorizonError
resp = self.session.request(
verb, url, timeout=self.timeout, **kwargs)
return resp.json()
except requests.RequestException:
raise HorizonError(
'Could not successfully make a request to Horizon.')
def _get(self, endpoint, **kwargs):
# If sse has been passed in by an endpoint (meaning it supports sse)
# but it hasn't explicitly been set by the request, default to
# this instance's setting on SSE requests.
if 'sse' in kwargs and kwargs['sse'] is None:
kwargs['sse'] = self.sse
return self._request('GET', endpoint, **kwargs)
def _post(self, endpoint, **kwargs):
return self._request('POST', endpoint, **kwargs)
def submit(self, te, **kwargs):
"""Submit a transaction to Horizon.
`POST /transactions
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-create.html>`_
Uses form-encoded data to send over to Horizon.
:param bytes te: The transaction envelope to submit
:return: The JSON response indicating the success/failure of the
submitted transaction.
:rtype: dict
"""
payload = {'tx': te}
return self._post('/transactions', data=payload, **kwargs)
def account(self, address, **kwargs):
"""Returns information and links relating to a single account.
`GET /accounts/{account}
<https://www.stellar.org/developers/horizon/reference/endpoints/accounts-single.html>`_
:param str address: The account ID to retrieve details about
:return: The account details in a JSON response
:rtype: dict
"""
endpoint = '/accounts/{account_id}'.format(account_id=address)
return self._get(endpoint, **kwargs)
def account_data(self, account_id, data_key, **kwargs):
"""This endpoint represents a single data associated with a given
account.
`GET /accounts/{account}/data/{key}
<https://www.stellar.org/developers/horizon/reference/endpoints/data-for-account.html>`_
:param str account_id: The account ID to look up a data item from
:param str data_key: The name of the key for the data item in question
:return: The value of the data field for the given account and data
key
:rtype: dict
"""
endpoint = '/accounts/{account_id}/data/{data_key}'.format(
account_id=account_id, data_key=data_key)
return self._get(endpoint, **kwargs)
def account_effects(self, address, params=None, sse=None, **kwargs):
"""This endpoint represents all effects that changed a given account.
`GET /accounts/{account}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-account.html>`_
:param str address: The account ID to look up effects for.
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:param bool sse: Use server side events for streaming responses
:return: The list of effects in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/effects'.format(account_id=address)
return self._get(endpoint, params=params, sse=sse, **kwargs)
def account_offers(self, address, params=None, **kwargs):
"""This endpoint represents all the offers a particular account makes.
`GET /accounts/{account}/offers{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/offers-for-account.html>`_
:param str address: The account ID to retrieve offers from
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: The list of offers for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/offers'.format(account_id=address)
return self._get(endpoint, params=params, **kwargs)
def account_operations(self, address, params=None, sse=None, **kwargs):
"""This endpoint represents all operations that were included in valid
transactions that affected a particular account.
`GET /accounts/{account}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-account.html>`_
:param str address: The account ID to list operations on
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:param bool sse: Use server side events for streaming responses
:return: The list of operations for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/operations'.format(
account_id=address)
return self._get(endpoint, params=params, sse=sse, **kwargs)
def account_transactions(self, address, params=None, sse=None, **kwargs):
"""This endpoint represents all transactions that affected a given
account.
`GET /accounts/{account_id}/transactions{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-account.html>`_
:param str address: The account ID to list transactions from
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: The list of transactions for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/transactions'.format(
account_id=address)
return self._get(endpoint, params=params, sse=sse, **kwargs)
def account_payments(self, address, params=None, sse=None, **kwargs):
"""This endpoint responds with a collection of Payment operations where
the given account was either the sender or receiver.
`GET /accounts/{id}/payments{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/payments-for-account.html>`_
:param str address: The account ID to list payments to/from
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:param bool sse: Use server side events for streaming responses
:return: The list of payments for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/payments'.format(
account_id=address)
return self._get(endpoint, params=params, sse=sse, **kwargs)
def assets(self, params=None, **kwargs):
"""This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: A list of all valid payment operations
:rtype: dict
"""
endpoint = '/assets'
return self._get(endpoint, params=params, **kwargs)
def transactions(self, params=None, sse=None, **kwargs):
"""This endpoint represents all validated transactions.
`GET /transactions{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-all.html>`_
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:param bool sse: Use server side events for streaming responses
:return: The list of all transactions
:rtype: dict
"""
endpoint = '/transactions'
return self._get(endpoint, params=params, sse=sse, **kwargs)
def transaction(self, tx_hash, **kwargs):
"""The transaction details endpoint provides information on a single
transaction.
`GET /transactions/{hash}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_
:param str tx_hash: The hex-encoded transaction hash
:return: A single transaction's details
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}'.format(tx_hash=tx_hash)
return self._get(endpoint, **kwargs)
def transaction_operations(self, tx_hash, params=None, **kwargs):
"""This endpoint represents all operations that are part of a given
transaction.
`GET /transactions/{hash}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: A single transaction's operations
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/operations'.format(
tx_hash=tx_hash)
return self._get(endpoint, params=params, **kwargs)
def transaction_effects(self, tx_hash, params=None, **kwargs):
"""This endpoint represents all effects that occurred as a result of a
given transaction.
`GET /transactions/{hash}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: A single transaction's effects
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/effects'.format(
tx_hash=tx_hash)
return self._get(endpoint, params=params, **kwargs)
def transaction_payments(self, tx_hash, params=None, **kwargs):
"""This endpoint represents all payment operations that are part of a
given transaction.
`GET /transactions/{hash}/payments{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/payments-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash
:param dict params: The query parameters to pass to this request, such
as cursor, order, and limit.
:return: A single transaction's payment operations
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/payments'.format(
tx_hash=tx_hash)
return self._get(endpoint, params=params, **kwargs)
def order_book(self, params=None, **kwargs):
"""Return, for each orderbook, a summary of the orderbook and the bids
and asks associated with that orderbook.
See the external docs below for information on the arguments required.
`GET /order_book
<https://www.stellar.org/developers/horizon/reference/endpoints/orderbook-details.html>`_
:param dict params: The query parameters to pass to this request.
:return:
PSM score
temp_result[26] = "XS:f:" + str(
row['search_score']['score'])
#XT: non/semi/tryptic
temp_result[27] = "XT:i:" + str(
enzyme_specificity)
#XU: petide URL
temp_result[28] = "XU:Z:*"
#YA: following 2AA:
temp_result[29] = "YA:Z:" + str(
pre_post_aa[1])
#YB: preceding 2AA
temp_result[30] = "YB:Z:" + str(
pre_post_aa[0])
#YP: protein accession ID from the original search
temp_result[31] = 'YP:Z:' + str(key)
                        # ZA additional field specifying the transcript/protein id used for mapping
temp_result[32] = "ZA:Z:" + str(
transcript_id)
# remove duplicates if rm_duplicates=Y
if rm_duplicates == "Y":
dup_key= str(temp_result[9])+"_"+\
str(str(temp_result[0])+"_"+temp_result[2])+"_"+str(temp_result[3])
if dup_key not in dup:
dup[dup_key] = 1
to_write.append(temp_result)
else:
to_write.append(temp_result)
if is_hit == 0:
to_write.append(
unannotated_PSM_to_SAM(
psm, row, decoy, key, enzyme,
enzyme_specificity))
print(" ")
print("Writing SAM-file")
for line in to_write:
write_psm(line, file)
file.close()
#
# Function to convert unannotated PSMs to SAM
#
def unannotated_PSM_to_SAM(psm, row, decoy, key, enzyme, enzyme_specificity):
'''
    :param psm: psm dictionary
    :param row: unannotated PSM row
    :param decoy: decoy boolean
    :param key: protein accession key
    :param enzyme: enzyme identifier
    :param enzyme_specificity: enzyme specificity (non/semi/tryptic)
    :return: SAM row (list of fields) for the unannotated PSM
'''
decoy = int(decoy)
temp_result = [None] * 33
#
# Mandatory columns adapted from SAM/BAM format
#
#QNAME
temp_result[0] = psm['spectrum']
#FLAG
temp_result[1] = '4'
#RNAME
temp_result[2] = '*'
#POS
temp_result[3] = 0
#MAPQ
temp_result[4] = 255
#CIGAR
temp_result[5] = '*'
#RNEXT
temp_result[6] = '*'
#PNEXT
temp_result[7] = 0
#TLEN
temp_result[8] = 0
#SEQ
temp_result[9] = '*'
#QUAL
temp_result[10] = '*'
#
#Mandatory proteomics specific columns added to the proBam format
#
    #NH: number of genomic locations the peptide maps to
temp_result[11] = 'NH:i:-1'
#XA: Whether the peptide is well annotated
temp_result[12] = 'XA:i:2'
#XB: Mass error
temp_result[13] = "XB:f:" + str(row['massdiff'])
#XC: Peptide charge
temp_result[14] = 'XC:i:' + str(psm['assumed_charge'])
#XE: enzyme
temp_result[15] = "XE:i:" + str(enzyme)
#XF: Reading frame of the peptide
temp_result[16] = "XF:Z:*"
    #XG: Peptide type
if decoy == 1:
temp_result[17] = "XG:Z:D"
else:
temp_result[17] = "XG:Z:U"
#XI: Peptide intensity
temp_result[18] = "XI:f:-1"
    #XL: number of peptides the spectrum maps to
temp_result[19] = 'XL:i:-1'
#XM: Modification
temp_result[20] = 'XM:Z:' + create_XM(row['modifications'])
    #XN: number of missed cleavages
if 'num_missed_cleavages' in row:
temp_result[21] = 'XN:i:' + str(row['num_missed_cleavages'])
else:
temp_result[21] = 'XN:i:-1'
#XO: uniqueness
temp_result[22] = 'XO:Z:*'
    #XP: peptide sequence
temp_result[23] = 'XP:Z:' + row['peptide']
# XQ: PSM-Qvalue
temp_result[24] = 'XQ:f:' + str(row['search_score']['evalue'])
#XR: reference peptide sequence
temp_result[25] = 'XR:Z:*'
# XS: PSM score
temp_result[26] = "XS:f:" + str(row['search_score']['score'])
#XT: non/semi/tryptic
temp_result[27] = "XT:i:" + str(enzyme_specificity)
    #XU: peptide URL
temp_result[28] = "XU:Z:*"
#YA: 2 AA after
temp_result[29] = 'YA:Z:*'
#YB: 2 AA before
temp_result[30] = 'YB:Z:*'
#YP: protein accession id
temp_result[31] = "YP:Z:" + str(key)
    #ZA additional field specifying the transcript/protein id used for mapping
temp_result[32] = "ZA:Z:*"
return temp_result
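# For orientation: each list returned above is one proBAM/SAM line; write_psm()
# presumably joins the 11 mandatory columns and the optional tags with tabs.
# An unannotated PSM row would then look roughly like this (values are
# illustrative only):
#
#   <spectrum>  4  *  0  255  *  *  0  0  *  *  NH:i:-1  XA:i:2  ...  ZA:Z:*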
#
# Function to convert decoy PSM to SAM format
#
def decoy_PSM_to_SAM(psm, row, key, enzyme, enzyme_specificity):
'''
    :param psm: psm dictionary
    :param row: row where the decoy was found
    :param key: psm key
    :param enzyme: enzyme identifier
    :param enzyme_specificity: enzyme specificity (non/semi/tryptic)
    :return: SAM of decoy PSM
'''
# LEGACY: map decoy to genome if map_decoy=Y
return unannotated_PSM_to_SAM(psm, row, 1, key, enzyme, enzyme_specificity)
'''
temp_result=[None]*23
if map_decoy=="Y":
protein_hit=map_peptide_to_protein(row['peptide'][::-1],transcript_hash[id_map[key]]['protein_seq'],allowed_mismatches)[0]
pre_post_aa=map_peptide_to_protein(row['peptide'][::-1],transcript_hash[id_map[key]]['protein_seq'],allowed_mismatches)[1]
else:
protein_hit=[]
if len(protein_hit)==0:
return unannotated_PSM_to_SAM(psm,row,1,key,enzyme,enzyme_specificity)
else:
# map peptide on protein and retrieve hit position, iterate over all hits
for phit in protein_hit:
temp_result=[None]*32
#
# Mandatory columns adapted from SAM/BAM format
#
#QNAME
temp_result[0]=psm['spectrum']
#FLAG
temp_result[1]=calculate_FLAG(transcript_hash[id_map[key]]['strand'],row['hit_rank'],
1)
#RNAME
temp_result[2]='chr'+str(transcript_hash[id_map[key]]['chr'])
#POS
temp_result[3]=calculate_genome_position(phit[0],
transcript_hash[id_map[key]]['strand'],
transcript_hash[id_map[key]]['5UTR_offset'],
transcript_hash[id_map[key]]['start_exon_rank'],
row['peptide'][::-1],
exon_hash[transcript_hash[id_map[key]]['transcript_id']],
transcript_hash[id_map[key]]['chr'],
three_frame_translation)
#MAPQ
temp_result[4]=255
#CIGAR
temp_result[5]=compute_cigar(temp_result[3],
exon_hash[transcript_hash[id_map[key]]['transcript_id']],
transcript_hash[id_map[key]]['strand'],row['peptide'])
#RNEXT
temp_result[6]='*'
#PNEXT
temp_result[7]=0
#TLEN
temp_result[8]=0
#SEQ
if int(transcript_hash[id_map[key]]['strand'])==1:
temp_result[9]=str(transcript_hash[id_map[key]]['transcript_seq']\
[(phit[0]*3):((phit[0]*3)+(len(row['peptide'])*3))])
else:
temp_result[9]=reverse_complement(str(transcript_hash[id_map[key]]['transcript_seq']\
[(phit[0]*3):((phit[0]*3)+(len(row['peptide'])*3))]))
#QUAL
temp_result[10]='*'
#
#Mandatory proteomics specific columns added to the proBam format
#
#NH: number of genomic location the peptide mapping to
temp_result[11]='NH:i:'+str(len(row['proteins'])+len(phit)-1)
# todo figure this one out
temp_result[12] = 'XO:z:*'
# XL: number of peptides the spectrum mapping to
temp_result[13] = 'XL:i:*'
# XP; peptide sequence
temp_result[14] = 'XP:Z:' + row['modified_peptide']
# YP: protein accession ID from the original search
temp_result[15] = 'YP:Z:' + str(key)
# XF: reading frame of the peptide
temp_result[16] = 'XF:Z:' + compute_cigar(temp_result[3],
exon_hash[transcript_hash[id_map[key]]['transcript_id']],
transcript_hash[id_map[key]]['strand'], row['peptide'])[1]
# XI: peptide intensity
temp_result[17] = "XI:f:*"
# XB: Mass error (experimental - calculated)
temp_result[18] = "XB:f:" + str(row['massdiff'])
# XR: reference peptide sequence
temp_result[19] = 'XR:Z:' + row['peptide']
# YB: preceding 2AA
temp_result[20] = "YB:Z:*"
# YA: following 2AA:
temp_result[21] = "YA:Z:*"
# XS: PSM score
temp_result[22] = "XS:f:" + str(row['search_score']['score'])
# XQ: PSM-Qvalue
temp_result[23] = 'XQ:f:' + str(row['search_score']['evalue'])
#XC: Peptide charge
temp_result[24]='XC:i:'+str(psm['assumed_charge'])
#XA: Whether the peptide is well annotated
temp_result[25]=create_XA(phit[1])
#XM: Modification
temp_result[26]='XM:Z:'+create_XM(row['modifications'])
#XN: number of mis-cleavages
if 'num_missed_cleaveges' in row.keys():
temp_result[27]='XN:i:'+str(row['num_missed_cleavages'])
else:
temp_result[27]='XN:i:*'
#XT: non/semi/tryptic
temp_result[28]="XT:i:"+str(enzyme_specificity)
#XE enzyme
temp_result[29]="XE:i"+str(enzyme)
#XG: Petide type
temp_result[30]='XG:Z:D'
#XU= url
temp_result[31]="XU:Z:*"
return temp_result
'''
#
# Create SAM header
#
def create_SAM_header(file, version, database, sorting_order, database_v,
species, command_line, psm_file, comments, name):
'''
:param file: output file
:param version: proBAMconvert version
:param database: database name
:param sorting_order: SAM sorting order
    :param database_v: database version
    :param species: species name
    :param command_line: command line used to invoke proBAMconvert
    :param psm_file: input PSM file (scanned for extra comments)
    :param comments: list of comments to add as @CO lines
    :param name: read group / sample name
    :return:
'''
print('Creating SAM header')
header = []
header.append('@HD\tVN:' + str(version) + ' SO:' + sorting_order)
if database.upper() == "ENSEMBL":
SQ = proBAM_ENSEMBL.create_SQ_header(database_v, species)
for row in SQ:
header.append(row)
header.append('@PG\tID:proBamPy\tVN:1.0\tCL:' + str(command_line))
header.append('@RG\tID:' + str(name))
header.append('@CO\tAS:' + str(database) + '\tVN:' + str(database_v))
# get comments and append comments to file
if comments != []:
for comment in comments:
comment = str(comment).rstrip()
comment = re.sub(r'(^[ \t]+|[ \t]+(?=:))', '', comment, flags=re.M)
header.append('@CO\t' + str(comment))
comments = extract_comments(psm_file)
if comments != []:
for comment in comments:
comment = str(comment).rstrip()
comment = re.sub(r'(^[ \t]+|[ \t]+(?=:))', '', comment, flags=re.M)
header.append('@CO\t' + str(comment))
for row in header:
file.write(row + '\n')
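# For reference, the header written above has roughly this shape, one entry per
# line (values are illustrative; the @SQ lines come from
# proBAM_ENSEMBL.create_SQ_header):
#
#   @HD VN:<version> SO:<sorting order>
#   @SQ SN:<sequence name> LN:<sequence length>
#   @PG ID:proBamPy VN:1.0 CL:<command line>
#   @RG ID:<name>
#   @CO AS:<database> VN:<database version>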
#
# Function to convert SAM to BAM
#
def sam_2_bam(directory, name):
'''
    :param directory: directory where the SAM file lives and the BAM file is written
:param name: file name
'''
print("Converting SAM to BAM")
infile = pysam.AlignmentFile((directory + name + '.sam'), "r")
outfile = pysam.AlignmentFile((directory + name + '.bam'),
"wb",
template=infile)
for s in infile:
outfile.write(s)
infile.close()
outfile.close()
# create EOF
bam = open((directory + name + '.bam'), 'ab')
bam.write("\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00BC" + \
"\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00")
bam.close()
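    # The byte string appended above is the standard 28-byte BGZF end-of-file
    # marker from the SAM/BAM specification; writing it explicitly keeps
    # downstream tools from flagging the BAM file as truncated.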
# Pysam v 0.8.4.:
#pysam.sort((directory + name + '.bam'), (directory + name + '.sorted'))
# For new pysam version, has error for bigger files
pysam.sort("-o", (directory + name + '.sorted.bam'),
(directory + name + '.bam'))
time.sleep(5)
pysam.index(directory + name + '.sorted.bam')
#
# function to calculate and adjust NH for every peptide
# Also depending on mode, can create peptide based proBAM
#
def compute_NH_XL(directory, name, include_unmapped, mode):
'''
:param directory: directory of the proBAM file
:param name: proBAM file name
:param include_unmapped: 'Y' or 'N', whether to include unmapped PSMs
:param mode: proBAM mode (psm,peptide,peptide-mod...)
'''
if mode in ['proBAM_peptide', 'proBAM_peptide_mod']:
print("Create peptide-based proBAM")
sam_file = open(directory + name + '.sam', 'r')
original_file = sam_file.read()
nh_hash = {}
score_hash = {}
peptide_hash = {}
for line in original_file.split("\n"):
if len(line) < 1:
continue
elif line[0] == "@":
continue
else:
if line.split("\t")[5] == "*":
continue
else:
line = line.split("\t")
if mode == 'proBAM_peptide':
key = line[23] + '_' + line[2] + '_' + line[
3] + '_' + line[5]
id = line[23].split(':')[2]
elif mode == 'proBAM_peptide_mod':
if line[20] == "XM:Z:*":
id = line[23].split(':')[2]
else:
id = line[23].split(':')[2] + "," + line[20].split(
'XM:Z:')[1].replace(";", ",")
key = id + '_' + line[2] + '_' + line[3] + '_' + line[5]
if id in nh_hash:
if create_id_from_list([line[2], line[3], line[5]]) in \
nh_hash[id]:
continue
else:
nh_hash[id].append(
create_id_from_list(
[line[2], line[3], line[5]]))
else:
nh_hash[id] = [(create_id_from_list(
[line[2], line[3], line[5]]))]
if key not in peptide_hash:
peptide_hash[key] = [
id, line[1], line[2], line[3], line[4], line[5],
line[6], line[7], line[8], line[9], line[10],
line[11], line[12], 'XB:f:*', 'XC:i:*', line[15],
line[16], line[17], 'XI:f:-1', 'XL:i:-1', 'XM:Z:*',
line[21], line[22], line[23], line[24], line[25],
line[26], line[27], line[28], line[29], line[30],
line[31], line[32]
]
if id not in score_hash:
score_hash[id] = 1
else:
if line[26] != 'XS:f:-1':
try:
if float(line[26].split('XS:f:')[1]) > float(
peptide_hash[key][26].split(
'XS:f:')[1]):
peptide_hash[key][26] = line[26]
except:
pass
if line[24] != 'XQ:f:-1':
try:
if float(line[24].split('XQ:f:')[1]) > float(
peptide_hash[key][24].split(
'XQ:f:')[1]):
peptide_hash[key][24] = line[24]
except:
pass
sam_file.close()
sam_file = open(directory + name + '.sam', 'w')
for line in original_file.split("\n"):
if len(line) < 1:
continue
elif line[0] == "@":
sam_file.write(line)
| |
channels[idx]
def getPreviousChannel(self, currentChannel):
channels = self.getChannelList()
idx = channels.index(currentChannel)
idx -= 1
if idx < 0:
idx = len(channels) - 1
return channels[idx]
def saveChannelList(self, callback, channelList):
self.eventQueue.append([self._saveChannelList, callback, channelList])
self.event.set()
def _saveChannelList(self, channelList):
c = self.conn.cursor()
for idx, channel in enumerate(channelList):
c.execute(
'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
[channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
self.source.KEY, channel.weight, self.source.KEY])
if not c.rowcount:
c.execute(
'UPDATE channels SET title=?, logo=?, stream_url=?, visible=?, weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
[channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight, channel.weight,
channel.id, self.source.KEY])
c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
self.channelList = None
self.conn.commit()
def getChannelList(self, onlyVisible=True):
if not self.channelList or not onlyVisible:
result = self._invokeAndBlockForResult(self._getChannelList, onlyVisible)
if not onlyVisible:
return result
self.channelList = result
return self.channelList
def _getChannelList(self, onlyVisible):
c = self.conn.cursor()
channelList = list()
if onlyVisible:
c.execute('SELECT * FROM channels WHERE source=? AND visible=? ORDER BY weight', [self.source.KEY, True])
else:
c.execute('SELECT * FROM channels WHERE source=? ORDER BY weight', [self.source.KEY])
for row in c:
channel = Channel(row['id'], row['title'], row['logo'], row['stream_url'], row['visible'], row['weight'])
channelList.append(channel)
c.close()
return channelList
def getCurrentProgram(self, channel):
return self._invokeAndBlockForResult(self._getCurrentProgram, channel)
def _getCurrentProgram(self, channel):
"""
@param channel:
@type channel: source.Channel
@return:
"""
program = None
now = datetime.datetime.now()
c = self.conn.cursor()
c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND start_date <= ? AND end_date >= ?',
[channel.id, self.source.KEY, now, now])
row = c.fetchone()
if row:
program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
row['image_large'], row['image_small'])
c.close()
return program
    def getNextProgram(self, program):
        return self._invokeAndBlockForResult(self._getNextProgram, program)
def _getNextProgram(self, program):
nextProgram = None
c = self.conn.cursor()
c.execute(
'SELECT * FROM programs WHERE channel=? AND source=? AND start_date >= ? ORDER BY start_date ASC LIMIT 1',
[program.channel.id, self.source.KEY, program.endDate])
row = c.fetchone()
if row:
nextProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'], row['description'],
row['image_large'], row['image_small'])
c.close()
return nextProgram
    def getPreviousProgram(self, program):
        return self._invokeAndBlockForResult(self._getPreviousProgram, program)
def _getPreviousProgram(self, program):
previousProgram = None
c = self.conn.cursor()
c.execute(
'SELECT * FROM programs WHERE channel=? AND source=? AND end_date <= ? ORDER BY start_date DESC LIMIT 1',
[program.channel.id, self.source.KEY, program.startDate])
row = c.fetchone()
if row:
previousProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'],
row['description'], row['image_large'], row['image_small'])
c.close()
return previousProgram
def _getProgramList(self, channels, startTime):
"""
@param channels:
@type channels: list of source.Channel
@param startTime:
@type startTime: datetime.datetime
@return:
"""
endTime = startTime + datetime.timedelta(hours=2)
programList = list()
channelMap = dict()
for c in channels:
if c.id:
channelMap[c.id] = c
if not channels:
return []
c = self.conn.cursor()
c.execute('SELECT p.*, (SELECT 1 FROM notifications n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS notification_scheduled FROM programs p WHERE p.channel IN (\'' + ('\',\''.join(channelMap.keys())) + '\') AND p.source=? AND p.end_date > ? AND p.start_date < ?', [self.source.KEY, startTime, endTime])
for row in c:
program = Program(channelMap[row['channel']], row['title'], row['start_date'], row['end_date'], row['description'], row['image_large'], row['image_small'], row['notification_scheduled'])
programList.append(program)
return programList
    def _isProgramListCacheExpired(self, date=None):
        # check if data is up-to-date in database
        # (a datetime.datetime.now() default argument would only be evaluated
        #  once, at definition time, so the fallback is resolved here instead)
        if date is None:
            date = datetime.datetime.now()
        dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
row = c.fetchone()
today = datetime.datetime.now()
expired = row is None or row['programs_updated'].day != today.day
c.close()
return expired
def setCustomStreamUrl(self, channel, stream_url):
if stream_url is not None:
self._invokeAndBlockForResult(self._setCustomStreamUrl, channel, stream_url)
# no result, but block until operation is done
def _setCustomStreamUrl(self, channel, stream_url):
if stream_url is not None:
c = self.conn.cursor()
c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
c.execute("INSERT INTO custom_stream_url(channel, stream_url) VALUES(?, ?)",
[channel.id, stream_url.decode('utf-8', 'ignore')])
self.conn.commit()
c.close()
def getCustomStreamUrl(self, channel):
return self._invokeAndBlockForResult(self._getCustomStreamUrl, channel)
def _getCustomStreamUrl(self, channel):
c = self.conn.cursor()
c.execute("SELECT stream_url FROM custom_stream_url WHERE channel=?", [channel.id])
stream_url = c.fetchone()
c.close()
if stream_url:
return stream_url[0]
else:
return None
def deleteCustomStreamUrl(self, channel):
self.eventQueue.append([self._deleteCustomStreamUrl, None, channel])
self.event.set()
def _deleteCustomStreamUrl(self, channel):
c = self.conn.cursor()
c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
self.conn.commit()
c.close()
def getStreamUrl(self, channel):
customStreamUrl = self.getCustomStreamUrl(channel)
if customStreamUrl:
customStreamUrl = customStreamUrl.encode('utf-8', 'ignore')
return customStreamUrl
elif channel.isPlayable():
streamUrl = channel.streamUrl.encode('utf-8', 'ignore')
return streamUrl
return None
@staticmethod
def adapt_datetime(ts):
# http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
@staticmethod
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
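    # Sketch of how these two static methods are typically registered with
    # sqlite3 (the class name "Database" and the connection call below are
    # assumptions; the real connection setup lives outside this excerpt):
    #
    #   sqlite3.register_adapter(datetime.datetime, Database.adapt_datetime)
    #   sqlite3.register_converter('timestamp', Database.convert_datetime)
    #   conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)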
def _createTables(self):
c = self.conn.cursor()
try:
c.execute('SELECT major, minor, patch FROM version')
(major, minor, patch) = c.fetchone()
version = [major, minor, patch]
except sqlite3.OperationalError:
version = [0, 0, 0]
try:
if version < [1, 3, 0]:
c.execute('CREATE TABLE IF NOT EXISTS custom_stream_url(channel TEXT, stream_url TEXT)')
c.execute('CREATE TABLE version (major INTEGER, minor INTEGER, patch INTEGER)')
c.execute('INSERT INTO version(major, minor, patch) VALUES(1, 3, 0)')
# For caching data
c.execute('CREATE TABLE sources(id TEXT PRIMARY KEY, channels_updated TIMESTAMP)')
c.execute(
'CREATE TABLE updates(id INTEGER PRIMARY KEY, source TEXT, date TEXT, programs_updated TIMESTAMP)')
c.execute(
'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE)')
c.execute(
'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE)')
c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
# For active setting
c.execute('CREATE TABLE settings(key TEXT PRIMARY KEY, value TEXT)')
# For notifications
c.execute(
"CREATE TABLE notifications(channel TEXT, program_title TEXT, source TEXT, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE)")
if version < [1, 3, 1]:
# Recreate tables with FOREIGN KEYS as DEFERRABLE INITIALLY DEFERRED
c.execute('UPDATE version SET major=1, minor=3, patch=1')
c.execute('DROP TABLE channels')
c.execute('DROP TABLE programs')
c.execute(
'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute(
'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
# make sure we have a record in sources for this Source
c.execute("INSERT OR IGNORE INTO sources(id, channels_updated) VALUES(?, ?)", [self.source.KEY, 0])
self.conn.commit()
c.close()
except sqlite3.OperationalError, ex:
raise DatabaseSchemaException(ex)
def addNotification(self, program):
self._invokeAndBlockForResult(self._addNotification, program)
# no result, but block until operation is done
def _addNotification(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("INSERT INTO notifications(channel, program_title, source) VALUES(?, ?, ?)",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def removeNotification(self, program):
self._invokeAndBlockForResult(self._removeNotification, program)
# no result, but block until operation is done
def _removeNotification(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("DELETE FROM notifications WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def getNotifications(self, daysLimit=2):
return self._invokeAndBlockForResult(self._getNotifications, daysLimit)
def _getNotifications(self, daysLimit):
start = datetime.datetime.now()
end = start + datetime.timedelta(days=daysLimit)
c = self.conn.cursor()
c.execute(
"SELECT DISTINCT c.title, p.title, p.start_date FROM notifications n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
[self.source.KEY, start, end])
programs = c.fetchall()
c.close()
return programs
def isNotificationRequiredForProgram(self, program):
return self._invokeAndBlockForResult(self._isNotificationRequiredForProgram, program)
def _isNotificationRequiredForProgram(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("SELECT 1 FROM notifications WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
result = c.fetchone()
c.close()
return result
def clearAllNotifications(self):
self._invokeAndBlockForResult(self._clearAllNotifications)
# no result, but block until operation is done
def _clearAllNotifications(self):
c = self.conn.cursor()
c.execute('DELETE FROM notifications')
self.conn.commit()
c.close()
class Source(object):
def getDataFromExternal(self, date, progress_callback=None):
"""
Retrieve data from external as a list or iterable. Data may contain both Channel and Program objects.
The source may choose to ignore the date parameter and return all data available.
@param date: the date to retrieve the data for
@param progress_callback:
@return:
"""
return None
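    # A concrete Source subclass would typically have getDataFromExternal yield
    # Channel and Program objects built from its backend. Illustrative sketch
    # only (not a real subclass from this add-on; constructor arguments follow
    # the positional pattern used elsewhere in this file, and start/end are
    # datetime objects):
    #
    #   class MySource(Source):
    #       KEY = 'mysource'
    #       def getDataFromExternal(self, date, progress_callback=None):
    #           ch = Channel('id1', 'Channel One', '<logo>', '<stream url>', True, 0)
    #           yield ch
    #           yield Program(ch, 'News', start, end, 'description', None, None)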
def isUpdated(self, channelsLastUpdated, programsLastUpdated):
today = datetime.datetime.now()
        if channelsLastUpdated is None or channelsLastUpdated.day != today.day:
<filename>foliage/change_tab.py
'''
change_tab.py: implementation of the "Change records" tab
Copyright
---------
Copyright (c) 2021-2022 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from collections import namedtuple, defaultdict
from commonpy.data_utils import unique, pluralized, flattened
from commonpy.exceptions import Interrupted
from commonpy.file_utils import exists, readable
from commonpy.interrupt import wait, reset_interrupts, interrupt, interrupted
from decouple import config
from pywebio.input import input, select, checkbox, radio
from pywebio.input import NUMBER, TEXT, input_update, input_group
from pywebio.output import put_text, put_markdown, put_row, put_html
from pywebio.output import toast, popup, close_popup, put_buttons, put_button
from pywebio.output import use_scope, set_scope, clear, remove, put_warning
from pywebio.output import put_info, put_table, put_grid, span, put_link
from pywebio.output import put_tabs, put_image, put_scrollable, put_code
from pywebio.output import put_processbar, set_processbar, put_loading
from pywebio.output import put_column, put_scope, clear_scope
from pywebio.pin import pin, pin_wait_change, put_input, put_actions
from pywebio.pin import put_textarea, put_radio, put_checkbox, put_select
from pywebio.session import run_js, eval_js
from sidetrack import set_debug, log
import sys
import threading
from foliage.base_tab import FoliageTab
from foliage.exceptions import *
from foliage.export import export_data
from foliage.folio import Folio, RecordKind, IdKind, TypeKind, Record
from foliage.folio import unique_identifiers, back_up_record
from foliage.ui import confirm, notify, user_file
from foliage.ui import PROGRESS_BOX, PROGRESS_TEXT
from foliage.ui import tell_success, tell_warning, tell_failure, stop_processbar
from foliage.ui import note_info, note_warn, note_error, tell_success, tell_failure
# Tab definition class.
# .............................................................................
class ChangeTab(FoliageTab):
def contents(self):
return {'title': 'Change records', 'content': tab_contents()}
def pin_watchers(self):
return {'chg_op': lambda value: update_tab(value)}
# Tab creation function.
# .............................................................................
def tab_contents():
log(f'generating change tab contents')
# FIXME what causes these diffs on windows?
textarea_rows = 14 if sys.platform.startswith('win') else 13
margin_adjust = 'margin-top: -1.1em' if sys.platform.startswith('win') else ''
return [
put_grid([[
put_markdown('Input item and/or holdings identifiers'
+ ' (i.e., barcodes, id\'s, or hrid\'s). All'
+ ' records will be changed the same way. Changing a'
+ ' holdings record will also change all its items.'),
put_button('Upload', outline = True,
onclick = lambda: load_file()).style('text-align: right'),
]], cell_widths = 'auto 100px'),
put_grid([[
put_grid([
[put_textarea('textbox_ids', rows = textarea_rows)],
]),
put_grid([
[put_text('Select the field to be changed:').style('margin-top: -0.5em')],
[put_row([
put_button('Select', onclick = lambda: select_field_name()
).style('text-align: left'),
put_textarea('chg_field', rows = 1, readonly = True),
], size = '95px auto').style('text-align: right')],
[put_text('Select the action to perform:')],
[put_radio('chg_op', inline = True,
options = [ ('Add value', 'add', True),
('Change value', 'change'),
('Delete value', 'delete')]
).style(f'margin-bottom: 0.3em; {margin_adjust}')],
[put_text('Current field value (records must match this):').style('opacity: 0.3')],
[put_row([
put_button('Select', onclick = lambda: select_field_value('old')),
put_textarea('old_value', rows = 1, readonly = True),
], size = '95px auto').style('z-index: 8; opacity: 0.3')],
[put_text('New value (field will be set to this):')],
[put_row([
put_button('Select', onclick = lambda: select_field_value('new')),
put_textarea('new_value', rows = 1, readonly = True),
], size = '95px auto').style('z-index: 9')],
]).style('margin-left: 12px'),
]], cell_widths = '50% 50%').style('margin-top: 1em'),
put_row([
put_button('Change records', color = 'danger',
onclick = lambda: do_change()),
put_button('Clear', outline = True,
onclick = lambda: clear_tab()).style('text-align: right'),
])
]
# Implementation of tab functionality.
# .............................................................................
# This next bit is an egregious and unobvious hack. Here's the deal. When
# the user selects different options using the radio buttons for the
# operation type (add value, change value, delete value), we want different
# buttons and fields to look visually "unavailable" as a clue to the user
# that they don't have to fill in those values. However, PyWebIO doesn't
# provide a way to control the properties of the elements dynamically: there's
# no PyWebIO API for changing a CSS attribute at run-time.
#
# One *can* do it using JavaScript using well-known methods, and PyWebIO
# *does* provide a function (eval_js) to execute JavaScript at run-time in
# the web page. But, here we face a new challenge: how do you refer to
# the things on the page whose CSS attributes you want to change?
#
# For some of the elements, it's possible to target them by searching for the
# element content. That's the case for the text fields, where we can use a
# jQuery selector such as
# $("p.contains('Current field value')")
# to get at the element, and from there, to change the CSS. This is used in
# the code below for elements for which it's possible to do that. But you
# can't do that for buttons -- you need to find another way to refer to them.
# Frustratingly, PyWebIO doesn't provide a way to put id attributes on
# elements; if you could do that, it would make it easy to target exactly the
# elements you need to change. You also can't target CSS classes, because
# that would end up catching other elements with the same class on the page.
#
# So to get the other elements (the ones that can't be found using the jQuery
# "contains" operator mentioned above), I ended up using an insane hack:
#
# 1. Add distinguishing features to specific elements using one of the few
# CSS/HTML controls that PyWebIO does provide, namely the style()
# function, to add a property that we can uniquely find in the DOM at run
# time. I'm using the z-index, setting it specific numbers (8 and 9) in
# tab_contents() above. The z-index is not used for anything else on
# this page so it's irrelevant as far as layout is concerned.
#
# 2. Invoke some JavaScript code in the web page that uses jQuery to look
# for the elements that have the specific z-index values. That's how the
# specific elements are found. Then it's easy to change the value of
# desired CSS properties on those elements.
#
# Why use the numbers 8 and 9? In case a future change ends up using a z-index
# value for something. I hypothesized that a future developer who used z-index
# for a real purpose would use a value like 1, 2, or maybe 100, 1000, etc.
# The values 8 and 9 seemed off-beat enough that they wouldn't clash with
# something in the future, even if a future developer doesn't read this comment
# explaining what's going on.
def update_tab(value):
log(f'updating form in response to radio box selection: "{value}"')
if value == 'add':
eval_js('''$("p:contains('Current field value')").css("opacity", "0.3");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 8).css("opacity", "0.3");''')
eval_js('''$("p:contains('New field value')").css("opacity", "1");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 9).css("opacity", "1");''')
elif value == 'delete':
eval_js('''$("p:contains('Current field value')").css("opacity", "1");''')
eval_js('''$("p:contains('New field value')").css("opacity", "0.3");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 8).css("opacity", "1");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 9).css("opacity", "0.3");''')
else:
eval_js('''$("p:contains('Current field value')").css("opacity", "1");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 8).css("opacity", "1");''')
eval_js('''$("p:contains('New field value')").css("opacity", "1");''')
eval_js('''$("div").filter((i, n) => $(n).css("z-index") == 9).css("opacity", "1");''')
def clear_tab():
log(f'clearing tab')
clear('output')
pin.textbox_ids = ''
pin.chg_op = 'add'
pin.chg_field = ''
pin.old_value = ''
pin.new_value = ''
update_tab('add')
def select_field_name():
# Clear any previous value.
pin.new_value = ''
if (answer := selected('Select field to change', known_fields)):
# Show the selected value.
pin.chg_field = answer
log(f'user selected field {answer}')
def select_field_value(old_new):
# No way to prevent clicks when the op is not valid, so just ignore them.
# Setting an old field value is only valid for change and delete.
# Setting a new field value is only valid for add and change.
if ((old_new == 'old' and pin.chg_op == 'add')
or (old_new == 'new' and pin.chg_op == 'delete')):
return
if not pin.chg_field:
notify('Please first select the field to be changed.')
return
fname = pin.chg_field.lower()
log(f'getting list of values for {fname}')
type_list = Folio().types(known_fields[pin.chg_field].type)
if not type_list:
note_error(f'Could not retrieve the list of possible {fname} values')
return
value_list = sorted(item.data['name'] for item in type_list)
if (val := selected(f'Select the {old_new} value for {fname}', value_list)):
field = old_new + '_value'
setattr(pin, field, val)
log(f'user selected {old_new} field value {val}')
def selected(title, values):
log(f'showing list selection popup')
event = threading.Event()
clicked_ok = False
def clk(val):
nonlocal clicked_ok
clicked_ok = val
event.set()
pins = [
put_select('list_selection', options = values),
put_buttons([
{'label': 'Submit', 'value': True},
{'label': 'Cancel', 'value': False, 'color': 'secondary'},
], onclick = clk).style('float: right')
]
popup(title = title, content = pins, closable = False)
event.wait()
close_popup()
wait(0.5) # Give time for popup to go away.
log(f'user {"made a selection" if clicked_ok else "cancelled"}')
return pin.list_selection if clicked_ok else None
def all_selections_made():
return (pin.chg_field
and ((pin.chg_op == 'add' and pin.new_value)
or (pin.chg_op == 'delete' and pin.old_value)
or (pin.chg_op == 'change' and pin.new_value and pin.old_value)))
def load_file():
log(f'user requesting file upload')
if (contents | |
thankfully _it is!_
# Rather than the `.corr()`, we will use `.cov()` to get the raw covariance matrix. This is more useful in some respects, since it more clearly shows how much variance there is in the individual parameters. For instance, stellar wind velocity has very low dispersion. Since we have taken log of all quantities, there is no problem with disparate numerical scales.
mdf[['LIR_will', 'L4', 'R0', 'R/L', 'V3', 'Md']].cov()
# So, from that it is clear that _operationally_ the principal determinant of my $\dot M$ is the infrared luminosity. Also look at correlation matrix for good measure.
mdf[['LIR_will', 'L4', 'R0', 'R/L', 'V3', 'Md']].corr()
# So, infrared luminosity determines 75% of $\dot M$ variance, while $V$ clearly has no influence on anything (< 6% level). Interestingly neither does $R/L_*$ even though that is part of the operational determinant of $\dot M$ and $R$ and $L$ do individually contribute to $\dot M$ variance at 25% level. Part of the reason must be simply that $R/L_*$ does not vary much, having a rms dispersion of 0.3 dex (about factor 2).
# So, where does the extra 25% variance in $\dot M$ come from? There is a weak negative correlation of $L_{\mathrm{IR}}$ with $R/L_*$ that might have something to do with it. This means that mass loss rate shows less dispersion (0.55 dex) than the IR luminosity (0.62 dex). Whatever, it doesn't really matter.
#
# Note that the diagonal elements of the covariance array are the variances, so std is sqrt of that.
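# As a quick check, the per-parameter dispersions (in dex) are just the square
# roots of that diagonal. Minimal sketch (assumes `np` is numpy, as used
# elsewhere in this notebook):
np.sqrt(np.diag(mdf[['LIR_will', 'L4', 'R0', 'R/L', 'V3', 'Md']].cov()))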
# _Alternatively ..._ we can put it in exclusively empirical terms: $F_{\mathrm{IR}}$, $D$, $\theta$.
# $$
# \dot M \propto \frac{\theta D^3 F_{\mathrm{IR}}}{L_* V}
# $$
mdf[['D_kpc', 'theta', 'FIR', 'L4', 'V3', 'Md']].corr()
# But, as can be seen, $\dot M$ is correlated with none of these very well. So, best stick with $L$ and $R$.
# ### Now, look at their Mdots
# What do they really depend on?
#
# 1. They use the LOS column density, instead of the radial column density, which they estimate from the 70 micron surface brightness, together with an estimate of the 70 micron emissivity, $j_\nu$, which depends on the radiation field $U$. They claim that $j_\nu \propto U^{1/2}$ ($j_\nu$ is the emissivity per nucleon).
#
# 2. They skip out the middle man of the shell and directly balance the ram pressure of the wind with the ram pressure of the ambient stream. _Except that not really, since they use the density of the shell and then say that it is factor of 4 times the ambient density._
#
# 3. They just assume a stream velocity of 30 km/s, so they don't depend on the gas temperature in the shell (although, in reality it should effect the compression ratio).
#
# $$
# \frac{\dot M V}{4 \pi R^2} = (\rho_s / \delta) v_a^2
# $$
# Also, apparently
# $$
# \rho_s = m I_\nu / \ell j_\nu
# $$
# From their equation (8) this gives the following dependency for $\dot M$:
# $$
# \dot M = 4 \pi \left[ \frac{R^2 I_\nu}{\ell V_w} \right] \left[ \frac{V_a^2 m }{j_\nu(U) \delta } \right]
# $$
# where first term in square brackets is measured (or at least estimated per star) quantities, while second term is in square brackets is assumed parameters, except that $j_\nu (U)$ is the dust emissivity (Jy.cm2/sr/nucleon = 1e-23 erg/s/sr/nucleon) as a function of radiation field where $U F_0 = L / 4 \pi R^2$ with $F_0 = 0.0217$ erg/cm2/s being the ISRF flux, but integrated across the whole SED.
# We show below in the next section that (1) they are not using the $j_\nu(U)$ that they claim to be using, and (2) they should be using a somewhat different one anyway (because of SED shape). However, we have corrected that, so we can proceed with the analysis. As in K18, approximate the $U$ dependence as $U^{1/2} = L_*^{1/2} R^{-1}$. So, the $\dot M$ breakdown becomes:
# $$
# \dot M \propto \left[ \frac{R^3 I_\nu}{\ell L_*^{1/2} V_w} \right] \left[ \frac{V_a^2 m }{j_0 \delta } \right]
# $$
# Or
# $$
# \dot M \propto \left[ \frac{R I_\nu}{(\ell/R) U^{1/2} V_w} \right] \left[ \frac{V_a^2 m }{j_0 \delta } \right]
# $$
# ## An investigation of radiation field, $U$, dust emissivity, $j_\nu$, and mass loss rate for K17 and K18
# _Revisiting this, now that I have established that the DL07 emissivities are too low for OB stars if one uses bolometric flux for the U (because the ISRF has only a small fraction at UV wavelengths)._
# What is the `U` column in the table? It comes from Table 5 of K17 and supposedly comes from the $T_{\mathrm{eff}}$, $R_*$ and $R_0$ from the same table. We have this table, so we can check this.
tab05['UU'] = 0.01329*(tab05['Teff']/1e4)**4 * tab05['R*']**2 / tab05['Dist2']**2
tab05
ddf = tab05['ID', 'U', 'UU'].to_pandas()
fig, ax = plt.subplots(figsize=(10, 10))
vmin, vmax = 150, 4e5
ax.scatter(x='U', y='UU', data=ddf)
ax.plot([vmin, vmax], [vmin, vmax])
for id_, x, y in zip(tab05['ID'], tab05['U'], tab05['UU']):
ax.annotate(
str(id_), (x, y),
xytext=(4,4), textcoords='offset points',
)
ax.set(xscale='log', yscale='log',
xlim=[vmin, vmax], ylim=[vmin, vmax],
xlabel='U 2017', ylabel='UU 2017',
)
ax.set_aspect('equal')
# So the previous plot shows the `U` column from K17 Tab 5 on the x axis, and the $U$ that I calculate from the `Teff`, `R*`, and `Dist2` (equals radius) columns of the same table on the y axis.
#
# It is strange that there is any scatter at all, but this shows that there isn't a serious problem with the way that K17 calculated their $U$.
# Next we compare with the K18 values, to work out where they got their $U$ from. We have got the original tables from the ApJ website and exported them to FITS (see `dust-wave-case-studies.org`).
#
# _Note that we have to explicitly change some columns from string to float._
k18tab1 = Table.read('data/Kobulnicky2018/k18tab1.fits')
k18tab2 = Table.read('data/Kobulnicky2018/k18tab2.fits')
k18tab = join(k18tab1, k18tab2, keys=('ID', 'Name', 'Alt. name'), join_type='left')
for col in 'U', 'j_nu', 'Mdot':
k18tab[col] = k18tab[col].astype('float')
k18tab
# First, check the U values against what they should be: $U = 14.7 L_4 R_{\mathrm{pc}}^{-2}$.
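# (Sanity check of the 14.7 coefficient from the definitions above, assuming
# L_sun = 3.828e33 erg/s and 1 pc = 3.086e18 cm, with F_0 = 0.0217 erg/cm2/s:
# U = 1e4 L_4 L_sun / (4 pi (R_pc pc)^2 F_0).)
1e4 * 3.828e33 / (4 * 3.141592653589793 * (3.086e18)**2 * 0.0217)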
k18tab['UU'] = 14.7*k18tab['Lum.']/k18tab['R_0']**2
ddf = k18tab['ID', 'U', 'UU'].to_pandas()
fig, ax = plt.subplots(figsize=(10, 10))
vmin, vmax = 150, 4e5
ax.scatter(x='U', y='UU', data=ddf)
ax.plot([vmin, vmax], [vmin, vmax])
for id_, x, y in zip(k18tab['ID'], k18tab['U'], k18tab['UU']):
ax.annotate(
str(id_), (x, y),
xytext=(4,4), textcoords='offset points',
)
ax.set(xscale='log', yscale='log',
xlim=[vmin, vmax], ylim=[vmin, vmax],
xlabel='U 2018', ylabel='UU 2018',
)
ax.set_aspect('equal')
(ddf['U']/ddf['UU']).describe()
# So, it looks like they are using a factor of about 16.3 instead of 14.7, which is odd. This makes their values about 1.1 times higher than mine. But apart from that, the U values look fine.
# Now, look at the emissivity as a function of $U$, and compare it with the DL07 values.
DL07tab = Table.read('../cloudy-dust-charging/DL07-data/emissivities.fits')
ddf = k18tab['ID', 'U', 'j_nu'].to_pandas()
fig, ax = plt.subplots(figsize=(8, 7))
umin, umax = 110, 5e5
jmin, jmax = 2e-13, 5e-11
ax.plot(k18tab['U'], k18tab['j_nu'], 'x', alpha=1.0, markeredgewidth=2, color='r', label='K18 sources')
ax.plot(DL07tab['U'], DL07tab['70'], '-', color='k', alpha=0.3, lw=10, label='DL07 models')
ax.plot(DL07tab['U']/8.0, DL07tab['70'], ':', color='k', alpha=0.3, lw=10, label=r'DL07($U \times 8$)')
ax.legend(loc='lower right')
ax.set(xscale='log', yscale='log',
xlim=[umin, umax],
ylim=[jmin, jmax],
xlabel=r'Radiation field: $U = F / F_{\mathrm{MMP83}}$',
ylabel=r'70 $\mu$m emissivity: $j_\nu$, Jy cm$^{2}$ sr$^{-1}$ H$^{-1}$',
)
sns.despine()
fig.tight_layout()
fig.savefig('K18-emissivity-vs-U.pdf')
None
# So, they are not even using the emissivities that they say they are using. But we need to check if it is just a problem with the table, or if they are actually using these values to calculate the $\dot M$. _Yes, they are – see below._
# So, we will calculate the $\dot M$ from their table quantities, using their equation (8).
k18tab['Va'] = 30.0
k18tab['Va'][0] = 26.5
k18tab['Mdot2'] = 1.67e-28*k18tab['R_0,as']**2 * k18tab['D'] * k18tab['Va']**2 * 1e7*k18tab['Peak_70'] / (k18tab['V_inf_{}'] * k18tab['ell'] * k18tab['j_nu'])
fig, ax = plt.subplots(figsize=(10, 8))
xx, yy = k18tab['Mdot'], k18tab['Mdot2']
c = ax.scatter(xx, yy,
c=4.0 + np.log10(k18tab['Lum.']), cmap='magma', vmin=4.0, vmax=6.0,
edgecolors='k', alpha=1.0)
fig.colorbar(c, ax=ax).set_label(r'$\log_{10}\ \left[L_* / L_\odot \right]$')
for id_, x, y in zip(k18tab['ID'], xx, yy):
ax.annotate(
str(id_), (x, y), fontsize='xx-small',
xytext=(4,4), textcoords='offset points',
)
fmin, fmax = 1e-9, 3e-6
ax.plot([fmin, fmax], [fmin, fmax], ls='--')
ax.set(
xscale='log', yscale='log',
xlim=[fmin, fmax], ylim=[fmin, fmax],
xlabel=r'From K18 Table 2, $\dot M$',
ylabel=r'From K18 eq. (8), $\dot M$',
)
ax.set_aspect('equal')
fig.savefig('K18-mdot-internal-comparison.pdf')
None
k18tab['Md/Md'] = k18tab['Mdot2']/k18tab['Mdot']
print(f"Ratio of Mdot = {k18tab['Md/Md'].mean():.4f} | |
set(self._ids) > set(other._ids)
def __ge__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
if not other or other in self:
return True
return set(self._ids) >= set(other._ids)
def __int__(self):
return self.id or 0
def __repr__(self):
return "%s%s" % (self._name, getattr(self, '_ids', ""))
def __hash__(self):
if hasattr(self, '_ids'):
return hash((self._name, frozenset(self._ids)))
else:
return hash(self._name)
def __getitem__(self, key):
""" If ``key`` is an integer or a slice, return the corresponding record
selection as an instance (attached to ``self.env``).
Otherwise read the field ``key`` of the first record in ``self``.
Examples::
inst = model.search(dom) # inst is a recordset
r4 = inst[3] # fourth record in inst
rs = inst[10:20] # subset of inst
nm = rs['name'] # name of first record in inst
"""
if isinstance(key, str):
# important: one must call the field's getter
return self._fields[key].__get__(self, type(self))
elif isinstance(key, slice):
return self.browse(self._ids[key])
else:
return self.browse((self._ids[key],))
def __setitem__(self, key, value):
""" Assign the field ``key`` to ``value`` in record ``self``. """
# important: one must call the field's setter
return self._fields[key].__set__(self, value)
#
# Cache and recomputation management
#
@property
def _cache(self):
""" Return the cache of ``self``, mapping field names to values. """
return RecordCache(self)
def _in_cache_without(self, field, limit=PREFETCH_MAX):
""" Return records to prefetch that have no value in cache for ``field``
(:class:`Field` instance), including ``self``.
Return at most ``limit`` records.
"""
ids = expand_ids(self.id, self._prefetch_ids)
ids = self.env.cache.get_missing_ids(self.browse(ids), field)
if limit:
ids = itertools.islice(ids, limit)
# Those records are aimed at being either fetched, or computed. But the
# method '_fetch_field' is not correct with new records: it considers
# them as forbidden records, and clears their cache! On the other hand,
# compute methods are not invoked with a mix of real and new records for
# the sake of code simplicity.
return self.browse(ids)
@api.model
def refresh(self):
""" Clear the records cache.
.. deprecated:: 8.0
The record cache is automatically invalidated.
"""
self.invalidate_cache()
@api.model
def invalidate_cache(self, fnames=None, ids=None):
""" Invalidate the record caches after some records have been modified.
If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.
:param fnames: the list of modified fields, or ``None`` for all fields
:param ids: the list of modified record ids, or ``None`` for all
"""
if fnames is None:
if ids is None:
return self.env.cache.invalidate()
fields = list(self._fields.values())
else:
fields = [self._fields[n] for n in fnames]
# invalidate fields and inverse fields, too
spec = [(f, ids) for f in fields] + \
[(invf, None) for f in fields for invf in self._field_inverses[f]]
self.env.cache.invalidate(spec)
def modified(self, fnames, create=False, before=False):
""" Notify that fields will be or have been modified on ``self``. This
invalidates the cache where necessary, and prepares the recomputation of
dependent stored fields.
:param fnames: iterable of field names modified on records ``self``
:param create: whether called in the context of record creation
:param before: whether called before modifying records ``self``
"""
if not self or not fnames:
return
# The triggers of a field F is a tree that contains the fields that
# depend on F, together with the fields to inverse to find out which
# records to recompute.
#
# For instance, assume that G depends on F, H depends on X.F, I depends
# on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
#
# [G]
# X/ \Y
# [H] [J]
# W/
# [I]
#
# This tree provides perfect support for the trigger mechanism:
        # when F is modified on records,
# - mark G to recompute on records,
# - mark H to recompute on inverse(X, records),
# - mark I to recompute on inverse(W, inverse(X, records)),
# - mark J to recompute on inverse(Y, records).
if len(fnames) == 1:
tree = self.pool.field_triggers.get(self._fields[next(iter(fnames))])
else:
# merge dependency trees to evaluate all triggers at once
tree = {}
for fname in fnames:
node = self.pool.field_triggers.get(self._fields[fname])
if node:
trigger_tree_merge(tree, node)
if tree:
# determine what to compute (through an iterator)
tocompute = self.sudo().with_context(active_test=False)._modified_triggers(tree, create)
# When called after modification, one should traverse backwards
# dependencies by taking into account all fields already known to be
            # recomputed. In that case, we mark fields to compute as soon as
# possible.
#
# When called before modification, one should mark fields to compute
# after having inversed all dependencies. This is because we
# determine what currently depends on self, and it should not be
# recomputed before the modification!
if before:
tocompute = list(tocompute)
# process what to compute
for field, records, create in tocompute:
records -= self.env.protected(field)
if not records:
continue
if field.compute and field.store:
if field.recursive:
recursively_marked = self.env.not_to_compute(field, records)
self.env.add_to_compute(field, records)
else:
                    # Don't force the recomputation of compute fields which are
# not stored as this is not really necessary.
if field.recursive:
recursively_marked = records & self.env.cache.get_records(records, field)
self.env.cache.invalidate([(field, records._ids)])
# recursively trigger recomputation of field's dependents
if field.recursive:
recursively_marked.modified([field.name], create)
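    # For orientation: this trigger machinery is what makes a stored computed
    # field refresh when one of its dependencies changes. Minimal sketch on a
    # hypothetical model (names are illustrative, not from this codebase):
    #
    #   display_name = fields.Char(compute='_compute_display_name', store=True)
    #
    #   @api.depends('partner_id.name', 'ref')
    #   def _compute_display_name(self):
    #       for record in self:
    #           record.display_name = "%s / %s" % (record.partner_id.name, record.ref)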
def _modified_triggers(self, tree, create=False):
""" Return an iterator traversing a tree of field triggers on ``self``,
traversing backwards field dependencies along the way, and yielding
tuple ``(field, records, created)`` to recompute.
"""
if not self:
return
# first yield what to compute
for field in tree.get(None, ()):
yield field, self, create
# then traverse dependencies backwards, and proceed recursively
for key, val in tree.items():
if key is None:
continue
elif create and key.type in ('many2one', 'many2one_reference'):
# upon creation, no other record has a reference to self
continue
else:
# val is another tree of dependencies
model = self.env[key.model_name]
for invf in model._field_inverses[key]:
# use an inverse of field without domain
if not (invf.type in ('one2many', 'many2many') and invf.domain):
if invf.type == 'many2one_reference':
rec_ids = set()
for rec in self:
try:
if rec[invf.model_field] == key.model_name:
rec_ids.add(rec[invf.name])
except MissingError:
continue
records = model.browse(rec_ids)
else:
try:
records = self[invf.name]
except MissingError:
records = self.exists()[invf.name]
# TODO: find a better fix
if key.model_name == records._name:
if not any(self._ids):
# if self are new, records should be new as well
records = records.browse(it and NewId(it) for it in records._ids)
break
else:
new_records = self.filtered(lambda r: not r.id)
real_records = self - new_records
records = model.browse()
if real_records:
records |= model.search([(key.name, 'in', real_records.ids)], order='id')
if new_records:
cache_records = self.env.cache.get_records(model, key)
records |= cache_records.filtered(lambda r: set(r[key.name]._ids) & set(self._ids))
yield from records._modified_triggers(val)
@api.model
def recompute(self, fnames=None, records=None):
""" Recompute all function fields (or the given ``fnames`` if present).
The fields and records to recompute have been determined by method
:meth:`modified`.
"""
def process(field):
recs = self.env.records_to_compute(field)
if not recs:
return
if field.compute and field.store:
# do not force recomputation on new records; those will be
# recomputed by accessing the field on the records
recs = recs.filtered('id')
try:
field.recompute(recs)
except MissingError:
existing = recs.exists()
field.recompute(existing)
# mark the field as computed on missing records, otherwise
# they remain forever in the todo list, and lead to an
# infinite loop...
for f in recs.pool.field_computed[field]:
self.env.remove_to_compute(f, recs - existing)
else:
self.env.cache.invalidate([(field, recs._ids)])
self.env.remove_to_compute(field, recs)
if fnames is None:
# recompute everything
for field in list(self.env.fields_to_compute()):
process(field)
else:
fields = [self._fields[fname] for fname in fnames]
# check whether any 'records' must be computed
if records is not None and not any(
records & self.env.records_to_compute(field)
for field in fields
):
return
# recompute the given fields on self's model
for field in fields:
process(field)
#
# Generic onchange method
#
def _dependent_fields(self, field):
""" Return an iterator on the fields that depend on ``field``. """
def traverse(node):
for key, val in node.items():
if key is None:
yield from val
else:
yield from traverse(val)
return traverse(self.pool.field_triggers.get(field, {}))
def _has_onchange(self, field, other_fields):
""" Return whether ``field`` should trigger an onchange event in the
presence of ``other_fields``.
"""
return (field.name in self._onchange_methods) or any(
            dep in other_fields for dep in self._dependent_fields(field)
        )
from numpy import dot, sign, zeros, all, isfinite, array, sqrt, any, isnan, pi, sin, arccos, inf, argmax, asfarray
import numpy
from numpy.linalg import norm
class DilationUnit():
#vectorNorm = 0 # TODO: remove it?
#dilationCoeff = 1.0
maxScalarComponentsLength = 2
def __init__(self, vector, dilationCoeff):
#self.vectorDirection = None
self.scalarComponents = []
nv = norm(vector)
assert nv != 0
#self.vectorDirection, self.vectorNorm, self.dilationCoeff = vector/nv, nv, dilationCoeff
self.vectorDirection, self.dilationCoeff = vector/nv, dilationCoeff
class Dilation():
#maxUnits = 10
#treshhold = 1.01
th_phi = 0.1
dilationCoeffThreshold = 0.999999
prevRest = None
#maxVectorNum = 50
def __init__(self, maxUnitsNum):
self.maxUnitsNum = maxUnitsNum
self.units = []
self.unitsNum = 0
self.T = numpy.float64
if hasattr(numpy, 'float128'):
            self.T = numpy.float128
def addDilationUnit(self, vector, dilationCoeff = 0.99999):
assert all(isfinite(vector))
self.unitsNum += 1
v = self.T(vector.copy())
nv = norm(v)
v /= nv
# TODO: COMMENT IT OUT
# M = 0
# for i, unit in enumerate(self.units):
# M = max((M, abs(dot(unit.vectorDirection, v))))
# if M > 1e-2:
# print 'warning: max M=', M, 'nv=', nv
# return
#self.units.add(DilationUnit(vector.copy(), dilationCoeff))
self.units.append(DilationUnit(v, dilationCoeff))
print 'add new dilation vector; curr num: ', len(self.units)
def getProjections(self, vv):
#assert len(self.units) != 0
V = self.T(vv)
NV = norm(V)
V /= NV
r= []
#print 'norm(V):', norm(V)
for unit in self.units:
# TODO: try to involve less multiplication ops
scalarComponent = dot(unit.vectorDirection, V)
# print 'norm(unit.vectorDirection):', norm(unit.vectorDirection)
# print 'scalarComponent>>>', scalarComponent
component = unit.vectorDirection * scalarComponent#V
r.append((scalarComponent, component, unit))
for scalarComponent, component, unit in r:
V -= component
return r, V#*NV
def getDilatedDirection(self, direction):
projectionsInfo, rest = self.getProjections(direction)
dilatedDirection = zeros(direction.size)
for scalarComponent, component, unit in projectionsInfo:
dilatedDirection += component * unit.dilationCoeff
return projectionsInfo, dilatedDirection+rest
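    # Illustrative usage sketch (not from the original code): a direction is
    # dilated by shrinking its components along the stored unit vectors, while
    # the orthogonal remainder passes through unchanged.
    #
    #   d = Dilation(maxUnitsNum=10)
    #   d.addDilationUnit(numpy.array([1.0, 0.0, 0.0]), dilationCoeff=0.5)
    #   info, dilated = d.getDilatedDirection(numpy.array([1.0, 1.0, 0.0]))
    #   # dilated is the normalised input with its x-component halved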
def getRestrictedDilatedDirection(self, direction):
projectionsInfo, rest = self.getProjections(direction)
dilatedDirection = zeros(direction.size)
s, ns = [], []
for scalarComponent, component, unit in projectionsInfo:
t = component * unit.dilationCoeff
s.append(t)
ns.append(norm(t))
dilatedDirection += t
r = dilatedDirection+rest
nr = norm(r)
for i in xrange(len(s)):
if ns[i] < 1e-10*nr:
r += 1e-10*nr*s[i]/ns[i]-s[i]
return projectionsInfo, r
def getMostInsufficientUnit(self, scalarComponents):
assert self.unitsNum != 0
ind, miUnit, miValue = 0, self.units[0], self.units[0].dilationCoeff#abs(scalarComponents[0])*(1-self.units[0].dilationCoeff)
for i, unit in enumerate(self.units):
#newValue = unit.dilationCoeff*abs(scalarComponents[i])
newValue = unit.dilationCoeff#abs(scalarComponents[i])*(1-unit.dilationCoeff)
if newValue > miValue:
ind, miUnit, miValue = i, unit, newValue
return ind, miUnit
def updateDilationCoeffs2(self, scalarComponents, rest):
arr_u = array([unit.dilationCoeff for unit in self.units])
if self.unitsNum == 1:
self.units[0].dilationCoeff /= 2.0
return
m = self.unitsNum
n = rest.size
#th = norm(rest) * sqrt(n-m)
for i, unit in enumerate(self.units):
c = unit.dilationCoeff * abs(scalarComponents[i]) / n
if c < 0.125:
unit.dilationCoeff *= 2.0
elif c > 0.25 :
unit.dilationCoeff /= 2.0
print i, unit.dilationCoeff
def updateDilationCoeffs(self, scalarComponents, rest):
arr_u = array([unit.dilationCoeff for unit in self.units])
# if self.unitsNum == 1:
# self.units[0].dilationCoeff /= 2.0
# return
Ui2 = arr_u ** 2
UiSCi = abs(array([unit.dilationCoeff*scalarComponents[i] for i, unit in enumerate(self.units)]))
Ui2SCi2 = array(UiSCi) ** 2
S, S2 = sum(Ui2), sum(Ui2SCi2)
SCi = abs(array(scalarComponents))
SCi2 = SCi ** 2
alp = 2.0
beta = 1.0 / alp
#k = sqrt(S2 / (alp*sum((1.0-UiSCi)**2 * UiSCi2)))
#rr = k * sqrt(1.0-UiSCi)**2
m, n = self.unitsNum, rest.size
b = abs(beta)
#b = min((abs(beta), m/(16*n*sqrt(sum(UiSCi)))))
#b = m/(n*sqrt(sum(UiSCi)))
nr2 = norm(rest) ** 2
k = b*sqrt(S2 / sum(Ui2SCi2*(1.0-UiSCi)))
#k = sqrt(((b2-1)*nr2 + b2 * S2) / sum(Ui2SCi2*(1.0-UiSCi)))
# k1 = sqrt(b2 * S2 / sum(Ui2SCi2*(1.0-UiSCi)))
# m, n = self.unitsNum, rest.size
# rr1 = k1 * (1-UiSCi)
# u1 = rr1 * arr_u
rr = k * (1-UiSCi)
assert k > 0
#k = sqrt(S2 / (alp*sum((1.0-SCi)**2 * Ui2SCi2)))
#rr = k * sqrt(1.0-SCi)**2
rr[rr>4.0] = 4.0
rr[rr<0.25] = 0.25
r = rr * arr_u
#r[r<1e-20] = 1e-20
assert len(r) == self.unitsNum == len(self.units)
#print '--------------------'
for i, unit in enumerate(self.units):
unit.dilationCoeff = r[i]
print 'nU=%d k=%0.1g r_min=%0.1g r_max=%0.1g' % (self.unitsNum, k, min(r), max(r))
#print r
#print i, unit.dilationCoeff
# print 'old sum:', S2,'new sum:', sum(array([unit.dilationCoeff*scalarComponents[i] for i, unit in enumerate(self.units)])**2)
# print '====================='
def _updateDilationInfo(self, _dilationDirection_, ls, _moveDirection_):
r = {'increased':0, 'decreased':0}
#projectionsInfo, dilatedDirectionComponent, rest = self.getDilatedDirection(_moveDirection_)
projectionsInfo1, rest1 = self.getProjections(_dilationDirection_)
print 'norm(rest1):', norm(rest1)
#cond_add = norm(rest1) > 1e-2
s = abs(asfarray([scalarComponent for (scalarComponent, component, unit) in projectionsInfo1]))
#cond_add = self.unitsNum == 0 or any(norm(rest1) > 64.0*asfarray([unit.dilationCoeff * s[i] for i, unit in enumerate(self.units)]))
cond_add = norm(rest1) > 1e-3 #or (self.prevRest is not None and dot(self.prevRest, rest1) <= 0)
if cond_add:
self.addDilationUnit(rest1)
projectionsInfo1, rest1 = self.getProjections(_dilationDirection_)
print 'norm(rest11):', norm(rest1)
#print '!>', dot(dilatedDirectionComponent1, rest1) / norm(dilatedDirectionComponent1) / norm(rest1)
#print '!!>', dilatedDirectionComponent1, rest1
#assert norm(dilatedDirectionComponent1) > 1e-10
# mostUnusefulUnitNumber = -1
# mostUnusefulUnitCoeff = -1
# for i, u in enumerate(self.units):
# if u.dilationCoeff > mostUnusefulUnitCoeff:
# mostUnusefulUnitNumber, mostUnusefulUnitCoeff = i, u.dilationCoeff
#print 'norm(_dilationDirection_) :', norm(_dilationDirection_)
#s = norm(rest1) / norm(dilatedDirectionComponent1)
#print 's:', s
scalarComponents = [scalarComponent for (scalarComponent, component, unit) in projectionsInfo1]
#print argmax(scalarComponents)
#m = len(self.units)
#n = _dilationDirection_.size
#print 'norm(rest1), norm(scalarComponents, inf):', norm(rest1) , norm(scalarComponents, inf)
#print ''
#condReasonableBigRest = self.unitsNum == 0 or norm(rest1) > norm(scalarComponents, inf)#miUnit.dilationCoeff* abs(scalarComponents[ind_mi]) # s > 0.05
#print 'norm(rest1):', norm(rest1), 'miUnit.dilationCoeff:', miUnit.dilationCoeff, 'miUnit.sc:', scalarComponents[ind_mi]
#if 1 or norm(rest1) > 0.9:
#print '>', rest1/norm(rest1), norm(rest1), [Unit.dilationCoeff for Unit in self.units], [Unit.vectorDirection for Unit in self.units]
#and self.prevRest is not None and (True or dot(self.prevRest, rest) <= 0)
#projectionsInfo2, dilatedDirectionComponent2, rest2 = self.getDilatedDirection( dilatedDirectionComponent1 + rest1)
#assert norm(dilatedDirectionComponent2) > 1e-10
#projectionsInfo3, dilatedDirectionComponent, rest = self.getDilatedDirection( dilatedDirectionComponent + rest)
projectionsInfo, rest = projectionsInfo1, rest1
#print 'norm(r1), norm(d1):', norm(rest1), norm(dilatedDirectionComponent1)
#abs_tan_phi = norm(rest1) / norm(dilatedDirectionComponent1)
#projectionsInfo, dilatedDirectionComponent, rest = projectionsInfo2, dilatedDirectionComponent2, rest2
#print 'norm(r2), norm(d2):', norm(rest2), norm(dilatedDirectionComponent2)
#cond_drift = self.prevRest is not None and dot(self.prevRest, rest1) > 0
# TODO: what if self.prevRest is None?
#haveToAdd = condReasonableBigRest and any(rest1!=0) #and not cond_drift
self.updateDilationCoeffs(scalarComponents, rest)
self.prevRest = rest.copy()
#print 'self.unitsNum, self.maxUnitsNum:', self.unitsNum, self.maxUnitsNum
if self.unitsNum >= self.maxUnitsNum:
self.unitsNum = 0
self.units = []
# ind_mi, miUnit = self.getMostInsufficientUnit(scalarComponents)
# self.units.pop(ind_mi)
# self.unitsNum -= 1
# for unit in self.units:
# unit.dilationCoeff /= miUnit.dilationCoeff
#print 'mi removed:', ind_mi
nRemoved = self.cleanUnnessesaryDilationUnits()
if nRemoved: print 'nRemoved:', nRemoved
return r
#d = 1.0
#projectionsInfo, dilatedDirectionComponent, rest = self.getDilatedDirection(_dilationDirection_)
# for scalarComponent, component, unit in projectionsInfo:
# # angle between ort and dilation direction
# angle = arccos(scalarComponent) # values from 0 to pi
# if angle > pi/2: angle = pi - angle
# if d < 1.0:
# unit.dilationCoeff *= max((0.5, sin(d*angle)))
# elif d > 1.0:
# unit.dilationCoeff *= max((2.0, d/sqrt(1-scalarComponent**2)))
#
# if cond_overdilated:
# #TODO - omit repeated calculations
# nRemoved = self.cleanUnnessesaryDilationUnits()
# print 'REMOVED: ', nRemoved#, 'increaseMultiplier:', increaseMultiplier
# if sign(dot(_moveDirection_, component)) == sign(scalarComponent):
# #print 'case 1'
# unit.dilationCoeff *= multiplier
# if unit.dilationCoeff > 1.0: unit.dilationCoeff = 1.0
# else:
# reduceMultiplier = max((0.5, sqrt(1 - scalarComponent**2)))
# unit.dilationCoeff *= reduceMultiplier
#unit.dilationCoeff /= 2.0#multiplier
#print 'case 2'
# #cond_overDilated = abs_tan_phi > 0.5#min((2.0, 1.0 / self.th_phi))
# if abs_tan_phi < self.th_phi: #abs_tan_phi < 0.1 * len(self.units) / (_dilationDirection_.size-len(self.units)):
# koeff = 0.5
# for scalarComponent, component, unit in projectionsInfo:
# unit.dilationCoeff *= koeff * max((0.1, sqrt(1 - scalarComponent**2)))
# #elif cond_overDilated: # TODO: use separate parameter instead of (1.0 / self.th_phi)
# else:
# #elif abs_tan_phi >
# multiplier = self.th_phi / abs_tan_phi
# if multiplier > 2.0: multiplier = 2.0
# elif multiplier < 1.3: multiplier = 1.3
# for scalarComponent, component, unit in projectionsInfo:
# if sign(dot(_moveDirection_, component)) == sign(scalarComponent):
# unit.dilationCoeff *= multiplier
# #pass
# for scalarComponent, component, unit in projectionsInfo:
# pass
#reduceMultiplier = max((0.25, sqrt(1 - scalarComponent**2)))
#unit.dilationCoeff *= reduceMultiplier
##########################################
# get NEW rest
# TODO - omit repeated calculations
#projectionsInfo, dilatedDirectionComponent, rest = self.getDilatedDirection(_dilationDirection_)
##########################################
#if self.prevRest is not None:print 'sign dot:', sign(dot(self.prevRest, rest))
#print 'norm(rest) / norm(dilatedDirectionComponent:', norm(rest)/ norm(dilatedDirectionComponent)
#haveToAdd = True
def cleanUnnessesaryDilationUnits(self):
indUnitsToRemove = []
for i, unit in enumerate(self.units):
if unit.dilationCoeff > self.dilationCoeffThreshold:
print '>>', unit.dilationCoeff , self.dilationCoeffThreshold
indUnitsToRemove.append(i)
#unitsToRemove.add(unit)
for j in xrange(len(indUnitsToRemove)):
self.units.pop(indUnitsToRemove[-1-j])
nRemoved = len(indUnitsToRemove)
        self.unitsNum -= nRemoved
        return nRemoved
#-----------------------------------------------------------------------------
# Name: DBFunctions.py
# Purpose:
#
# Author: <NAME>
#
# Created: 2007/11/10
# RCS-ID: $Id: DBFunctions.py,v 1.11 2008/03/25 03:02:12 rbasnet Exp $
# Copyright: (c) 2007
# Licence: All Rights Reserved.
# New field: Whatever
#-----------------------------------------------------------------------------
import string
#from MySqlDatabase import *
from SqliteDatabase import *
import Globals
import Constants
import PlatformMethods
import Classes
import cPickle
import CommonFunctions
def CreateCaseSettingsTable(CaseFileName):
db = SqliteDatabase(CaseFileName)
if not db.OpenConnection():
return
query = "CREATE TABLE IF NOT EXISTS `" + Constants.CaseSettingsTable + "` ("
query += "ID text,"
query += "DisplayName text, "
query += "DateTimestamp text, "
query += "Description text, "
query += "CreatedBy text, "
query += "MimeTypes text, "
query += "`DBHostName` text, "
query += "`DBUsername` text, "
query += "`DBPassword` text, "
query += "DBName text "
query += ");"
#query += "`GetKeywordFrequencyCount` integer, "
#query += "`GetFileProperties` integer,"
#query += "CaseSensitive integer,"
#query += "SearchInPrefix integer,"
#query += "SearchInSuffix integer,"
#query += "SearchInMiddle integer,"
#query += "GetFileExtension integer,"
#query += "GetFileSize integer,"
#query += "GetCreatedTime integer,"
#query += "GetModifiedTime integer,"
#query += "GetAccessedTime integer,"
#query += "GetFileOwner integer, "
#query += "MacStartTime text,"
#query += "MacFinishTime text,"
#query += "MacTotalTime text,"
db.ExecuteNonQuery(query)
def CreateCaseEvidencesTable(CaseFileName, drop=True):
db = SqliteDatabase(CaseFileName)
if not db.OpenConnection():
return
if drop:
query = "DROP TABLE IF EXISTS " + Constants.EvidencesTable
db.ExecuteNonQuery(query)
query = "CREATE TABLE IF NOT EXISTS `" + Constants.EvidencesTable + "` ("
query += "`ID` text PRIMARY KEY, "
query += "`DisplayName` text not null default 'N/A', "
query += "`Location` text not null, "
query += "Comment text not null default 'N/A',"
query += "AddedBy text not null default 'N/A',"
query += "AddedTimestamp text not null default 'N/A',"
query += "GenMD5Hash integet not null default 1, "
query += "GenSHA1Hash integer not null default 0, "
query += "IgnoreKnownFile integer not null default 1, "
query += "EntropyTest integer not null default 1, "
query += "FullTextIndex integer not null default 1, "
query += "DataCarve integer not null default 1, "
query += "StoreThumbnails integer not null default 1,"
query += "HTMLFileListing integer not null default 1,"
query += "TotalFolders integer not null default 1,"
query += "TotalFiles integer not null default 0,"
query += "UnalocatedSpace integer not null default 0,"
query += "TotalImages integer not null default 0,"
query += "TotalEmails integer not null default 0,"
query += "ScanStartTimestamp integer not null default 0,"
query += "ScanEndTimestamp integer not null default 0"
query += ");"
db.ExecuteNonQuery(query)
db.CloseConnection()
def GetCaseEvidences(fileName):
db = SqliteDatabase(fileName)
if not db.OpenConnection():
return False
query = "select ID, DisplayName, Location from " + Constants.EvidencesTable + ";"
rows = db.FetchAllRows(query)
for row in rows:
Globals.EvidencesDict[row[0]] = {'DisplayName': row[1], 'Location':row[2]}
def GetCaseSettings(CaseFileName):
db = SqliteDatabase(CaseFileName)
if not db.OpenConnection():
return False
query = "select ID, DisplayName, DateTimestamp, CreatedBy, Description, "
query += "DBHostName, DBUsername, DBPassword, DBName, MimeTypes from " + Constants.CaseSettingsTable + ";"
rows = db.FetchAllRows(query)
Globals.CurrentCase = Classes.CFICase()
for row in rows:
Globals.CurrentCase.ID = row[0]
Globals.CurrentCase.DisplayName = row[1]
Globals.CurrentCase.DateTimestamp = row[2]
Globals.CurrentCase.CreatedBy = row[3]
Globals.CurrentCase.Description = row[4]
Globals.CurrentCase.DBHostName = row[5]
Globals.CurrentCase.DBUsername = row[6]
Globals.CurrentCase.DBPassword = row[7]
Globals.CurrentCase.DBName = row[8]
try:
Globals.MimeTypeSet = set(row[9].split("|"))
except Exception, value:
print "Failed to Load File System Database. Error: %s"%(value)
db.CloseConnection()
return True
def CreateFileSystemTable(FileSystemName, tableName, drop=True):
db = SqliteDatabase(FileSystemName)
if not db.OpenConnection():
return False
if drop:
query = "DROP TABLE IF EXISTS " + tableName
db.ExecuteNonQuery(query)
#for tableName in Constants.MACTables:
query = """CREATE TABLE IF NOT EXISTS %s (
Name text,
DirPath text,
Extension text,
Category text,
Size float,
Created number,
CDate number,
CMonth number,
Modified number,
    MDate number,
MMonth number,
Accessed number,
ADate number,
AMonth number,
Owner text default 'None',
MimeType text,
Description text,
MD5 text,
SHA1 text default 'None',
SHA224 text default 'None',
SHA256 text default 'None',
SHA384 text default 'None',
SHA512 text default 'None',
NewPath text default 'None',
KnownFile Number,
Export integer default 0)
"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS FileNameIndex on %s(Name);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS MACDirPathIndex on %s(DirPath);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS ExtensionIndex on %s(Extension);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS MimeIndex on %s(MimeType);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS MDateIndex on %s(MDate);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS ADateIndex on %s(ADate);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS CDateIndex on %s(CDate);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS MMonthIndex on %s(MMonth);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS AMonthIndex on %s(AMonth);"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS CMonthIndex on %s(CMonth);"""%(tableName)
db.ExecuteNonQuery(query)
table = "%s%s"%(tableName, Constants.DirListTable)
if drop:
query = """DROP TABLE IF EXISTS %s"""%(table)
db.ExecuteNonQuery(query)
query = """CREATE TABLE IF NOT EXISTS %s (
DirPath text,
SubDirList BLOB) """%table
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS DirPathIndex on %s(DirPath);"""%(tableName)
db.ExecuteNonQuery(query)
db.CloseConnection()
return True
def CreateMACTables(MACFileName, tableName, drop=True):
db = SqliteDatabase(MACFileName)
if not db.OpenConnection():
return False
query = """CREATE TABLE IF NOT EXISTS %s%s (
MMinDate number,
MMaxDate number,
MMinMonth number,
MMaxMonth number,
AMinDate number,
AMaxDate number,
AMinMonth number,
AMaxMonth number,
CMinDate number,
CMaxDate number,
CMinMonth number,
CMaxMonth number) """%(tableName, Constants.MACRangeTable)
db.ExecuteNonQuery(query)
db.CloseConnection()
return True
def LoadMACMinMaxValues():
db = SqliteDatabase(Globals.MACFileName)
if not db.OpenConnection():
return False
for evidenceID in Globals.EvidencesDict:
query ="SELECT CMinDate, CMaxDate, CMinMonth, CMaxMonth, MMinDate, MMaxDate, MMinMonth, MMaxMonth, AMinDate, AMaxDate, AMinMonth, AMaxMonth from %s%s;"%(evidenceID, Constants.MACRangeTable)
row = db.FetchOneRow(query)
if not row:
continue
Globals.TimelinesDict['Created'] = {'MinDate': row[0], 'MaxDate': row[1], 'MinMonth': row[2], 'MaxMonth': row[3]}
Globals.TimelinesDict['Modified'] = {'MinDate': row[4], 'MaxDate': row[5], 'MinMonth': row[6], 'MaxMonth': row[7]}
Globals.TimelinesDict['Accessed'] = {'MinDate': row[8], 'MaxDate': row[9], 'MinMonth': row[10], 'MaxMonth': row[11]}
def UpdateDatabaseTables():
db = SqliteDatabase(Globals.FileSystemName)
if not db.OpenConnection():
return False
for evidenceID in Globals.EvidencesDict:
try:
row = db.FetchOneRow('select Export from %s'%evidenceID)
except Exception, value:
#print value
query = "alter table %s add column Export integer default 0;"%evidenceID
db.ExecuteNonQuery(query)
db.CloseConnection()
def CreateKeywordsFrequencyTable(KeywordsFileName, drop=False):
db = SqliteDatabase(KeywordsFileName)
if not db.OpenConnection():
return False
if drop:
query = "DROP TABLE IF EXISTS " + Constants.KeywordsFrequencyTable
db.ExecuteNonQuery(query)
query = "CREATE TABLE IF NOT EXISTS " + Constants.KeywordsFrequencyTable + " ( "
query += "ID INTEGER PRIMARY KEY, "
query += "FileName text"
keycolumns = ""
#print Globals.Keywords
for keyword in Globals.Keywords:
if keyword:
keycolumns += "," + keyword + "_CI INTEGER"
if Globals.CurrentCase.CaseSensitive:
keycolumns += "," + keyword + "_CS INTEGER"
query += keycolumns
query += " )"
print query
db.ExecuteNonQuery(query)
db.CloseConnection()
return True
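# Illustration (added, not part of the original module): with a hypothetical
# Globals.Keywords = ["password"] and CaseSensitive enabled, the statement built
# above expands to roughly
#   CREATE TABLE IF NOT EXISTS <KeywordsFrequencyTable> ( ID INTEGER PRIMARY KEY,
#   FileName text,password_CI INTEGER,password_CS INTEGER )
# i.e. one case-insensitive (_CI) and, optionally, one case-sensitive (_CS)
# frequency column per keyword.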
def CreateStopwordsTable(TextCatFileName, drop=True):
db = SqliteDatabase(TextCatFileName)
if not db.OpenConnection():
return False
if drop:
query = "DROP TABLE IF EXISTS " + Constants.StopwordsTable
db.ExecuteNonQuery(query)
query = "CREATE TABLE IF NOT EXISTS " + Constants.StopwordsTable + " ( "
query += "ID INTEGER PRIMARY KEY, "
query += "Stopword text )"
db.ExecuteNonQuery(query)
db.CloseConnection()
return True
def CreateThumbnailsTable(ImagesFileName, tableName, drop=True):
db = SqliteDatabase(ImagesFileName)
if not db.OpenConnection():
return False
if drop:
query = "DROP TABLE IF EXISTS " + tableName
db.ExecuteNonQuery(query)
query = """CREATE TABLE IF NOT EXISTS %s(
DirPath text,
Filename text,
Thumbnail BLOB )"""%(tableName)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS ImageDirPathIndex on %s(DirPath);"""%tableName
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS ImageFileNameIndex on %s(Filename);"""%tableName
db.ExecuteNonQuery(query)
db.CloseConnection()
return True
def SetupSqliteIndexTables(dbFileName):
db = SqliteDatabase(dbFileName)
if not db.OpenConnection():
return
query = """CREATE TABLE IF NOT EXISTS %s (
Word varchar(500),
StemmedWord varchar(500),
Frequency int unsigned,
IDF float)
"""%(Constants.WordsTable)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS WordIndex ON %s (Word);"""%(Constants.WordsTable)
db.ExecuteNonQuery(query)
query = """CREATE INDEX IF NOT EXISTS StemmedWordIndex ON %s (StemmedWord);"""%(Constants.WordsTable)
db.ExecuteNonQuery(query)
query = """CREATE TABLE IF NOT EXISTS %s (
DocID INT UNSIGNED NOT NULL,
WordID INT UNSIGNED NOT NULL,
Location INT UNSIGNED NOT NULL,
InPath | |
import logging
import os
import pathlib
import sys
import threading as th
import time
import traceback as tb
import io
import urllib.request as ur
import zipfile
import shutil
from io import open
from os import path
from urllib.error import URLError
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QDialog
from modi_firmware_updater.util.connection_util import list_modi_ports
from modi_firmware_updater.core.esp32_updater import ESP32FirmwareUpdater
from modi_firmware_updater.core.stm32_updater import STM32FirmwareUpdater
from modi_firmware_updater.core.stm32_network_updater import NetworkFirmwareUpdater
class StdoutRedirect(QObject):
printOccur = pyqtSignal(str, str, name="print")
def __init__(self):
QObject.__init__(self, None)
self.daemon = True
self.sysstdout = sys.stdout.write
self.sysstderr = sys.stderr.write
self.logger = None
def stop(self):
sys.stdout.write = self.sysstdout
sys.stderr.write = self.sysstderr
def start(self):
sys.stdout.write = self.write
sys.stderr.write = lambda msg: self.write(msg, color="red")
def write(self, s, color="black"):
sys.stdout.flush()
self.printOccur.emit(s, color)
if self.logger and not self.__is_redundant_line(s):
self.logger.info(s)
@staticmethod
def __is_redundant_line(line):
return (
line.startswith("\rUpdating") or
line.startswith("\rFirmware Upload: [") or
len(line) < 3
)
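# Usage note (added): the Form class below wires this redirector up roughly as
#   self.stdout = StdoutRedirect(); self.stdout.start()
#   self.stdout.printOccur.connect(lambda line: self.__append_text_line(line))
# start() swaps sys.stdout.write / sys.stderr.write for write(), so anything the
# updater threads print is mirrored to the UI console and, when a logger is
# attached, to the log file; progress-bar lines are filtered by __is_redundant_line.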
class PopupMessageBox(QtWidgets.QMessageBox):
def __init__(self, main_window, level):
QtWidgets.QMessageBox.__init__(self)
self.window = main_window
self.setSizeGripEnabled(True)
self.setWindowTitle("System Message")
def error_popup():
self.setIcon(self.Icon.Warning)
self.setText("ERROR")
def warning_popup():
self.setIcon(self.Icon.Information)
self.setText("WARNING")
self.addButton("Ok", self.ActionRole)
# restart_btn.clicked.connect(self.restart_btn)
func = {
"error": error_popup,
"warning": warning_popup,
}.get(level)
func()
close_btn = self.addButton("Exit", self.ActionRole)
close_btn.clicked.connect(self.close_btn)
# report_btn = self.addButton('Report Error', self.ActionRole)
# report_btn.clicked.connect(self.report_btn)
self.show()
def event(self, e):
MAXSIZE = 16_777_215
MINHEIGHT = 100
MINWIDTH = 200
MINWIDTH_CHANGE = 500
result = QtWidgets.QMessageBox.event(self, e)
self.setMinimumHeight(MINHEIGHT)
self.setMaximumHeight(MAXSIZE)
self.setMinimumWidth(MINWIDTH)
self.setMaximumWidth(MAXSIZE)
self.setSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
)
textEdit = self.findChild(QtWidgets.QTextEdit)
if textEdit is not None:
textEdit.setMinimumHeight(MINHEIGHT)
textEdit.setMaximumHeight(MAXSIZE)
textEdit.setMinimumWidth(MINWIDTH_CHANGE)
textEdit.setMaximumWidth(MAXSIZE)
textEdit.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding,
)
return result
def close_btn(self):
self.window.close()
def report_btn(self):
pass
# def restart_btn(self):
# self.window.stream.thread_signal.connect(self.restart_update)
# self.window.stream.thread_signal.emit(True)
# @pyqtSlot(object)
# def restart_update(self, click):
# self.window.update_network_stm32.clicked(click)
class ThreadSignal(QObject):
thread_error = pyqtSignal(object)
thread_signal = pyqtSignal(object)
def __init__(self):
super().__init__()
class Form(QDialog):
"""
GUI Form of MODI Firmware Updater
"""
def __init__(self, installer=False):
QDialog.__init__(self)
self.logger = self.__init_logger()
self.__excepthook = sys.excepthook
sys.excepthook = self.__popup_excepthook
th.excepthook = self.__popup_thread_excepthook
self.err_list = list()
self.is_popup = False
ui_path = os.path.join(os.path.dirname(__file__), "assets", "updater.ui")
esp32_update_list_ui_path = os.path.join(os.path.dirname(__file__), "assets", "esp32_update_list.ui")
stm32_update_list_ui_path = os.path.join(os.path.dirname(__file__), "assets", "stm32_update_list.ui")
if sys.platform.startswith("win"):
self.component_path = pathlib.PurePosixPath(pathlib.PurePath(__file__), "..", "assets", "component")
else:
self.component_path = os.path.join(os.path.dirname(__file__), "assets", "component")
self.ui = uic.loadUi(ui_path)
self.ui.setStyleSheet("background-color: white")
self.ui.console.hide()
self.ui.setFixedHeight(600)
# Set LUXROBO logo image
logo_path = os.path.join(self.component_path, "luxrobo_logo.png")
qPixmapVar = QtGui.QPixmap()
qPixmapVar.load(logo_path)
self.ui.lux_logo.setPixmap(qPixmapVar)
self.esp32_update_list_form = ESP32UpdateListForm(esp32_update_list_ui_path, self.component_path)
self.stm32_update_list_form = STM32UpdateListForm(stm32_update_list_ui_path, self.component_path)
# Buttons image
self.active_path = pathlib.PurePosixPath(self.component_path, "btn_frame_active.png")
self.inactive_path = pathlib.PurePosixPath(self.component_path, "btn_frame_inactive.png")
self.pressed_path = pathlib.PurePosixPath(self.component_path, "btn_frame_pressed.png")
self.language_frame_path = pathlib.PurePosixPath(self.component_path, "lang_frame.png")
self.language_frame_pressed_path = pathlib.PurePosixPath(self.component_path, "lang_frame_pressed.png")
self.ui.update_network_esp32.setStyleSheet(f"border-image: url({self.active_path}); font-size: 16px")
self.ui.update_network_esp32_interpreter.setStyleSheet(f"border-image: url({self.active_path}); font-size: 16px")
self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.active_path}); font-size: 16px")
self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.active_path}); font-size: 16px")
self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.active_path}); font-size: 16px")
self.ui.translate_button.setStyleSheet(f"border-image: url({self.language_frame_path}); font-size: 13px")
self.ui.devmode_button.setStyleSheet(f"border-image: url({self.language_frame_path}); font-size: 13px")
self.ui.console.setStyleSheet("font-size: 10px")
self.ui.setWindowTitle("MODI Firmware Updater")
self.ui.setWindowIcon(QtGui.QIcon(os.path.join(self.component_path, "network_module.ico")))
# Redirect stdout to text browser (i.e. console in our UI)
self.stdout = StdoutRedirect()
self.stdout.start()
self.stdout.printOccur.connect(
lambda line: self.__append_text_line(line)
)
self.stdout.logger = self.logger
# Set signal for thread communication
self.stream = ThreadSignal()
# Connect up the buttons
self.ui.update_network_esp32.clicked.connect(self.update_network_esp32)
self.ui.update_network_esp32_interpreter.clicked.connect(self.update_network_esp32_interpreter)
self.ui.update_stm32_modules.clicked.connect(self.update_stm32_modules)
self.ui.update_network_stm32.clicked.connect(self.update_network_stm32)
self.ui.update_network_stm32_bootloader.clicked.connect(self.update_network_bootloader_stm32)
self.ui.translate_button.clicked.connect(self.translate_button_text)
self.ui.devmode_button.clicked.connect(self.dev_mode_button)
self.buttons = [
self.ui.update_network_esp32,
self.ui.update_network_esp32_interpreter,
self.ui.update_stm32_modules,
self.ui.update_network_stm32,
self.ui.update_network_stm32_bootloader,
self.ui.devmode_button,
self.ui.translate_button,
]
# Disable the first button to be focused when UI is loaded
self.ui.update_network_esp32.setAutoDefault(False)
self.ui.update_network_esp32.setDefault(False)
# Print init status
time_now_str = time.strftime("[%Y/%m/%d@%X]", time.localtime())
print(time_now_str + " GUI MODI Firmware Updater has been started!")
# Set up field variables
self.firmware_updater = None
self.button_in_english = False
self.console = False
# Set up ui field variables
self.ui.is_english = False
self.ui.active_path = self.active_path
self.ui.pressed_path = self.pressed_path
self.ui.language_frame_path = self.language_frame_path
self.ui.language_frame_pressed_path = self.language_frame_pressed_path
self.ui.stream = self.stream
self.ui.popup = self._thread_signal_hook
# Check module firmware
self.local_firmware_path = path.join(path.dirname(__file__), "assets", "firmware", "latest")
# module
self.local_module_firmware_path = path.join(self.local_firmware_path, "stm32")
self.local_module_version_path = path.join(self.local_module_firmware_path, "version.txt")
self.latest_module_firmware_path = "https://download.luxrobo.com/modi-skeleton/skeleton.zip"
self.latest_module_version_path = "https://download.luxrobo.com/modi-skeleton/version.txt"
# network base
self.local_network_firmware_path = path.join(self.local_firmware_path, "stm32")
self.local_network_version_path = path.join(self.local_network_firmware_path, "base_version.txt")
self.latest_network_firmware_path = "https://download.luxrobo.com/modi-network-os/network.zip"
self.latest_network_version_path = "https://download.luxrobo.com/modi-network-os/version.txt"
#esp32
self.local_esp32_firmware_path = path.join(self.local_firmware_path, "esp32")
self.local_esp32_version_path = path.join(self.local_esp32_firmware_path, "esp_version.txt")
self.latest_esp32_firmware_path = [
"https://download.luxrobo.com/modi-ota-firmware/ota.zip",
"https://download.luxrobo.com/modi-esp32-firmware/esp.zip",
]
self.latest_esp32_version_path = "https://download.luxrobo.com/modi-esp32-firmware/version.txt"
self.check_module_firmware()
# Set Button Status
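        # (added note) each helper below toggles its own state, so calling it
        # twice presumably restores the default state while refreshing the
        # button styles and texts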
self.translate_button_text()
self.translate_button_text()
self.dev_mode_button()
self.dev_mode_button()
self.ui.show()
#
# Main methods
#
def update_network_esp32(self):
button_start = time.time()
if self.firmware_updater and self.firmware_updater.update_in_progress:
self.esp32_update_list_form.ui.show()
return
self.ui.update_network_esp32.setStyleSheet(f"border-image: url({self.pressed_path}); font-size: 16px")
self.ui.console.clear()
print("ESP32 Firmware Updater has been initialized for esp update!")
th.Thread(
target=self.__click_motion, args=(0, button_start), daemon=True
).start()
modi_ports = list_modi_ports()
if not modi_ports:
raise Exception("No MODI port is connected")
esp32_updater = ESP32FirmwareUpdater()
esp32_updater.set_ui(self.ui)
th.Thread(
target=esp32_updater.update_firmware,
args=(False,),
daemon=True
).start()
self.firmware_updater = esp32_updater
def update_network_esp32_interpreter(self):
button_start = time.time()
if self.firmware_updater and self.firmware_updater.update_in_progress:
self.esp32_update_list_form.ui.show()
return
self.ui.update_network_esp32_interpreter.setStyleSheet(f"border-image: url({self.pressed_path}); font-size: 16px")
self.ui.console.clear()
print("ESP32 Firmware Updater has been initialized for esp interpreter update!")
th.Thread(
target=self.__click_motion, args=(1, button_start), daemon=True
).start()
modi_ports = list_modi_ports()
if not modi_ports:
raise Exception("No MODI port is connected")
# self.esp32_update_list_form.reset_device_list()
# self.esp32_update_list_form.ui.show()
esp32_updater = ESP32FirmwareUpdater()
esp32_updater.set_ui(self.ui)
th.Thread(
target=esp32_updater.update_firmware,
args=(modi_ports, True,),
daemon=True
).start()
self.firmware_updater = esp32_updater
def update_stm32_modules(self):
button_start = time.time()
if self.firmware_updater and self.firmware_updater.update_in_progress:
self.stm32_update_list_form.ui.show()
return
self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.pressed_path}); font-size: 16px")
self.ui.console.clear()
print("STM32 Firmware Updater has been initialized for module update!")
th.Thread(
target=self.__click_motion, args=(2, button_start), daemon=True
).start()
modi_ports = list_modi_ports()
if not modi_ports:
raise Exception("No MODI port is connected")
# self.stm32_update_list_form.reset_device_list()
# self.stm32_update_list_form.ui.show()
stm32_updater = STM32FirmwareUpdater(port = modi_ports[0].device)
stm32_updater.set_print(False)
stm32_updater.set_raise_error(False)
stm32_updater.set_ui(self.ui)
th.Thread(
target=stm32_updater.update_module_firmware,
# args=(modi_ports, ),
daemon=True,
).start()
self.firmware_updater = stm32_updater
def update_network_stm32(self):
button_start = time.time()
if self.firmware_updater and self.firmware_updater.update_in_progress:
# self.stm32_update_list_form.ui.show()
return
self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.pressed_path}); font-size: 16px")
self.ui.console.clear()
print("STM32 Firmware Updater has been initialized for base update!")
th.Thread(
target=self.__click_motion, args=(3, button_start), daemon=True
).start()
modi_ports = list_modi_ports()
if not modi_ports:
raise Exception("No MODI port is connected")
# self.stm32_update_list_form.reset_device_list()
# self.stm32_update_list_form.ui.show()
network_updater = NetworkFirmwareUpdater(modi_ports[0].device)
network_updater.set_ui(self.ui)
th.Thread(
target=network_updater.update_module_firmware,
args=(False,),
daemon=True,
).start()
self.firmware_updater = network_updater
def update_network_bootloader_stm32(self):
button_start = time.time()
if self.firmware_updater and self.firmware_updater.update_in_progress:
# self.stm32_update_list_form.ui.show()
return
self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.pressed_path}); font-size: 16px")
self.ui.console.clear()
print("STM32 Firmware Updater has been initialized for base update!")
th.Thread(
target=self.__click_motion, args=(4, button_start), daemon=True
).start()
modi_ports = list_modi_ports()
if not modi_ports:
raise Exception("No MODI port is connected")
# self.stm32_update_list_form.reset_device_list()
# self.stm32_update_list_form.ui.show()
network_updater = NetworkFirmwareUpdater()
network_updater.set_ui(self.ui)
th.Thread(
target=network_updater.update_module_firmware,
args=(True,),
daemon=True,
).start()
self.firmware_updater = network_updater
def dev_mode_button(self):
button_start = time.time()
self.ui.devmode_button.setStyleSheet(f"border-image: url({self.language_frame_pressed_path});font-size: 13px")
th.Thread(
target=self.__click_motion, args=(5, button_start), daemon=True
).start()
if self.console:
self.ui.console.hide()
self.ui.setFixedHeight(600)
else:
self.ui.console.show()
self.ui.setFixedHeight(780)
self.console = not self.console
def translate_button_text(self):
button_start = time.time()
self.ui.translate_button.setStyleSheet(f"border-image: url({self.language_frame_pressed_path}); font-size: 13px")
th.Thread(
target=self.__click_motion, args=(6, button_start), daemon=True
).start()
button_en = [
"Update Network ESP32",
"Update Network ESP32 Interpreter",
"Update STM32 Modules",
"Update Network STM32",
"Set Network Bootloader STM32",
"Dev Mode",
"한국어",
]
button_kr = [
"네트워크 모듈 업데이트",
"네트워크 모듈 인터프리터 초기화",
"모듈 초기화",
"네트워크 모듈 초기화",
"네트워크 모듈 부트로더",
"개발자 모드",
"English",
]
appropriate_translation = (
button_kr if self.button_in_english else button_en
)
self.button_in_english = not self.button_in_english
self.ui.is_english = not self.ui.is_english
for i, button in enumerate(self.buttons):
button.setText(appropriate_translation[i])
def check_module_firmware(self):
if not os.path.exists(self.local_firmware_path):
os.mkdir(self.local_firmware_path)
self.__check_module_version()
self.__check_network_base_version()
self.__check_esp32_version()
def __download_module_firmware(self):
try:
# read latest version
with ur.urlopen(self.latest_module_version_path, timeout=5) as conn:
last_version_name = conn.read().decode("utf8")
# skeleton update
with ur.urlopen(self.latest_module_firmware_path, timeout=5) as conn:
module_name = [
"button",
"dial",
"display",
"environment",
"gyro",
"ir",
"led",
"mic",
"motor",
"speaker",
"ultrasonic"
]
download_response = conn.read()
zip_content = zipfile.ZipFile(io.BytesIO(download_response), "r")
for i, module in enumerate(module_name):
src_path = module + "/Base_module.bin"
bin_buffer = zip_content.read(src_path)
if module == "environment":
dest_path = path.join(self.local_module_firmware_path, "env" + ".bin")
else:
dest_path = path.join(self.local_module_firmware_path, module + ".bin")
with open(dest_path, "wb") as data_file:
data_file.write(bin_buffer)
# version update
with open(path.join(self.local_module_firmware_path, "version.txt"), "w") as data_file:
data_file.write(last_version_name)
return True
except URLError:
return False
def __download_network_firmware(self):
try:
# read latest version
with ur.urlopen(self.latest_network_version_path, timeout=5) as conn:
last_version_name = conn.read().decode("utf8")
# network base update
with ur.urlopen(self.latest_network_firmware_path, timeout=5) as conn:
download_response = conn.read()
zip_content = zipfile.ZipFile(io.BytesIO(download_response), "r")
with open(path.join(self.local_network_firmware_path, "network.bin"), "wb") as data_file:
data_file.write(zip_content.read("network.bin"))
# version update
with open(path.join(self.local_network_firmware_path, "base_version.txt"), "w") as data_file:
data_file.write(last_version_name)
return True
except URLError:
return False
def __download_esp32_firmware(self):
try:
# read latest version
with ur.urlopen(self.latest_esp32_version_path, timeout=5) as conn:
last_version_name = conn.read().decode("utf8")
# ota update
with ur.urlopen(self.latest_esp32_firmware_path[0], timeout=5) as conn:
download_response = conn.read()
zip_content = zipfile.ZipFile(io.BytesIO(download_response), "r")
with open(path.join(self.local_esp32_firmware_path, "modi_ota_factory.bin"), "wb") as data_file:
data_file.write(zip_content.read("modi_ota_factory.bin"))
with open(path.join(self.local_esp32_firmware_path, "ota_data_initial.bin"), "wb") as data_file:
data_file.write(zip_content.read("ota_data_initial.bin"))
# bootloader, partitions, esp32 update
with ur.urlopen(self.latest_esp32_firmware_path[1], timeout=5) as conn:
download_response = conn.read()
zip_content = zipfile.ZipFile(io.BytesIO(download_response), "r")
with open(path.join(self.local_esp32_firmware_path, "bootloader.bin"), "wb") as data_file:
data_file.write(zip_content.read("bootloader.bin"))
with open(path.join(self.local_esp32_firmware_path, "partitions.bin"), "wb") as data_file:
data_file.write(zip_content.read("partitions.bin"))
with open(path.join(self.local_esp32_firmware_path, "esp32.bin"), "wb") as data_file:
data_file.write(zip_content.read("esp32.bin"))
# version update
with open(path.join(self.local_esp32_firmware_path, "esp_version.txt"), "w") as data_file:
data_file.write(last_version_name)
return True
except URLError:
return False
def __check_module_version(self):
try:
local_version_info = None
latest_version_info = None
with ur.urlopen(self.latest_module_version_path, timeout=5) as conn:
| |
import copy
import numpy as np
from spysort.functions import convolution, cut_sgl_evt
from spysort.Events import events
class align_events(events.build_events):
""" Alignment of spike forms after clustering using a Brute-Force method"""
def __init__(self, data, positions, goodEvts, clusters, CSize, win=[],
before=14, after=30, thr=3):
""" Performs a PCA-aided k-Means clustering and creates the proper
indexes for further alignment of the raw data.
**Parameters**
data : double
The input normalized data list
positions : int
The positions of spike events as they have been computed by the
            spike_detection (be careful: the user has to define and treat
            explicitly the size and the contents of the position array)
clusters : double
The clustered data
CSize : int
The number of the chosen clusters
win : double
The filtering window
before : int
The number of sampling point to keep before the peak
after : int
The number of sampling point to keep after the peak
thr : double
Filtering threshold value
"""
self.win = win
self.thr = thr
self.before = before
self.after = after
self.positions = positions
# Converts input list data to a numpy array
self.data = np.asarray(data)
self.goodEvts = goodEvts
# Events instance
events.build_events.__init__(self, self.data, self.positions, self.win,
self.before, self.after)
# k-Means clustering
self.kmc = clusters
# Construction of the proper cluster indices
self.gcpos = copy.deepcopy([self.positions[self.goodEvts]
[np.array(self.kmc) == i]
for i in range(CSize)])
def classify_and_align_evt(self, evt_pos, centers, abs_jitter_max=3,
otherData=False, x=[]):
""" One step of the Brute-Force method of realignment. It returns the
name of the closest center in terms of Euclidean distance or "?" if
none of the clusters' waveform does better than a uniformly null one,
the new position of the event (the previous position corrected by the
integer part of the estimated jitter), the remaining jitter.
**Parameters**
evt_pos : int
A sampling point at which an event was detected.
centers : dict
A dictionary that contains all the necessary arrays and parameters
in order to perform properly the classification and the alignment
of the raw data
abs_jitter_max : double
The absolute maximum permitted value of the jitter
**Returns**
        A list with the following components: the name of the closest center
        if it was close enough or '?', the nearest sampling point to the
        event's peak, and the jitter, i.e. the difference between the estimated
        actual peak position and the nearest sampling point.
"""
if otherData is False:
data = self.data
else:
data = np.asarray(x)
cluster_names = sorted(list(centers))
n_sites = data.shape[0]
centersM = np.array([centers[c_name]["center"]
[np.tile((-self.before <= centers[c_name]
["center_idx"]).__and__(centers[c_name]
["center_idx"] <= self.after), n_sites)]
for c_name in cluster_names])
evt = cut_sgl_evt(data, evt_pos, self.before, self.after)
delta = -(centersM - evt)
cluster_idx = np.argmin(np.sum(delta**2, axis=1))
good_cluster_name = cluster_names[cluster_idx]
good_cluster_idx = np.tile((-self.before <=
centers[good_cluster_name]
["center_idx"]).__and__(
centers[good_cluster_name]
["center_idx"] <= self.after), n_sites)
centerD = centers[good_cluster_name]["centerD"][good_cluster_idx]
centerD_norm2 = np.dot(centerD, centerD)
centerDD = centers[good_cluster_name]["centerDD"][good_cluster_idx]
centerDD_norm2 = np.dot(centerDD, centerDD)
centerD_dot_centerDD = np.dot(centerD, centerDD)
h = delta[cluster_idx, :]
h_order0_norm2 = np.sum(h**2)
h_dot_centerD = np.dot(h, centerD)
jitter0 = h_dot_centerD/centerD_norm2
# print jitter0
h_order1_norm2 = np.sum((h-jitter0*centerD)**2)
if h_order0_norm2 > h_order1_norm2:
h_dot_centerDD = np.dot(h, centerDD)
first = (-2. * h_dot_centerD + 2. * jitter0 *
(centerD_norm2 - h_dot_centerDD) + 3. * jitter0**2 *
centerD_dot_centerDD + jitter0**3 * centerDD_norm2)
second = (2. * (centerD_norm2 - h_dot_centerDD) + 6. * jitter0 *
centerD_dot_centerDD + 3. * jitter0**2 * centerDD_norm2)
jitter1 = jitter0 - first/second
h_order2_norm2 = sum((h-jitter1*centerD-jitter1**2/2*centerDD)**2)
if h_order1_norm2 <= h_order2_norm2:
jitter1 = jitter0
else:
jitter1 = 0
if np.abs(np.round(jitter1)) > 0:
evt_pos -= int(np.round(jitter1))
evt = cut_sgl_evt(data, evt_pos, self.before, self.after)
h = evt - centers[good_cluster_name]["center"][good_cluster_idx]
h_order0_norm2 = np.sum(h**2)
h_dot_centerD = np.dot(h, centerD)
jitter0 = h_dot_centerD/centerD_norm2
h_order1_norm2 = np.sum((h - jitter0 * centerD)**2)
if h_order0_norm2 > h_order1_norm2:
h_dot_centerDD = np.dot(h, centerDD)
first = (-2. * h_dot_centerD + 2. * jitter0 *
(centerD_norm2 - h_dot_centerDD) + 3. * jitter0**2 *
centerD_dot_centerDD + jitter0**3 * centerDD_norm2)
second = (2. * (centerD_norm2 - h_dot_centerDD) + 6. * jitter0
* centerD_dot_centerDD + 3. * jitter0**2 *
centerDD_norm2)
jitter1 = jitter0 - first/second
h_order2_norm2 = np.sum((h - jitter1 * centerD - jitter1**2 /
2 * centerDD)**2)
if h_order1_norm2 <= h_order2_norm2:
jitter1 = jitter0
else:
jitter1 = 0
if np.sum(evt**2) > np.sum((h - jitter1 * centerD - jitter1**2/2. *
centerDD)**2):
return [cluster_names[cluster_idx], evt_pos, jitter1]
else:
return ['?', evt_pos, jitter1]
def get_jitter(self, evts, center, centerD, centerDD):
""" Estimates the jitter given an event or a matrix of events where
individual events form the rows, a median event and the first two
derivatives of the latter.
**Parameters**
evts : double (array)
The actual clean events to be realigned
center : double (array)
The estimate of the center (obtained from the median)
centerD : double (array)
The estimate of the center's derivative (obtained from the median
of events cut on the derivative of data)
centerDD : double (array)
The estimate of the center's second derivative (obtained from the
median of events cut on the second derivative of data)
**Returns**
The first approximation of the jitter (numerical value).
"""
centerD_norm2 = np.dot(centerD, centerD)
centerDD_norm2 = np.dot(centerDD, centerDD)
centerD_dot_centerDD = np.dot(centerD, centerDD)
if evts.ndim == 1:
evts = evts.reshape(1, len(center))
evts = evts - center
h_dot_centerD = np.dot(evts, centerD)
delta0 = h_dot_centerD/centerD_norm2
h_dot_centerDD = np.dot(evts, centerDD)
first = (-2. * h_dot_centerD + 2. * delta0 *
(centerD_norm2 - h_dot_centerDD) + 3. * delta0**2 *
centerD_dot_centerDD + delta0**3 * centerDD_norm2)
second = (2. * (centerD_norm2 - h_dot_centerDD) + 6. * delta0 *
centerD_dot_centerDD + 3. * delta0**2 * centerDD_norm2)
return delta0 - first/second
def mk_aligned_events(self, positions):
""" Aligns the events of one realization. It returns a matrix of
aligned events, a vector of spike positions giving the nearest sampling
point to the actual peak, a vector of jitter giving the offset between
the previous spike position and the "actual" peak position.
**Parameters**
positions : int (list)
Spike times
**Returns**
A tuple whose elements are:
A matrix with as many rows as events and whose rows are the cuts on the
different recording sites glued one after the other. These events have
been jitter corrected using the second order Taylor expansion.
A vector of events positions where ”actual” positions have been rounded
to the nearest index.
A vector of jitter values.
**Details**
        1. The data's first and second derivatives are estimated first.
        2. Events are cut next on each of the three versions of the data.
        3. The global median event for each of the three versions is obtained.
        4. Each event is then aligned on the median using a first order Taylor
        expansion.
        5. If this alignment decreases the squared norm of the event, an
        improvement is looked for using a second order expansion.
        If this second order expansion still decreases the squared norm and if
        the estimated jitter is larger than 1, the whole procedure is repeated
        after cutting the event anew based on a better peak position.
"""
win = np.array([1., 0., -1.])/2.
Dx = np.apply_along_axis(convolution, 1, self.data, win)
DDx = np.apply_along_axis(convolution, 1, Dx, win)
evts = self.mkEvents(otherPos=True, x=self.data, pos=positions)
evtsD = self.mkEvents(otherPos=True, x=Dx, pos=positions)
evtsDD = self.mkEvents(otherPos=True, x=DDx, pos=positions)
evts_median = np.apply_along_axis(np.median, 0, evts)
evtsD_median = np.apply_along_axis(np.median, 0, evtsD)
evtsDD_median = np.apply_along_axis(np.median, 0, evtsDD)
evts_jitter = self.get_jitter(evts, evts_median, evtsD_median,
evtsDD_median)
positions = positions-np.round(evts_jitter).astype('int')
evts = self.mkEvents(otherPos=True, x=self.data, pos=positions)
evtsD = self.mkEvents(otherPos=True, x=Dx, pos=positions)
evtsDD = self.mkEvents(otherPos=True, x=DDx, pos=positions)
evts_median = np.apply_along_axis(np.median, 0, evts)
evtsD_median = np.apply_along_axis(np.median, 0, evtsD)
evtsDD_median = np.apply_along_axis(np.median, 0, evtsDD)
evts_jitter = self.get_jitter(evts, evts_median, evtsD_median,
evtsDD_median)
evts -= (np.outer(evts_jitter, evtsD_median) +
np.outer(evts_jitter**2/2., evtsDD_median))
return (evts, positions, evts_jitter)
def mk_center_dictionary(self, positions):
""" Creates a dictionary containing all the necessary information in
order to facilitate the realignment method.
**Parameters**
positions : int (list)
            A vector of spike times that should all come from the same
            cluster and correspond to reasonably 'clean' events.
**Returns**
A dictionary with the following components:
center: the estimate of the center (obtained from the median).
centerD: the estimate of the center’s derivative (obtained from the
median of events cut on the | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
import json
from azure.cli.testsdk import (
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer
)
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class StreamAnalyticsClientTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_job_crud(self):
self.kwargs.update({
"job_name": "job",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5",
checks=[
self.check("name", "{job_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs")
]
)
# retrieve/update a streaming job
self.cmd(
"stream-analytics job list -g {rg}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{job_name}")
]
)
self.cmd(
"stream-analytics job update -n {job_name} -g {rg} \
--order-max-delay 10 --arrival-max-delay 29"
)
self.cmd(
"stream-analytics job show -n {job_name} -g {rg}",
checks=[
self.check("eventsOutOfOrderMaxDelayInSeconds", 10),
self.check("eventsLateArrivalMaxDelayInSeconds", 29)
]
)
# delete a streaming job
self.cmd("stream-analytics job delete -n {job_name} -g {rg} --yes")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_transformation_crud(self):
self.kwargs.update({
"job_name": "job",
"transformation_name": "transformation",
"input_name": "input",
"output_name": "output",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create a transformation
self.kwargs["saql"] = f"SELECT * INTO {self.kwargs['output_name']} FROM {self.kwargs['input_name']}"
self.cmd(
"stream-analytics transformation create -n {transformation_name} -g {rg} \
--job-name {job_name} \
--saql '{saql}' --streaming-units 6",
checks=[
self.check("name", "{transformation_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/transformations")
]
)
# retrieve/update a transformation
self.cmd(
"stream-analytics transformation update -n {transformation_name} -g {rg} \
--job-name {job_name} --saql '{saql}' --streaming-units 3"
)
self.cmd(
"stream-analytics transformation show -n {transformation_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{transformation_name}"),
self.check("streamingUnits", 3)
]
)
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_input_crud(self, storage_account):
self.kwargs.update({
"job_name": "job",
"input_name": "input",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create/test an input
props = {
"type": "Reference",
"datasource": {
"type": "Microsoft.Storage/Blob",
"properties": {
"container": self.kwargs["container"],
"dateFormat": "yyyy/MM/dd",
"pathPattern": "{date}/{time}",
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"timeFormat": "HH"
}
},
"serialization": {
"type": "Csv",
"properties": {
"encoding": "UTF8",
"fieldDelimiter": ","
}
}
}
self.kwargs["properties"] = json.dumps(props)
self.cmd(
"stream-analytics input create -n {input_name} -g {rg} \
--job-name {job_name} \
--properties '{properties}'",
checks=[
self.check("name", "{input_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/inputs")
]
)
self.cmd(
"stream-analytics input test -n {input_name} -g {rg} \
--job-name {job_name} \
--properties '{properties}'",
checks=[
self.check("status", "TestSucceeded")
]
)
# retrieve/update an input
self.cmd(
"stream-analytics input list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{input_name}")
]
)
props["datasource"]["properties"]["dateFormat"] = "MM/dd/yyyy"
self.kwargs["properties"] = json.dumps(props)
self.cmd(
"stream-analytics input update -n {input_name} -g {rg} \
--job-name {job_name} --properties '{properties}'"
)
self.cmd(
"stream-analytics input show -n {input_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{input_name}"),
self.check("properties.datasource.dateFormat", "MM/dd/yyyy")
]
)
# delete an input
self.cmd("stream-analytics input delete -n {input_name} -g {rg} --job-name {job_name} --yes")
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_output_crud(self, storage_account):
self.kwargs.update({
"job_name": "job",
"output_name": "output",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create/test an output
datasource_props = {
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "yyyy/MM/dd",
"timeFormat": "HH"
}
}
serialization_props = {
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
}
self.kwargs["datasource"] = json.dumps(datasource_props)
self.kwargs["serialization"] = json.dumps(serialization_props)
self.cmd(
"stream-analytics output create -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'",
checks=[
self.check("name", "{output_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/outputs")
]
)
self.cmd(
"stream-analytics output test -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'",
checks=[
self.check("status", "TestSucceeded")
]
)
# retrieve/update an output
self.cmd(
"stream-analytics output list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{output_name}")
]
)
datasource_props["properties"]["dateFormat"] = "MM/dd/yyyy"
self.kwargs["datasource"] = json.dumps(datasource_props)
self.cmd(
"stream-analytics output update -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'"
)
self.cmd(
"stream-analytics output show -n {output_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{output_name}"),
self.check("datasource.dateFormat", "MM/dd/yyyy")
]
)
# delete an output
self.cmd("stream-analytics output delete -n {output_name} -g {rg} --job-name {job_name} --yes")
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_job_scale(self, storage_account):
self.kwargs.update({
"job_name": "job",
"transformation_name": "transformation",
"input_name": "input",
"output_name": "output",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create a transformation
self.kwargs["saql"] = f"SELECT * INTO {self.kwargs['output_name']} FROM {self.kwargs['input_name']}"
self.cmd(
"stream-analytics transformation create -n {transformation_name} -g {rg} \
--job-name {job_name} \
--saql '{saql}' --streaming-units 6"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create an input
self.kwargs["properties"] = json.dumps({
"type": "Stream",
"datasource": {
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "MM/dd/yyyy",
"timeFormat": "HH",
"sourcePartitionCount": 16
}
},
"serialization": {
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
}
})
self.cmd(
"stream-analytics input create -n {input_name} -g {rg} \
--job-name {job_name} --properties '{properties}'"
)
# create an output
self.kwargs["datasource"] = json.dumps({
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "yyyy/MM/dd",
"timeFormat": "HH"
}
})
self.kwargs["serialization"] = json.dumps({
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
})
self.cmd(
"stream-analytics output create -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'"
)
# start/stop a running job
self.cmd("stream-analytics job start -n {job_name} -g {rg} --output-start-mode JobStartTime")
self.cmd("stream-analytics job stop -n {job_name} -g {rg}")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_function_crud(self):
self.kwargs.update({
"job_name": "job",
"function_name": "function",
"workspace_name": "workspace",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create/test a function
props = {
"type": "Scalar",
"properties": {
"inputs": [{
"dataType": "Any"
}],
"output": {
"dataType": "Any"
},
"binding": {
"type": "Microsoft.StreamAnalytics/JavascriptUdf",
"properties": {
"script": "function (a, b) { return a + b; }"
}
}
}
}
self.kwargs["props"] = json.dumps(props)
self.cmd(
"stream-analytics function create -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'",
checks=[
self.check("name", "{function_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/functions")
]
)
self.cmd(
"stream-analytics function test -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'",
checks=[
self.check("status", "TestFailed")
]
)
# retrieve/update a function
self.cmd(
"stream-analytics function list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{function_name}")
]
)
props["properties"]["binding"]["properties"]["script"] = "function (a, b) { return a * b; }"
self.kwargs["props"] = json.dumps(props)
self.cmd(
"stream-analytics function update -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'"
)
self.cmd(
"stream-analytics function show -n {function_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{function_name}"),
self.check("properties.binding.script", "function (a, b) {{ return a * b; }}")
]
)
# delete a function
self.cmd("stream-analytics job delete -n {function_name} -g {rg} --job-name {job_name} --yes")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_")
def test_subscription_inspect(self):
self.kwargs.update({
"location": "westus"
})
self.cmd(
"stream-analytics subscription inspect -l {location}",
checks=[
self.check("length(value)", 2),
self.check("value[0].type", "Microsoft.StreamAnalytics/quotas")
]
)
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_")
def test_cluster_crud(self):
self.kwargs.update({
"cluster": "cli-cluster",
"capacity1": 36,
"capacity2": 72,
})
# create a cluster
self.cmd(
"stream-analytics cluster create -n {cluster} -g {rg} --sku name=Default capacity={capacity1}",
checks=[
self.check("sku.capacity", 36),
self.check("type", "Microsoft.StreamAnalytics/clusters"),
]
)
# retrieve/update a cluster
self.cmd(
"stream-analytics cluster list -g {rg}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{cluster}"),
]
)
self.cmd("stream-analytics cluster update -n {cluster} -g {rg} --sku capacity={capacity2}")
self.cmd(
"stream-analytics cluster show -n {cluster} -g {rg}",
checks=[
self.check("sku.capacity", 72),
self.check("name", "{cluster}"),
]
)
# delete a cluster
self.cmd("stream-analytics cluster delete -n {cluster} | |
= self.backtester_engine.get_result_daily()
self.dailytable.set_data(dailyresults)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def clear_data(self):
full_sym = self.symbol_line.text()
if self.interval_combo.currentText() == 'tick':
msg = f"will clear Tick data {full_sym} , continue?"
mbox = QtWidgets.QMessageBox().question(None, 'Warning', msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if mbox == QtWidgets.QMessageBox.No:
return
sqglobal.history_tick[full_sym].clear()
elif self.interval_combo.currentText() == '1m':
msg = f"will clear Bar(1m) data {full_sym} , continue?"
mbox = QtWidgets.QMessageBox().question(None, 'Warning', msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if mbox == QtWidgets.QMessageBox.No:
return
sqglobal.history_bar[full_sym].clear()
def load_data_file(self):
if not self.data_source.currentText() == 'Memory':
return
full_sym = self.symbol_line.text()
if self.interval_combo.currentText() == 'tick':
if sqglobal.history_tick[full_sym]:
QtWidgets.QMessageBox().information(
None, 'Info', 'already has data in memory!', QtWidgets.QMessageBox.Ok)
return
QtWidgets.QMessageBox().information(None, 'Info',
                'Please load data from Tools/Data loader!', QtWidgets.QMessageBox.Ok)
return
elif self.interval_combo.currentText() == '1m':
if sqglobal.history_bar[full_sym]:
QtWidgets.QMessageBox().information(
None, 'Info', 'already has data in memory!', QtWidgets.QMessageBox.Ok)
return
QtWidgets.QMessageBox().information(None, 'Info',
                'Please load data from Tools/Data loader!', QtWidgets.QMessageBox.Ok)
return
QtWidgets.QMessageBox().information(
None, 'Info', 'not implemented yet!', QtWidgets.QMessageBox.Ok)
def reload_strategy(self):
self.class_names.clear()
self.settings.clear()
self.backtester_engine.reload_strategy()
self.init_strategy_settings()
self.class_combo.clear()
self.class_combo.addItems(self.class_names)
def batchaddsetting(self):
full_symbol = self.symbol_line.text()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
setting = {'full_symbol': full_symbol, 'start': start, 'end': end}
self.batchtable.add_data(setting)
def start_backtesting(self):
""""""
if self.batchmode.isChecked():
self.start_batch_bt()
else:
class_name = self.class_combo.currentText()
full_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
datasource = self.data_source.currentText()
if end <= start:
QtWidgets.QMessageBox().information(None, 'Error',
                    'End date should be later than start date!', QtWidgets.QMessageBox.Ok)
return
if (end - start) > timedelta(days=90) and interval == 'tick':
                mbox = QtWidgets.QMessageBox().question(None, 'Warning', 'Too much data will slow system performance, continue?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if mbox == QtWidgets.QMessageBox.No:
return
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
full_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
new_setting,
datasource
)
if result:
self.statistics_monitor.clear_data()
self.txnstatics_monitor.clear_data()
self.overviewchart.clear_data()
def start_batch_bt(self):
batchsettinglist = self.batchtable.get_data()
class_name = self.class_combo.currentText()
interval = self.interval_combo.currentText()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
datasource = self.data_source.currentText()
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_batch_bt(
class_name,
batchsettinglist,
interval,
rate,
slippage,
size,
pricetick,
capital,
new_setting,
datasource
)
if result:
self.statistics_monitor.clear_data()
self.txnstatics_monitor.clear_data()
self.overviewchart.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
full_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
datasource = self.data_source.currentText()
if (end - start) > timedelta(days=90) and interval == 'tick':
            mbox = QtWidgets.QMessageBox().question(None, 'Warning', 'Too much data will slow system performance, continue?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if mbox == QtWidgets.QMessageBox.No:
return
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
full_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
optimization_setting,
use_ga,
datasource
)
self.result_button.setEnabled(False)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_data(self):
full_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
datasource = self.data_source.currentText()
if interval == 'tick':
interval = '1m'
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
trades = self.backtester_engine.get_result_trades()
addtrade = bool(trades) and full_symbol == trades[0].full_symbol
if (end - start) > timedelta(days=60) and interval == '1m':
            mbox = QtWidgets.QMessageBox().question(None, 'Warning', 'Too much data will slow system performance, continue?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if mbox == QtWidgets.QMessageBox.No:
return
for i in range(self.bt_bottommiddle.count()):
if self.bt_bottommiddle.tabText(i) == full_symbol:
widget = self.bt_bottommiddle.widget(i)
widget.reset(full_symbol, start, end,
Interval(interval), datasource)
if addtrade:
widget.add_trades(trades)
widget.show_text_signals()
return
dataviewchart = BTQuotesChart()
dataviewchart.reset(full_symbol, start, end,
Interval(interval), datasource)
if addtrade:
dataviewchart.add_trades(trades)
dataviewchart.show_text_signals()
self.bt_bottommiddle.addTab(dataviewchart, full_symbol)
def show_trade(self, trade):
full_symbol = trade.full_symbol
tradetime = trade.datetime
adddaysstart = 2
if tradetime.date().weekday() == 0:
adddaysstart = 4
elif tradetime.date().weekday() == 1:
adddaysstart = 3
start = tradetime - timedelta(days=adddaysstart)
adddaysend = 1
if tradetime.date().weekday() == 4:
adddaysend = 3
end = tradetime + timedelta(days=adddaysend)
datasource = self.data_source.currentText()
trades = self.backtester_engine.get_result_trades()
for i in range(self.bt_bottommiddle.count()):
if self.bt_bottommiddle.tabText(i) == full_symbol:
widget = self.bt_bottommiddle.widget(i)
widget.reset(full_symbol, start.date(),
end.date(), Interval.MINUTE, datasource)
widget.add_trades(trades)
widget.show_text_signals()
return
dataviewchart = BTQuotesChart()
dataviewchart.reset(full_symbol, start.date(),
end.date(), Interval.MINUTE, datasource)
dataviewchart.add_trades(trades)
dataviewchart.show_text_signals()
self.bt_bottommiddle.addTab(dataviewchart, full_symbol)
def show(self):
""""""
self.showMaximized()
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"capital": "起始资金",
"end_balance": "结束资金",
"total_net_pnl": "总盈亏",
"total_return": "总收益率",
"annual_return": "年化收益",
"daily_net_pnl": "日均盈亏",
"daily_return": "日均收益率",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比",
"win_ratio": "胜率",
"win_loss": "盈亏比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
self.setMinimumHeight(450)
# self.setFixedWidth(200)
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
data["win_ratio"] = f"{data['win_ratio']:,.2f}"
data["win_loss"] = f"{data['win_loss']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class TxnStatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日数",
"profit_days": "盈利交易日数",
"loss_days": "亏损交易日数",
"total_commission": "总手续费",
"total_slippage": | |
# agilent_scope.py
"""
This is to communicate with an Agilent Windows-based oscilloscope over Ethernet.
This version requires a server program named "agilent_scope_server.py" running
on the Oscilloscope computer.
<NAME>, 6 Sep 2007 - 13 May 2015
"""
from __future__ import with_statement
import socket # needed for socket.error
from thread import allocate_lock
__version__ = "2.1.1"
NaN = 1e1000/1e1000 # generates Not A Number
class agilent_scope(object):
"This is to communicate with an Agilent Windows-based oscilloscope over Ethernet."
def __init__(self,ip_address):
"""ip_address may be given as address:port. If :port is omitted, port
number 2000 is assumed."""
self.timeout = 2.0
if ip_address.find(":") >= 0:
self.ip_address = ip_address.split(":")[0]
self.port = int(ip_address.split(":")[1])
else: self.ip_address = ip_address; self.port = 2000
self.connection = None
# This is to make the query method multi-thread safe.
self.lock = allocate_lock()
        self.retries = 2 # used in case of communication error
def __repr__(self):
return "agilent_scope('"+self.ip_address+":"+str(self.port)+"')"
def write(self,command):
"""Sends a command to the oscilloscope that does not generate a reply,
e.g. ":CDISplay" """
if len(command) == 0 or command[-1] != "\n": command += "\n"
with self.lock: # Allow only one thread at a time inside this function.
for attempt in range(0,self.retries):
try:
if self.connection == None:
self.connection = socket.socket()
self.connection.settimeout(self.timeout)
self.connection.connect((self.ip_address,self.port))
self.connection.sendall (command)
return
except Exception,message:
if attempt > 0 or self.retries == 1:
self.log("write %r attempt %d/%d failed: %s" % \
(command,attempt+1,self.retries,message))
self.connection = None
def query(self,command):
"""To send a command that generates a reply, e.g. "InstrumentID.Value".
Returns the reply"""
if len(command) == 0 or command[-1] != "\n": command += "\n"
with self.lock: # Allow only one thread at a time inside this function.
for attempt in range(0,self.retries):
try:
if self.connection == None:
self.connection = socket.socket()
self.connection.settimeout(self.timeout)
self.connection.connect((self.ip_address,self.port))
self.connection.sendall (command)
reply = self.connection.recv(4096)
while reply.find("\n") == -1:
reply += self.connection.recv(4096)
if reply.rstrip("\n") != "": return reply.rstrip("\n")
if attempt > 0 or self.retries == 1:
self.log("query %r attempt %d/%d generated reply %r" %
(command,attempt+1,self.retries,reply))
except Exception,message:
if attempt > 0 or self.retries == 1:
self.log("query %r attempt %d/%d failed: %s" %
(command,attempt+1,self.retries,message))
self.connection = None
return ""
class measurement_object(object):
"""Implements automatic measurements, including averaging and statistics"""
def __init__(self,scope,n=1,type="value"):
"""n=1,2...6 is the waveform parameter number.
The parameter is defined from the "Measure" menu, e.g. DeltaTime(2-3).
The optional 'type' can by "value","min","max","stdev",or "count".
"""
self.scope = scope; self.n = n; self.type = type
def __repr__(self):
return repr(self.scope)+".measurement("+str(self.n)+")."+self.type
def get_value(self):
if self.type == "value": return self.float_result(1)
if self.type == "average": return self.float_result(4)
if self.type == "min": return self.float_result(2)
if self.type == "max": return self.float_result(3)
if self.type == "stdev": return self.float_result(5)
if self.type == "count": return self.int_result(6)
            return NaN
value = property(get_value,doc="last sample (without averaging)")
def get_average(self):
if self.type == "value": return self.float_result(4)
if self.type == "average": return self.float_result(4)
if self.type == "min": return self.float_result(2)
if self.type == "max": return self.float_result(3)
if self.type == "stdev": return self.float_result(5)
if self.type == "count": return self.int_result(6)
            return NaN
average = property(get_average,doc="averaged value")
def get_min(self): return self.float_result(2)
min = property(get_min,doc="minimum value contributing to average")
def get_max(self): return self.float_result(3)
max = property(get_max,doc="maximum value contributing to average")
def get_stdev(self): return self.float_result(5)
stdev = property(get_stdev,doc="standard deviation of individuals sample")
def get_count(self): return self.int_result(6)
count = property(get_count,doc="number of measurements averaged")
def get_name(self):
try: return self.result(0)+"."+self.type
except ValueError: return "?."+self.type
name = property(get_name,doc="string representation of the measurment")
def result(self,index):
"""Reads the measurment results from the oscillscope and extracts one
value. index 0=name,1=current,2=min,3=max,4=mean,5=stdev,6=count"""
reply = self.scope.query (":MEASure:RESults?")
# format <name>,<current>,<min>,<max>,<mean>,<stdev>,<count>[,<name>,...]
fields = reply.split(",")
i = (self.n-1)*7 + index
if i < len(fields): return fields[i]
def float_result(self,index):
"""Reads the measurment results from the oscillscope and extracts one
value as floating point number.
index 1=current,2=min,3=max,4=mean,5=stdev"""
x = self.result(index)
if x == None: return NaN
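            # '9.99999E+37' is assumed to be the instrument's "measurement not available"
            # sentinel value (a common SCPI convention), so it is mapped to NaN here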
if x == '9.99999E+37': return NaN
try: return float(x)
except ValueError: return NaN
def int_result(self,index):
"""Reads the measurment results from the oscillscope and extracts one
value as floating point number.
index 1=current,2=min,3=max,4=mean,5=stdev"""
x = self.result(index)
if x == None: return NaN
if x == '9.99999E+37': return NaN
try: return int(float(x))
except ValueError: return NaN
def start(self): self.scope.start()
def stop(self): self.scope.stop()
def get_time_range(self): return self.scope.time_range
def set_time_range(self,value): self.scope.time_range = value
time_range = property(get_time_range,set_time_range,
doc="horizontal scale min to max (10 div) in seconds")
def measurement(self,*args,**kws):
return agilent_scope.measurement_object(self,*args,**kws)
def start(self):
"""Clear the accumulated average and restart averaging.
Also re-eneables the trigger in case the scope was stopped."""
self.write (":CDISplay")
self.write (":RUN")
def stop(self):
"Freezes the averaging by disabling the trigger of the oscilloscope."
self.write (":STOP")
def get_time_range(self):
try: return float(self.query(":TIMebase:RANGe?"))
except ValueError: return NaN
def set_time_range(self,value): self.write (":TIMebase:RANGe %g" % value)
time_range = property(get_time_range,set_time_range,
doc="horizontal scale min to max (10 div) in seconds")
def get_sampling_rate(self):
"samples per second"
try: return float(self.query(":ACQuire:SRATe?"))
except ValueError: return NaN
sampling_rate = property(get_sampling_rate)
def get_id(self): return self.query("*IDN?")
id = property(get_id,doc="Model and serial number")
class gated_measurement(object):
"""Common code base for gates measurements.
The Agilent does not support gating on automated measurements.
Gated mesurements are implemented by downloading the waveform and processing
it in client computer memory. The gate is determined by the current position
of the two vertical cursors on the oscilloscope screen.
"""
def __init__(self,scope,channel=1):
self.scope = scope; self.channel = channel
def tstart(self): return float(self.scope.query(":MARKer:TSTArt?"))
def tstop(self): return float(self.scope.query(":MARKer:TSTOp?"))
def get_begin(self): return min(self.tstart(),self.tstop())
def set_begin(self,value): self.scope.write(":MARKer:TSTArt "+str(value))
begin = property(get_begin,set_begin,doc="starting time of integration gate")
def get_end(self): return max(self.tstart(),self.tstop())
def set_end(self,val): self.scope.write(":MARKer:TSTOp "+str(val))
end = property(get_end,set_end,doc="ending time of integration gate")
def tstr(t):
"Convert time given in seconds in more readble format such as ps, ns, ms, s"
try: t=float(t)
except: return "?"
if t != t: return "?" # not a number
if t == 0: return "0"
if abs(t) < 1E-20: return "0"
if abs(t) < 999e-12: return "%.3gps" % (t*1e12)
if abs(t) < 999e-9: return "%.3gns" % (t*1e9)
if abs(t) < 999e-6: return "%.3gus" % (t*1e6)
if abs(t) < 999e-3: return "%.3gms" % (t*1e3)
return "%.3gs" % t
tstr = staticmethod(tstr)
class gated_integral_object(gated_measurement):
"""The Agilent does not support gating on automated measurements.
The "Area" measurement integrates the whole displayed waveform.
Gated integration is implemented by downloading the waveform and processing
        it in client computer memory. The integration gate width is determined
by the current position of the two vertical cursors on the oscilloscope
screen.
"""
def __init__(self,scope,channel=1):
agilent_scope.gated_measurement.__init__(self,scope,channel)
self.unit = "Vs"
def get_value(self):
return integral(self.scope.waveform(self.channel),self.begin,self.end)
value = property(get_value,doc="gated integral of waveform")
def get_name(self):
return "int("+str(self.channel)+","+self.tstr(self.begin)+","+self.tstr(self.end)+")"
name = property(get_name,doc="short description")
def gated_integral(self,channel=1):
"Area of waveform between vertical markers"
return agilent_scope.gated_integral_object(self,channel)
class gated_average_object(gated_measurement):
"""Calculates the average of the part of a waveform, enclosed by the two
vertical cursors on the oscilloscope screen."""
def __init__(self,scope,channel=1):
agilent_scope.gated_measurement.__init__(self,scope,channel)
self.unit = "V"
def get_value(self):
return average(self.scope.waveform(self.channel),self.begin,self.end)
value = property(get_value,doc="gated average of waveform")
def get_name(self):
return "ave("+str(self.channel)+","+self.tstr(self.begin)+","+self.tstr(self.end)+")"
name = property(get_name,doc="short description")
def gated_average(self,channel=1):
"Area of waveform between vertical markers"
return agilent_scope.gated_average_object(self,channel)
def waveform(self,channel=1): return self.waveform_16bit(channel)
def waveform_ascii(self,channel=1):
"Downloads waveform data in the form of a list of (t,y) tuples"
self.write(":SYSTEM:HEADER OFF")
self.write(":WAVEFORM:SOURCE CHANNEL"+str(channel))
self.write(":WAVEFORM:FORMAT ASCII")
data = self.query(":WAVeform:DATA?")
# format: <value>,<value>,... example: 5.09E-03,-5.16E-03,...
y = data.split(",")
preamble = self.query(":WAVeform:PREAMBLE?")
xincr = float(preamble.split(",")[4])
xorig = float(preamble.split(",")[5])
waveform = []
for i in range(len(y)): waveform.append((xorig+i*xincr,float(y[i])))
return waveform
def waveform_8bit (self,channel=1):
"""Downloads waveform data in the form of a list of (t,y) tuples.
In contrast to the "waveform" method, this implementation downloads
binary data, not formatted ASCII text, which is faster. (0.0037 s vs 0.120 s
for 20 kSamples)"""
self.write(":SYSTEM:HEADER OFF")
self.write(":WAVEFORM:SOURCE CHANNEL"+str(channel))
self.write(":WAVEFORM:FORMAT BYTE")
data = self.query(":WAVeform:DATA?")
# format: #<n><length><binary data>
# example: #520003...
n = int(data[1:2]) # number of bytes in "length" block
length = int(data[2:2+n]) # number of bytes in waveform data to follow
payload = len(data)-(2+n)
if length > payload:
print "(Waveform truncated from",length,"to",payload,"bytes)"
length = payload
from struct import unpack
bytes = unpack("%db" % length,data[2+n:2+n+length])
preamble = self.query(":WAVeform:PREAMBLE?")
xincr = float(preamble.split(",")[4])
xorig = float(preamble.split(",")[5])
yincr = float(preamble.split(",")[7])
yorig = float(preamble.split(",")[8])
waveform = []
for i in range(length):
waveform.append((xorig+i*xincr,yorig+bytes[i]*yincr))
return waveform
def waveform_16bit (self,channel=1):
"""Downloads waveform data in the form of a | |
within the ARC corpus, without the need for
developing new approaches or algorithms.
It would have been significantly less effort for me to implement three solve functions of
the same type, than three problems of different type - because I would not have needed
to invent / discover new skills or methods (by skills and methods, I am referring to
logical thinking, not python functions or libraries): I simply would have just
parameterised and re-applied the methods and procedures I had already used.
"""
import os, sys
import json
import numpy as np
import re
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
# https://arc-seven.now.sh/testing_interface.html to visualise the puzzles
def solve_row(row):
"""
row : Array of characters representing the row containing 9's (missing squares)
returns: Array of characters representing the corrected row
Component of solve_3631a71a
Take a row containing 9's. Solve using symmetry to return a new row replacing the 9's with correct values.
Procedure:
1. Find the symmetrical centre of the row by checking 2 shifts left and right of the midpoint
2. Symmetrical centre is determined by
a. Divide the row into the left and right side
        b. Only one side can have missing squares for the algorithm to work. If both sides are missing squares, we return the
row unchanged
c. Reverse the order of the right side
d. In the side that contains 9's, replace the 9's with a period (.) Then use this side as the match pattern for a regex check for
a match to the other side. If there is a match, we have found the symmetrical centre of the row. Additional padding of period (.) may be required
to the strings depending on where the centre lies
3. If symmertical centre is found
a. Populate the missing squares on one side with the corresponding squares on the reversed other side
b. Undo the reverse of the left side, remove any padding, reconstruct the new row and return it
If any row contains 9's values on both sides of the chosen midpoint, the algorithm will return the row unchanged.
When we transpose the matrix and run the procedure again it will solve these via a vertical symmetry
If the symmetrical midpoint is more than 2 shifts left or right of the center, the algorithm will return the row unchanged. The range of the
mid-point offset could be increased in this case but for all test and training cases this has been sufficient
"""
match_l = False # True if the left side of our row will be the regex pattern for this iteration, otherwise false
match_r = False # True if the right side of our row will be the regex pattern for this iteration, otherwise false
match = None
str_left = ""
str_right = ""
midpoint = 0 # The symmetrical midpoint we are currently evaluating
match_offset = 0
# Check for symmetrical centre 2 cells left of the row midpoint, at the row midpoint, and 2 cells right of the row midpoint
    # Increase this range if we find a case where the midpoint is offset more than this, but for all test and training examples this
# is sufficient
for midpoint_offset in range (-2, 3):
midpoint = int(len(row) / 2 + midpoint_offset)
match_l = False
match_r = False
        # When we shift the symmetrical midpoint one cell left or right, we need to add 2 padding cells on the 'shorter' side for the regex match to work
str_padding = "." * (2 * abs(midpoint_offset))
        # Split the row based on the midpoint, reverse the order of the right part
l_part = np.array(row[0:midpoint]).astype(str)
r_part = np.array(np.flipud(row[midpoint:])).astype(str)
# Check which side has the 9's
if '9' in l_part: match_l = True # Left part will be the regex pattern
if '9' in r_part: match_r = True # Right part will be the regex pattern
if match_l and match_r:
# This algorithm cannot match wildcards on both sides of the string, continue to the next iteration to shift
# the midpoint, or return the row unchanged if none of the midpoints result in a solve
continue
if midpoint_offset < 0:
# Add padding to the left string, as this is the shorter side, and replace any 9's with .'s
# Only one side will have 9's so the replace operation will be redundant on the side with no 9s's
str_left = str_padding + ''.join(l_part).replace("9",".")
str_right = ''.join(r_part).replace("9",".")
else:
if midpoint_offset > 0:
# Add padding to the right string, as this is the shorter side, and replace any 9's with .'s
str_left = ''.join(l_part).replace("9",".")
str_right = str_padding + ''.join(r_part).replace("9",".") # Add padding to the right string
else:
# Both sides are equal length, just replace any 9's with .'s
str_left = ''.join(l_part).replace("9",".")
str_right = ''.join(r_part).replace("9",".")
# Now see if we have a match ignore the padded spaces during the matching process
if match_l:
match = re.match(str_left[2 * abs(midpoint_offset):], str_right[2 * abs(midpoint_offset):])
else:
match = re.match(str_right[2 * abs(midpoint_offset):], str_left[2 * abs(midpoint_offset):])
if match != None:
match_offset = midpoint_offset
break # exit the loop
# Exited the loop - now check if we found a match
if match == None:
# Failed to find a matching pattern, return the row unchanged
return row
# If we reach this point, then we have the symmetrical midpoint, so replace periods in the target row part with the
# corresponding characters in the source row part
new_str = ""
if match_l:
        # replace all occurrences of . in str_left, with their corresponding value in str_right
for i in range(len(str_left)):
if str_left[i] == '.':
new_str += str_right[i]
else:
new_str += str_left[i]
str_left = new_str
if match_r:
        # replace all occurrences of . in str_right, with their corresponding value in str_left
for i in range(len(str_right)):
if str_right[i] == '.':
new_str += str_left[i]
else:
                new_str += str_right[i]
str_right = new_str
# Remove the padding that was added because of the offset
if match_offset < 0:
str_left = str_left[abs(match_offset) * 2:]
else:
if match_offset > 0:
str_right = str_right[abs(match_offset) * 2:]
# Replace the .'s with 9's for any remaining unmatched positions, so that they can be picked up in transposed reprocess
# And undo the order reversal on the right part
l_part = ''.join(str_left).replace(".","9")
r_part = ''.join(str_right)[::-1].replace(".","9")
# Reconstruct the row and return it
return np.array([int(char) for char in (l_part + r_part)])
def rows_iteration(solve_matrix):
"""
solve_matrix: A 2-D numpy array of integers.
Returns: A 2-D numpy array containing any resolved 9 values
Component of solve_3631a71a
    Iterate through each row of solve_matrix and, using symmetrical matching between the two 'halves' of the row,
    solve for any 9's that appear in the row"""
# Initialise the return array
# We build and return a new array rather than modifying the original in-place
new_array = np.array([])
mask_val = 9 # We are searching for 9's
for row in solve_matrix:
if mask_val in row:
# The row contains 9's - missing squares. So we have to solve to replace the 9's with another value
new_row = solve_row(row)
new_array = np.append(new_array, new_row, axis=0)
else:
# The row has no missing squares, so just add it to the return matrix
new_array = np.append(new_array, row, axis=0)
# reshape and return the new, updated np | |
from asyncio.locks import BoundedSemaphore
import csv
import json
from collections import namedtuple
from typing import Any, List, Tuple, Set, Dict
import asyncio
import httpx
from enum import Enum, auto
from jwcrypto import jwk as _jwk
IssuerEntry = namedtuple('IssuerEntry', 'name iss website canonical_iss')
IssuerEntryChange = namedtuple('IssuerEntryChange', 'old new')
## Reduce SSL context security level due to SSL / TLS error with some domains
## https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_security_level.html
httpx._config.DEFAULT_CIPHERS = httpx._config.DEFAULT_CIPHERS + ':@SECLEVEL=1'
class IssException(BaseException):
pass
class IssueLevel(Enum):
WARNING = auto()
ERROR = auto()
def __str__(self):
return f'{self.name}'
def __repr__(self):
return f'{self.name}'
class IssueType(Enum):
ISS_ENDS_WITH_TRAILING_SLASH = (auto(), IssueLevel.ERROR)
FETCH_EXCEPTION = (auto(), IssueLevel.ERROR)
KEYS_PROPERTY_MISSING = (auto(), IssueLevel.ERROR)
KEYS_PROPERTY_EMPTY = (auto(), IssueLevel.ERROR)
KEY_IS_INVALID = (auto(), IssueLevel.ERROR)
KID_IS_MISSING = (auto(), IssueLevel.ERROR)
KEY_CONTAINS_PRIVATE_MATERIAL = (auto(), IssueLevel.ERROR)
KID_IS_INCORRECT = (auto(), IssueLevel.ERROR)
KEY_USE_IS_INCORRECT = (auto(), IssueLevel.WARNING)
KEY_ALG_IS_INCORRECT = (auto(), IssueLevel.WARNING)
WEBSITE_DOES_NOT_RESOLVE = (auto(), IssueLevel.ERROR)
CANONICAL_ISS_SELF_REFERENCE = (auto(), IssueLevel.ERROR)
CANONICAL_ISS_REFERENCE_INVALID = (auto(), IssueLevel.ERROR)
CANONICAL_ISS_MULTIHOP_REFERENCE = (auto(), IssueLevel.ERROR)
## TODO - convert CORS issues to ERROR in the future
CORS_HEADER_MISSING = (auto(), IssueLevel.WARNING)
CORS_HEADER_INCORRECT = (auto(), IssueLevel.WARNING)
def __init__(self, id, level):
self.id = id
self.level = level
def __str__(self):
return f'{self.name}: {self.level}'
def __repr__(self):
return f'{self.name}: {self.level}'
class VCIDirectoryDiffs():
def __init__(self, additions: List[IssuerEntry], deletions: List[IssuerEntry], changes: List[IssuerEntryChange]):
self.additions = additions
self.deletions = deletions
self.changes = changes
def __repr__(self):
        return f'additions={self.additions}\ndeletions={self.deletions}\nchanges={self.changes}'
Issue = namedtuple('Issue', 'description type')
ValidationResult = namedtuple('ValidationResult', 'issuer_entry is_valid issues')
DEFAULT_NAME_INDEX = 0
DEFAULT_NAME_HEADER = 'name'
DEFAULT_ISS_INDEX = 1
DEFAULT_ISS_HEADER = 'iss'
DEFAULT_ENCODING = 'utf-8'
NAME_KEY = 'name'
ISS_KEY = 'iss'
WEBSITE_KEY = 'website'
CANONICAL_ISS_KEY = 'canonical_iss'
PARTICIPATING_ISSUERS_KEY = 'participating_issuers'
USER_AGENT = "VCIDirectoryValidator/1.0.0"
EXPECTED_KEY_USE = 'sig'
EXPECTED_KEY_ALG = 'ES256'
EXPECTED_KEY_CRV = 'P-256'
MAX_FETCH_RETRY_COUNT=5
FETCH_RETRY_COUNT_DELAY=2
FETCH_REQUEST_ORIGIN = 'https://example.org'
CORS_ACAO_HEADER = 'access-control-allow-origin'
CORS_ACAO_HEADER_ALL = '*'
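# Expected shape of the directory JSON consumed below (illustrative sketch only;
# the issuer values are hypothetical):
# {
#   "participating_issuers": [
#     {
#       "name": "Example Issuer",
#       "iss": "https://example.org/issuer",
#       "website": "https://example.org",
#       "canonical_iss": "https://example.org/canonical-issuer"
#     }
#   ]
# }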
def read_issuer_entries_from_tsv_file(
input_file: str,
name_index: int = DEFAULT_NAME_INDEX,
name_header: str = DEFAULT_NAME_HEADER,
iss_index: int = DEFAULT_ISS_INDEX,
iss_header: str = DEFAULT_ISS_HEADER,
encoding: str = DEFAULT_ENCODING
) -> List[IssuerEntry]:
with open(input_file, 'r', newline='', encoding=encoding) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
entries = {}
for row in reader:
name = row[name_index].strip()
iss = row[iss_index].strip()
if name != name_header and iss != iss_header:
entry = IssuerEntry(name, iss, None, None)
entries[iss] = entry
return list(entries.values())
def read_issuer_entries_from_json_file(
input_file: str
) -> List[IssuerEntry]:
with open(input_file, 'r') as json_file:
input_dict = json.load(json_file)
entries = {}
for entry_dict in input_dict[PARTICIPATING_ISSUERS_KEY]:
name = entry_dict[NAME_KEY].strip()
iss = entry_dict[ISS_KEY].strip()
website = entry_dict[WEBSITE_KEY].strip() if entry_dict.get(WEBSITE_KEY) else None
canonical_iss = entry_dict[CANONICAL_ISS_KEY].strip() if entry_dict.get(CANONICAL_ISS_KEY) else None
entry = IssuerEntry(
name=name,
iss=iss,
website=website,
canonical_iss=canonical_iss
)
entries[iss] = entry
return list(entries.values())
def issuer_entry_to_dict(issuer_entry: IssuerEntry) -> dict:
d = {ISS_KEY: issuer_entry.iss, NAME_KEY: issuer_entry.name}
if issuer_entry.website:
d[WEBSITE_KEY] = issuer_entry.website
if issuer_entry.canonical_iss:
d[CANONICAL_ISS_KEY] = issuer_entry.canonical_iss
return d
def write_issuer_entries_to_json_file(
output_file: str,
entries: List[IssuerEntry]
):
entry_dicts = [issuer_entry_to_dict(entry) for entry in entries]
output_dict = {
PARTICIPATING_ISSUERS_KEY: entry_dicts
}
with open(output_file, 'w') as json_file:
json.dump(output_dict, json_file, indent=2)
def validate_key(jwk_dict) -> Tuple[bool, List[Issue]]:
'''
Validates a JWK represented by jwk_dict
'''
try:
kid = jwk_dict['kid']
except:
issues = [
Issue('kid is missing', IssueType.KID_IS_MISSING)
]
return [False, issues]
try:
jwk = _jwk.JWK(**jwk_dict)
except:
issues = [
Issue(f'Key with kid={kid} is invalid', IssueType.KEY_IS_INVALID)
]
return [False, issues]
if jwk.has_private:
issues = [
Issue(f'Key with kid={kid} contains private key material', IssueType.KEY_CONTAINS_PRIVATE_MATERIAL)
]
return [False, issues]
is_valid = True
issues = []
    ## check that kid equals the key's JWK thumbprint
if kid != jwk.thumbprint():
is_valid = False
issues = [
Issue(f'Key with kid={kid} has an incorrect kid value. It should be {jwk.thumbprint()}', IssueType.KID_IS_INCORRECT)
]
return [False, issues]
if jwk_dict.get('use') != EXPECTED_KEY_USE:
is_valid = False
issues.append(
Issue(f'Key with kid={kid} has an incorrect key use. It should be \"{EXPECTED_KEY_USE}\"', IssueType.KEY_USE_IS_INCORRECT)
)
if jwk_dict.get('alg') != EXPECTED_KEY_ALG:
is_valid = False
issues.append(
Issue(f'Key with kid={kid} has an incorrect key alg. It should be \"{EXPECTED_KEY_ALG}\"', IssueType.KEY_ALG_IS_INCORRECT)
)
return [is_valid, issues]
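# Sketch of a key that would pass validate_key (illustrative values only; the kid
# must equal the RFC 7638 thumbprint of the actual key material):
# {"kty": "EC", "crv": "P-256", "x": "...", "y": "...",
#  "use": "sig", "alg": "ES256", "kid": "<thumbprint>"}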
def validate_keyset(jwks_dict) -> Tuple[bool, List[Issue]]:
'''
Validates a JWKS represented by jwks_dict
Ensures that at least one key is fully valid for signing and that NO keys contains errors (warnings are ok)
'''
try:
keys = jwks_dict['keys']
except:
issues = [
Issue(f'\"keys\" property missing from jwks.json', IssueType.KEYS_PROPERTY_MISSING)
]
return [False, issues]
if len(keys) == 0:
issues = [
Issue(f'jwks.json contains no keys', IssueType.KEYS_PROPERTY_EMPTY)
]
return [False, issues]
at_least_one_valid_keyset = False
keyset_issues = []
for key in keys:
(is_valid, issues) = validate_key(key)
at_least_one_valid_keyset = at_least_one_valid_keyset or is_valid
keyset_issues.extend(issues)
errors = [issue for issue in keyset_issues if issue.type.level == IssueLevel.ERROR]
keyset_is_valid = at_least_one_valid_keyset and len(errors) == 0
return [keyset_is_valid, keyset_issues]
def validate_response_headers(
response_headers: any,
) -> List[Issue]:
'''
Validates response headers from the jwks.json fetch
Ensures that CORS headers are configured properly
'''
acao_header = response_headers.get(CORS_ACAO_HEADER)
if acao_header == None or len(acao_header) == 0:
issues = [
Issue(f'{CORS_ACAO_HEADER} header is missing', IssueType.CORS_HEADER_MISSING)
]
return issues
elif acao_header == CORS_ACAO_HEADER_ALL or acao_header == FETCH_REQUEST_ORIGIN:
return []
else:
issues = [
Issue(f'{CORS_ACAO_HEADER} header is incorrect. Expected {CORS_ACAO_HEADER_ALL} or {FETCH_REQUEST_ORIGIN}, but got {acao_header}', IssueType.CORS_HEADER_INCORRECT)
]
return issues
async def fetch_jwks(
jwks_url: str,
retry_count: int = 0
) -> Any:
try:
async with httpx.AsyncClient() as client:
headers = {
'User-Agent': USER_AGENT,
'Origin': FETCH_REQUEST_ORIGIN
}
res = await client.get(jwks_url, headers=headers, follow_redirects=True)
res.raise_for_status()
return (res.json(), res.headers)
except BaseException as ex:
if retry_count < MAX_FETCH_RETRY_COUNT:
## Add exponential backoff, starting with 1s
delay_seconds = pow(FETCH_RETRY_COUNT_DELAY, retry_count)
await asyncio.sleep(delay_seconds)
return await fetch_jwks(
jwks_url,
retry_count = retry_count + 1
)
else:
raise ex
async def validate_website(
website_url: str,
retry_count: int = 0
) -> Tuple[bool, List[Issue]]:
try:
async with httpx.AsyncClient() as client:
headers = {'User-Agent': USER_AGENT}
res = await client.get(website_url, headers=headers, follow_redirects=True)
res.raise_for_status()
except BaseException as ex:
if retry_count < MAX_FETCH_RETRY_COUNT:
## Add exponential backoff, starting with 1s
delay_seconds = pow(FETCH_RETRY_COUNT_DELAY, retry_count)
await asyncio.sleep(delay_seconds)
return await validate_website(
website_url,
retry_count = retry_count + 1
)
else:
raise ex
async def validate_issuer(
issuer_entry: IssuerEntry
) -> Tuple[bool, List[Issue]]:
iss = issuer_entry.iss
if iss.endswith('/'):
issues = [
Issue(f'{iss} ends with a trailing slash', IssueType.ISS_ENDS_WITH_TRAILING_SLASH)
]
return (False, issues)
else:
jwks_url = f'{iss}/.well-known/jwks.json'
try:
(jwks, response_headers) = await fetch_jwks(jwks_url)
headers_issues = validate_response_headers(response_headers)
header_errors = [issue for issue in headers_issues if issue.type.level == IssueLevel.ERROR]
headers_are_valid = len(header_errors) == 0
(keyset_is_valid, keyset_issues) = validate_keyset(jwks)
is_valid = headers_are_valid and keyset_is_valid
issues = headers_issues + keyset_issues
return (is_valid, issues)
except BaseException as ex:
issues = [
Issue(f'An exception occurred when fetching {jwks_url}: {ex}', IssueType.FETCH_EXCEPTION)
]
return (False, issues)
async def validate_entry(
issuer_entry: IssuerEntry,
entry_map: Dict[str, IssuerEntry],
semaphore: BoundedSemaphore
) -> ValidationResult:
async with semaphore:
print('.', end='', flush=True)
(iss_is_valid, iss_issues) = await validate_issuer(issuer_entry)
website_is_valid = True
website_issues = []
if issuer_entry.website:
try:
await validate_website(issuer_entry.website)
except BaseException as e:
website_is_valid = False
website_issues.append(
Issue(f'An exception occurred when fetching {issuer_entry.website}', IssueType.WEBSITE_DOES_NOT_RESOLVE)
)
canonical_iss_is_valid = True
canonical_iss_issues = []
if issuer_entry.canonical_iss:
## check that canonical_iss does not reference itself
if issuer_entry.iss == issuer_entry.canonical_iss:
canonical_iss_is_valid = False
canonical_iss_issues.append(
Issue('canonical_iss references iss in this entry', IssueType.CANONICAL_ISS_SELF_REFERENCE)
)
## check that canonical_iss is included in the list
elif issuer_entry.canonical_iss not in entry_map:
canonical_iss_is_valid = False
canonical_iss_issues.append(
Issue(f'canonical_iss {issuer_entry.canonical_iss} not found in the directory', IssueType.CANONICAL_ISS_REFERENCE_INVALID)
)
else:
## check that canonical_iss does not refer to another entry that has canonical_iss defined
canonical_entry = entry_map[issuer_entry.canonical_iss]
if canonical_entry.canonical_iss:
canonical_iss_is_valid = False
canonical_iss_issues.append(
Issue(f'canonical_iss {issuer_entry.canonical_iss} refers to an entry with a canonical_iss value', IssueType.CANONICAL_ISS_MULTIHOP_REFERENCE)
)
is_valid = iss_is_valid and website_is_valid and canonical_iss_is_valid
issues = iss_issues + website_issues + canonical_iss_issues
return ValidationResult(
issuer_entry,
is_valid,
issues
)
async def validate_all_entries(
entries: List[IssuerEntry]
) -> List[ValidationResult]:
entry_map = {entry.iss: entry for entry in entries}
asyncio_semaphore = asyncio.BoundedSemaphore(50)
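    # bound concurrency so that at most 50 issuer endpoints are being fetched at once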
aws = [validate_entry(issuer_entry, entry_map, asyncio_semaphore) for issuer_entry in entries]
return await asyncio.gather(
*aws
)
def validate_entries(
entries: List[IssuerEntry]
) -> List[ValidationResult]:
results = asyncio.run(validate_all_entries(entries))
print('')
return results
def duplicate_entries(
entries: List[IssuerEntry]
) -> List[IssuerEntry]:
seen_set = set()
duplicate_set = set()
for entry in entries:
if entry.iss in seen_set:
duplicate_set.add(entry.iss)
else:
seen_set.add(entry.iss)
duplicate_list = [entry for entry in entries if entry.iss in duplicate_set]
duplicate_list.sort(key=lambda x: x.iss)
return duplicate_list
def analyze_results(
validation_results: List[ValidationResult],
show_errors_and_warnings: bool,
show_warnings: bool,
cors_issue_is_error: bool = False
) -> bool:
is_valid = True
for result in validation_results:
## Remove this once CORS issues are marked errors
if cors_issue_is_error:
for issue in result.issues:
if issue.type == IssueType.CORS_HEADER_MISSING or issue.type == IssueType.CORS_HEADER_INCORRECT:
is_valid = False
print(f'{result.issuer_entry.iss}: {issue.description}')
errors = [issue for issue in result.issues if issue.type.level == IssueLevel.ERROR]
assert(result.is_valid == (len(errors) == 0))
if not result.is_valid:
is_valid = False
if show_errors_and_warnings:
print(f'{result.issuer_entry.iss} is INVALID')
for error | |
bl_info = {
"name": "EC3D Base Tools",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "3D View > Sidebar",
"category": "Object"
}
import bpy, bmesh, math, os, mathutils
from bpy_extras.io_utils import ExportHelper
BOTTOM_TOLERANCE = 0.05
BOTTOM_MERGE_VERTS_DISTANCE = .01
BASE_BEVEL_DEPTH = .7
SIMPLE_BEVEL_SHRINK_DISTANCE = .5
BOTTOM_TRIM_VALUE_SHORT = .05
BOTTOM_TRIM_VALUE_TALL = .1
# ------- UI --------
class VIEW3D_PT_EC3D_Bases_Tools_Panel(bpy.types.Panel):
bl_label = "EC3D Base Tools"
bl_category = "Bases"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
@classmethod
def poll(cls, context):
obj = context.active_object
return obj is not None and obj.type == 'MESH' and obj.mode in {'OBJECT', 'EDIT'}
def draw(self, context):
layout = self.layout
layout.label(text="RESIN BASE TOOLS")
col1 = layout.column(align=True)
col1.operator("ec3d_bases.bevel_simple", text="Simple Bevel", icon='MOD_BEVEL')
col1.operator("ec3d_bases.bevel_simple_additive", text="Simple Bevel (Additive)", icon='MOD_BEVEL')
col1.operator("ec3d_bases.bevel_fancy_small", text="Channeled Bevel (1 inch)", icon='OUTLINER_OB_MESH')
col1.operator("ec3d_bases.bevel_fancy_small_additive", text="Channeled Bevel (1 inch, Additive)", icon='OUTLINER_OB_MESH')
col1.operator("ec3d_bases.bevel_fancy_large", text="Channeled Bevel (2+ inch)", icon='OUTLINER_OB_MESH')
col1.operator("ec3d_bases.bevel_fancy_large_additive", text="Channeled Bevel (2+ inch, Additive)", icon='OUTLINER_OB_MESH')
layout.label(text="GENERAL")
col2 = layout.column(align=True)
#col2.operator("ec3d_bases.fix_bottom", text="Fix bottom", icon='TRIA_DOWN_BAR')
col2.operator("ec3d_bases.trim_bottom_small", text="Trim bottom (small)", icon='TRIA_DOWN_BAR')
col2.operator("ec3d_bases.trim_bottom_large", text="Trim bottom (large)", icon='TRIA_DOWN_BAR')
col3 = layout.column(align=True)
col3.operator("ec3d_bases.export_to_stl", text="STL Export", icon='FILE_NEW')
export_folder = context.scene.ec3d.export_path
if export_folder != "//":
# truncate to last 3 dirs
dirs = export_folder.split(os.sep)
shorter = os.sep.join(dirs[-3:])
layout.label(text="> " + shorter)
col3.operator("ec3d.export_repeat", text="Repeat Export", icon='RECOVER_LAST')
# ------- HELPER FUNCTIONS -----
def bottomZ(obj):
bottom_z = 10000
for v in obj.data.vertices:
z = gco(obj, v.co)[2]
if z < bottom_z:
bottom_z = z
return bottom_z
def topZ(obj):
top_z = -10000
for v in obj.data.vertices:
z = v.co[2]
if z > top_z:
top_z = z
return top_z
# get global coords
def gco(obj, co):
return obj.matrix_world @ co
def fixBottom(obj, remove_depth=None):
# make sure scale and rotation are applied or numbers won't work right
bpy.ops.object.transform_apply(rotation=True, scale=True)
bottom_z = bottomZ(obj)
bm = bmesh.new()
bm.from_mesh(obj.data)
modified = 0
if remove_depth:
bottom_z = bottom_z + remove_depth
# move all verts below the remove depth UP to that depth
for v in bm.verts:
if v.co[2] < bottom_z:
v.co[2] = bottom_z
# Fix any close tolerance verts to bottom Z
bottom_verts = []
bottom_edges = []
for v in bm.verts:
if (bottom_z + BOTTOM_TOLERANCE) > v.co[2] and bottom_z != v.co[2]:
modified += 1
v.co[2] = bottom_z
    # collect all bottom verts, merge by distance, then reselect only the bottom verts that are left
    bottom_verts = [v for v in bm.verts if v.co[2] == bottom_z]
    bmesh.ops.remove_doubles(bm, verts=bottom_verts, dist=BOTTOM_MERGE_VERTS_DISTANCE)
    bm.verts.ensure_lookup_table()
    bottom_verts = []
    for v in bm.verts:
        if v.co[2] == bottom_z:
            bottom_verts.append(v)
# now select all edges and perform limited dissolve
for edge in bm.edges:
if edge.verts[0].co[2] == bottom_z and edge.verts[1].co[2] == bottom_z:
bottom_verts.append(edge.verts[0])
bottom_verts.append(edge.verts[1])
bottom_edges.append(edge)
# Then limited dissolve bottom
bmesh.ops.dissolve_limit(bm, angle_limit=math.radians(1), verts=list(set(bottom_verts)), edges=list(set(bottom_edges)))
bm.to_mesh(obj.data)
obj.data.update()
return modified
def exportToFolder(context, filepath, add_folder=None):
# Path comes in with /path/blah/whatever.stl or as just a dir
save_to = filepath
if filepath.endswith(".stl"):
save_to = os.path.dirname(filepath)
if add_folder:
save_to = os.path.join(save_to, add_folder)
if not os.path.isdir(save_to):
os.makedirs(save_to)
context.scene.ec3d.export_path = save_to
print("NEW LOCATION = " + save_to)
orig_selection = context.selected_objects
for obj in orig_selection:
bpy.ops.object.select_all(action='DESELECT')
fname = obj.name
fpath = os.path.join(save_to, fname + ".stl")
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
bpy.ops.export_mesh.stl(filepath=fpath, check_existing=False, use_selection=True)
# reselect
for obj in orig_selection:
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
return len(orig_selection)
def duplicate(context, name_append=None):
orig_name = context.object.name
bpy.ops.object.duplicate(linked=False)
new_obj = context.object
# set origin to center of geometry
bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY")
# rename it appropriately
if name_append:
new_obj.name = orig_name+" ["+name_append+"]"
# move it X to the width of the obj
width = new_obj.dimensions[0]
new_obj.location.x = new_obj.location.x-width
return new_obj
def scaleCubeToChanne(obj):
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.transform.resize(value=(1.0, 100, .8))
top_z = topZ(obj)
selected = []
for v in obj.data.vertices:
if v.co[2] == top_z:
v.select = True
selected.append(v)
else:
v.select = False
bpy.ops.object.mode_set(mode='EDIT')
    # now that the top verts are selected, scale them in on the X axis to taper the channel
bpy.ops.transform.resize(value=(.04, 1.0, 1.0))
bpy.ops.object.mode_set(mode='OBJECT')
def selectBottomVerts(context, obj):
#NOTE this operation assumes fix_bottom has been run, so if not you might miss many vertices
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
bottom_z = bottomZ(obj)
selected = []
for v in obj.data.vertices:
if v.co[2] == bottom_z:
v.select = True
selected.append(v)
else:
v.select = False
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type="VERT")
context.view_layer.objects.active = context.view_layer.objects.active
return selected
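# basicBevel: duplicates the active object, flattens its base (fixBottom), extrudes the
# bottom region downward by BASE_BEVEL_DEPTH and shrinks the extruded rim in X/Y based on
# SIMPLE_BEVEL_SHRINK_DISTANCE, leaving a simple chamfer around the bottom edge.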
def basicBevel(context, additive=False):
bpy.ops.object.mode_set(mode='OBJECT')
obj = duplicate(context, 'simple_base_bevel')
fixBottom(obj, remove_depth=None if additive else BASE_BEVEL_DEPTH)
# select bottom verts, then put 3D cursor to center of selected
selectBottomVerts(context, obj)
# now extrude down the right distance
bpy.ops.mesh.extrude_region_move(TRANSFORM_OT_translate={"value": (0.0, 0.0, 0 - BASE_BEVEL_DEPTH)})
obj.data.update()
selected_verts = [v for v in obj.data.vertices if v.select]
coords = []
for vert in selected_verts:
coords.append([vert.co[0], vert.co[1]])
x_coordinates, y_coordinates = zip(*coords)
bounding_box = [(min(x_coordinates), min(y_coordinates)), (max(x_coordinates), max(y_coordinates))]
# rudimentary: we just find the width of the bounding box and scale down until it is roughly
# the size we want, based on SIMPLE_BEVEL_SHRINK_DISTANCE
width = abs(bounding_box[1][0] - bounding_box[0][0])
target_width = width - (SIMPLE_BEVEL_SHRINK_DISTANCE * 2)
scale = target_width / width
bpy.ops.transform.resize(value=(scale, scale, 1.0))
obj.data.update()
# now snap cursor to bottom center
bpy.ops.view3d.snap_cursor_to_selected()
bpy.ops.object.mode_set(mode='OBJECT')
return obj
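# channelCutout: carves a channel pattern into the underside of the base. A UV sphere
# centred just below the 3D cursor is unioned with two (or, for is_large, four) long flat
# cubes arranged in a cross/star, the union is trimmed flat at BASE_BEVEL_DEPTH above the
# bottom, and the resulting cutter is subtracted from the base object with a boolean modifier.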
def channelCutout(context, obj, is_large=False):
# we know cursor is at the center bottom due to previous op (fixBottom)
cursor = bpy.context.scene.cursor.location
# Create center sphere
bpy.ops.mesh.primitive_uv_sphere_add(segments=50, ring_count=25, radius=3.5,
location=(cursor[0], cursor[1], cursor[2] - 2))
sphere = bpy.context.active_object
sphere.name = "_basetemp_center"
# Create first cube (cutout)
bpy.ops.mesh.primitive_cube_add(size=2.2, location=(cursor[0], cursor[1], cursor[2] + .5))
cube1 = bpy.context.active_object
cube1.name = "_basetemp_cube1"
scaleCubeToChanne(cube1)
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
# Duplicate cube and rotate 90 degrees
bpy.ops.object.duplicate(linked=False)
cube2 = bpy.context.active_object
cube2.name = "_basetemp_cube2"
bpy.ops.transform.rotate(value=math.radians(90), orient_axis='Z', orient_type='GLOBAL')
if is_large:
bpy.ops.object.duplicate(linked=False)
cube3 = bpy.context.active_object
cube3.name = "_basetemp_cube3"
bpy.ops.transform.translate(value=(0.0,0.0,.01))
bpy.ops.transform.rotate(value=math.radians(45), orient_axis='Z', orient_type='GLOBAL')
bpy.ops.object.duplicate(linked=False)
cube4 = bpy.context.active_object
cube4.name = "_basetemp_cube4"
bpy.ops.transform.rotate(value=math.radians(90), orient_axis='Z', orient_type='GLOBAL')
# Create 3rd cube to cut top flat where we want it
cube_size = 200
bpy.ops.mesh.primitive_cube_add(size=cube_size, location=(cursor[0], cursor[1], cursor[2] + (cube_size/2) + BASE_BEVEL_DEPTH - .01))
cut_cube = bpy.context.active_object
cut_cube.name = "_basetemp_cutcube"
# Now create all modifiers
cube_1_add_mod = "_basetemp_mod1"
bool_add1 = sphere.modifiers.new(type="BOOLEAN", name=cube_1_add_mod)
bool_add1.object = cube1
bool_add1.operation = 'UNION'
cube_2_add_mod = "_basetemp_mod2"
bool_add1 = sphere.modifiers.new(type="BOOLEAN", name=cube_2_add_mod)
bool_add1.object = cube2
bool_add1.operation = 'UNION'
if is_large:
cube_3_add_mod = "_basetemp_mod5"
bool_add1 = sphere.modifiers.new(type="BOOLEAN", name=cube_3_add_mod)
bool_add1.object = cube3
bool_add1.operation = 'UNION'
cube_4_add_mod = "_basetemp_mod6"
bool_add1 = sphere.modifiers.new(type="BOOLEAN", name=cube_4_add_mod)
bool_add1.object = cube4
bool_add1.operation = 'UNION'
cut_cube_cut_mod = "_basetemp_mod3"
bool_cut1 = sphere.modifiers.new(type="BOOLEAN", name=cut_cube_cut_mod)
bool_cut1.object = cut_cube
bool_cut1.operation = 'DIFFERENCE'
obj_cut_mod = "_basetemp_mod4"
bool_cut2 = obj.modifiers.new(type="BOOLEAN", name=obj_cut_mod)
bool_cut2.object = sphere
bool_cut2.operation = 'DIFFERENCE'
# Now apply modifier for final channel cut obj to base
context.view_layer.objects.active = obj
bpy.ops.object.modifier_apply(modifier=obj_cut_mod)
# Finally delete all the temp objects
obj.select_set(False)
sphere.select_set(True)
cube1.select_set(True)
cube2.select_set(True)
if is_large:
cube3.select_set(True)
cube4.select_set(True)
cut_cube.select_set(True)
bpy.ops.object.delete()
obj.select_set(True)
context.view_layer.objects.active = obj
def trimBottom(context, obj, remove):
bottom_z = bottomZ(obj)
print("BOTTOM Z IS "+str(bottom_z))
bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY")
bpy.ops.view3d.snap_cursor_to_active()
cursor = bpy.context.scene.cursor.location
# Create cube to cut bottom flat with
cube_size = 200
bpy.ops.mesh.primitive_cube_add(size=cube_size, location=(
cursor[0], cursor[1], bottom_z - (cube_size/2) + remove ))
cut_cube = bpy.context.active_object
cut_cube.name = "_basetemp_cutcube"
# add modifier
obj_cut_mod = "_basetemp_trimmod"
bool_cut1 = obj.modifiers.new(type="BOOLEAN", name=obj_cut_mod)
bool_cut1.object = cut_cube
bool_cut1.operation = 'DIFFERENCE'
context.view_layer.objects.active = obj
bpy.ops.object.modifier_apply(modifier=obj_cut_mod)
# Finally delete all the temp objects
obj.select_set(False)
cut_cube.select_set(True)
bpy.ops.object.delete()
obj.select_set(True)
context.view_layer.objects.active = obj
# ------- OPERATORS ------
class OP_ExportRepeat(bpy.types.Operator):
bl_idname = "ec3d_bases.export_repeat"
bl_label = "Export Again"
bl_description = "Repeat export to last destination"
bl_options = {'REGISTER'}
def execute(self, context):
cnt = exportToFolder(context, context.scene.ec3d.export_path)
self.report({"INFO"}, "%s files exported!" % cnt)
return {'FINISHED'}
class OP_ExportToSTL(bpy.types.Operator, ExportHelper):
bl_idname = "ec3d_bases.export_to_stl"
bl_label = "Export Here"
bl_description = "Export selected object to STL file"
bl_options = {'REGISTER'}
filename_ext = ".stl"
filter_glob: bpy.props.StringProperty(
default="*.stl",
options={'HIDDEN'},
maxlen=255, # Max internal buffer length, longer would be clamped.
)
def execute(self, context):
cnt = exportToFolder(context, self.filepath)
self.report({"INFO"}, "%s files exported!" % cnt)
return {'FINISHED'}
class OP_FixBottom(bpy.types.Operator):
bl_idname = "ec3d_bases.fix_bottom"
bl_label = "Fix bottom variance"
bl_description = "Attempt to fix/cleanup the flat bottom of a model"
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
def execute(self, context):
if len(context.selected_objects) != 1:
self.report({"WARNING"}, "No object selected")
return {"CANCELLED"}
bpy.ops.object.mode_set(mode='OBJECT')
obj = context.view_layer.objects.active
updated = fixBottom(obj)
self.report({"INFO"}, 'Bottom fixed, updated %s verts' % updated )
return {'FINISHED'}
class OP_TrimBottomSmall(bpy.types.Operator):
bl_idname = "ec3d_bases.trim_bottom_small"
bl_label = "Trim Bottom (small)"
bl_description = "Trim the bottom of model, a little, to try and make it completely flat"
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
def execute(self, context):
if len(context.selected_objects) != 1:
self.report({"WARNING"}, "No object selected")
return {"CANCELLED"}
bpy.ops.object.mode_set(mode='OBJECT')
obj = context.view_layer.objects.active
trimBottom(context, obj, BOTTOM_TRIM_VALUE_SHORT)
self.report({"INFO"}, 'Bottom trimmed' )
return {'FINISHED'}
class OP_TrimBottomLarge(bpy.types.Operator):
bl_idname = "ec3d_bases.trim_bottom_large"
bl_label = "Trim Bottom (large)"
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import numpy as np
from deerlab.utils import Jacobian, nearest_psd
from scipy.stats import norm
from scipy.signal import fftconvolve
from scipy.linalg import block_diag
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import copy
class FitResult(dict):
# ========================================================================
r""" Represents the results of a fit.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
cost : float
Value of the cost function at the solution.
residuals : ndarray
Vector of residuals at the solution.
stats : dict
Goodness of fit statistical estimators:
* ``stats['chi2red']`` - Reduced \chi^2 test
* ``stats['r2']`` - R^2 test
* ``stats['rmsd']`` - Root-mean squared deviation (RMSD)
* ``stats['aic']`` - Akaike information criterion
* ``stats['aicc']`` - Corrected Akaike information criterion
* ``stats['bic']`` - Bayesian information criterion
Methods
-------
plot()
Display the fit results on a Matplotlib window. The script returns a
`matplotlib.axes <https://matplotlib.org/api/axes_api.html>`_ object.
All graphical parameters can be adjusted from this object.
Notes
-----
There may be additional attributes not listed above depending on the
specific fit function. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
# =========================================================================
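# Usage sketch (illustrative, not a documented DeerLab example): FitResult is a dict with
# attribute access, so fields can be read either way.
#
#   fit = FitResult(x=np.array([1.0, 2.0]), success=True, cost=0.5)
#   fit.x               # -> array([1., 2.])
#   fit['cost']         # -> 0.5
#   sorted(fit.keys())  # lists the available attributes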
class UQResult:
# =========================================================================
r""" Represents the uncertainty quantification of fit results.
Attributes
----------
type : string
Uncertainty quantification approach:
* 'covariance' - Covariance-based uncertainty analysis
* 'bootstrap' - Bootstrapped uncertainty analysis
mean : ndarray
Mean values of the uncertainty distribution of the parameters.
median : ndarray
Median values of the uncertainty distribution of the parameters.
std : ndarray
Standard deviations of the uncertainty distribution of the parameters.
covmat : ndarray
Covariance matrix
nparam : int scalar
Number of parameters in the analysis.
Methods
-------
"""
def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,profiles=None,noiselvl=None):
#Parse inputs schemes
if uqtype=='covariance':
# Scheme 1: UQResult('covariance',parfit,covmat,lb,ub)
self.type = uqtype
parfit = data
nParam = len(parfit)
elif uqtype == 'profile':
# Scheme 2: UQResult('profile',profiles)
if not isinstance(profiles,list):
profiles = [profiles]
self.type = uqtype
self.__parfit = data
self.__noiselvl = noiselvl
self.profile = profiles
self.threshold = threshold
nParam = len(np.atleast_1d(data))
elif uqtype == 'bootstrap':
# Scheme 3: UQResult('bootstrap',samples)
self.type = uqtype
samples = data
self.samples = samples
nParam = np.shape(samples)[1]
elif uqtype=='void':
# Scheme 4: UQResult('void')
self.type = uqtype
self.mean, self.median, self.std, self.covmat, self.nparam = ([] for _ in range(5))
return
else:
raise NameError("uqtype not found. Must be: 'covariance', 'profile', 'bootstrap' or 'void'.")
if lb is None:
lb = np.full(nParam, -np.inf)
if ub is None:
ub = np.full(nParam, np.inf)
# Set private variables
self.__lb = lb
self.__ub = ub
self.nparam = nParam
# Create confidence intervals structure
if uqtype=='covariance':
self.mean = parfit
self.median = parfit
self.std = np.sqrt(np.diag(covmat))
self.covmat = covmat
# Profile-based CI specific fields
elif uqtype == 'profile':
xs = [self.pardist(n)[0] for n in range(nParam)]
pardists = [self.pardist(n)[1] for n in range(nParam)]
means = [np.trapz(pardist*x,x) for x,pardist in zip(xs,pardists)]
std = [np.sqrt(np.trapz(pardist*(x-mean)**2,x)) for x,pardist,mean in zip(xs,pardists,means)]
self.mean = means
self.median = self.percentile(50)
self.std = std
self.covmat = np.diag(np.array(std)**2)
# Bootstrap-based CI specific fields
elif uqtype == 'bootstrap':
means = np.mean(samples,0)
covmat = np.squeeze(samples).T@np.squeeze(samples)/np.shape(samples)[0] - means*means.T
self.mean = means
self.median = self.percentile(50)
self.std = np.squeeze(np.std(samples,0))
self.covmat = covmat
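# Usage sketch (made-up numbers): covariance-based uncertainty for a two-parameter fit.
#
#   parfit = np.array([0.5, 2.0])
#   covmat = np.array([[0.01, 0.00],
#                      [0.00, 0.04]])
#   uq = UQResult('covariance', parfit, covmat)
#   uq.std      # -> array([0.1, 0.2])
#   uq.ci(95)   # 95% confidence intervals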
# Gets called when an attribute is accessed
#--------------------------------------------------------------------------------
def __getattribute__(self, attr):
try:
# Calling the super class to avoid recursion
if attr!='type' and super(UQResult, self).__getattribute__('type') == 'void':
# Check if the uncertainty quantification has been done, if not report that there is nothing in the object
raise ValueError('The requested attribute/method is not available. Uncertainty quantification was not calculated during the fit (the `uq=None` keyword was used).')
except AttributeError:
# Catch cases where 'type' attribute has still not been defined (e.g. when using copy.deepcopy)
pass
# Otherwise return requested attribute
return super(UQResult, self).__getattribute__(attr)
#--------------------------------------------------------------------------------
# Combination of multiple uncertainties
#--------------------------------------------------------------------------------
def join(self,*args):
"""
Combine multiple uncertainty quantification instances.
Parameters
----------
uq : any number of :ref:`UQResult`
Uncertainty quantification objects with ``N1,N2,...,Nn`` parameters to be joined
to the object calling the method with ``M`` parameters.
Returns
-------
uq_joined : :ref:`UQResult`
Joined uncertainty quantification object with a total of ``M + N1 + N2 + ... + Nn`` parameters.
The parameter vectors are concatenated on the order they are passed.
"""
# Original metadata
mean = self.mean
covmat = self.covmat
lbm = self.__lb
ubm = self.__ub
for uq in args:
if not isinstance(uq, UQResult):
raise TypeError('Only UQResult objects can be joined.')
if uq.type=='void':
raise TypeError('Void UQResults cannot be joined.')
# Concatenate metadata of external UQResult objects
mean = np.concatenate([mean, uq.mean])
covmat = block_diag(covmat, uq.covmat)
lbm = np.concatenate([lbm, uq.__lb])
ubm = np.concatenate([ubm, uq.__ub])
# Return new UQResult object with combined information
return UQResult('covariance',mean,covmat,lbm,ubm)
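# Note: the joined result is always returned as a covariance-type UQResult built from the
# concatenated means and block-diagonally combined covariance matrices of the inputs, i.e.
# parameters coming from different objects are treated as uncorrelated.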
#--------------------------------------------------------------------------------
# Parameter distributions
#--------------------------------------------------------------------------------
def pardist(self,n=0):
"""
Generate the uncertainty distribution of the n-th parameter
Parameters
----------
n : int scalar
Index of the parameter
Returns
-------
ax : ndarray
Parameter values at which the distribution is evaluated
pdf : ndarray
Probability density function of the parameter uncertainty.
"""
if n >= self.nparam or n < 0:
raise ValueError('The input must be a valid integer number.')
if self.type == 'covariance':
# Generate Gaussian distribution based on covariance matrix
sig = np.sqrt(self.covmat[n,n])
xmean = self.mean[n]
x = np.linspace(xmean-4*sig,xmean+4*sig,500)
pdf = 1/sig/np.sqrt(2*np.pi)*np.exp(-((x-xmean)/sig)**2/2)
if self.type == 'bootstrap':
# Get bw using silverman's rule (1D only)
samplen = self.samples[:, n].real
if np.all(samplen == samplen[0]):
# Dirac's delta distribution
x = np.array([0.9*samplen[0],samplen[0],1.1*samplen[0]])
pdf = np.array([0,1,0])
else:
sigma = np.std(samplen, ddof=1)
bw = sigma*(len(samplen)*3/4.0)**(-1/5)
# Make histogram
maxbin = np.maximum(np.max(samplen),np.mean(samplen)+3*sigma)
minbin = np.minimum(np.min(samplen),np.mean(samplen)-3*sigma)
bins = np.linspace(minbin,maxbin, 2**10 + 1)
count, edges = np.histogram(samplen, bins=bins)
# Generate kernel
delta = np.maximum(np.finfo(float).eps,(edges.max() - edges.min()) / (len(edges) - 1))
kernel_x = np.arange(-4*bw, 4*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(count, kernel, mode='same')
# Set x coordinate of pdf to midpoint of bin
x = edges[:-1] + delta
if self.type=='profile':
if not isinstance(self.profile,list) and n==0:
profile = self.profile
else:
profile = self.profile[n]
σ = self.__noiselvl
obj2likelihood = lambda f: 1/np.sqrt(σ*2*np.pi)*np.exp(-1/2*f/σ**2)
profileinterp = interp1d(profile['x'], profile['y'], kind='slinear', fill_value=1e6,bounds_error=False)
x = np.linspace(np.min(profile['x']), np.max(profile['x']), 2**10 + 1)
pdf = obj2likelihood(profileinterp(x))
# Generate kernel
sigma = np.sum(x*pdf/np.sum(pdf))
bw = sigma*(1e12*3/4.0)**(-1/5)
delta = np.maximum(np.finfo(float).eps,(x.max() - x.min()) / (len(x) - 1))
kernel_x = np.arange(-5*bw, 5*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(pdf, kernel, mode='same')
# Clip the distributions outside the boundaries
pdf[x < self.__lb[n]] = 0
pdf[x > self.__ub[n]] = 0
# Enforce non-negativity (takes care of negative round-off errors)
pdf = np.maximum(pdf,0)
# Ensure normalization of the probability density function
pdf = pdf/np.trapz(pdf, x)
return x, pdf
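# Note: the returned pdf is clipped to the parameter bounds and normalized with np.trapz,
# so downstream quantities (means, medians, percentiles) can be computed directly from it,
# as done in __init__ for profile-based uncertainty and in percentile() below.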
#--------------------------------------------------------------------------------
# Parameter percentiles
#--------------------------------------------------------------------------------
def percentile(self,p):
"""
Compute the p-th percentiles of the parameters uncertainty distributions
Parameters
----------
p : float scalar
Percentile (between 0-100)
Returns
-------
prctiles : ndarray
Percentile values of all parameters
"""
if p>100 or p<0:
raise ValueError('The input must be a number between 0 and 100')
x = np.zeros(self.nparam)
for n in range(self.nparam):
# Get parameter PDF
values,pdf = self.pardist(n)
# Compute corresponding CDF
cdf = np.cumsum(pdf)
cdf /= max(cdf)
# Eliminate duplicates
cdf, index = np.lib.arraysetops.unique(cdf,return_index=True)
# Interpolate requested percentile
x[n] = np.interp(p/100,cdf,values[index])
return x
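# Note: percentile() integrates the pardist() pdf into a CDF and inverts it by interpolation,
# so e.g. percentile(50) gives the median while percentile(2.5) and percentile(97.5) bound a
# 95% interval of the parameter distribution.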
#--------------------------------------------------------------------------------
# Covariance-based confidence intervals
#--------------------------------------------------------------------------------
def ci(self,coverage):
"""
Compute the confidence intervals for the parameters.
Parameters
----------
coverage : float scalar
Coverage (confidence level) of the confidence intervals (between 0-100)
Returns
-------
ci : 2D-ndarray
Confidence intervals for the parameters:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#####
Tools
#####
*Created on Thu Jul 2 10:07:56 2015 by <NAME>*
A set of tools to use with the `RDKit <http://rdkit.org>`_ in the IPython notebook.
"""
import time
import sys
import base64
import os
import os.path as op
import random
import csv
import gzip
import math
import pickle
from copy import deepcopy
from itertools import product
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import rdkit.Chem.Descriptors as Desc
# imports for similarity search
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit import DataStructs
from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker
import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold
try:
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
except KeyError: # Font "DejaVu Sans" is not available
pass
from PIL import Image, ImageChops
import numpy as np
from . import html_templates as html
try:
import ipywidgets as ipyw
WIDGETS = True
except ImportError:
WIDGETS = False
from IPython.core.display import HTML, display
if sys.version_info[0] > 2:
PY3 = True
from io import BytesIO as IO
else:
PY3 = False
from cStringIO import StringIO as IO
try:
from . import bokeh_tools as bkt
PLOT_TOOL = "bokeh"
except ImportError:
print(" * could not import Bokeh, plotting with Highcharts instead.")
PLOT_TOOL = "highcharts"
from . import hc_tools as hct
try:
from misc_tools import apl_tools as apt
AP_TOOLS = True
except ImportError:
AP_TOOLS = False
USE_FP = "morgan" # other options: "avalon", "default"
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
try:
from Contrib.SA_Score import sascorer
SASCORER = True
except ImportError:
print("* SA scorer not available. RDKit's Contrib dir needs to be in the Python import path...")
SASCORER = False
if AP_TOOLS:
#: Library version
VERSION = apt.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
else:
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
if op.isfile("lib/jsme/jsme.nocache.js"):
JSME_LOCATION = "lib"
else:
print("- no local installation of JSME found, using web version.")
JSME_LOCATION = "http://peter-ertl.com/jsme/JSME_2017-02-26"
BGCOLOR = "#94CAEF"
IMG_GRID_SIZE = 235
# A list of partial property strings to use for ordering of properties:
DEFAULT_ORDER = ["_id", "supplier", "producer", "activity|pic50",
"hit", "actass", "pure_flag", "purity", "identity", "lcms"]
JSME_OPTIONS = {"css": ["css/style.css", "css/collapsible_list.css"],
"scripts": ["lib/jsme/jsme.nocache.js"]}
TBL_JAVASCRIPT = '''<script type="text/javascript">
function toggleCpd(cpdIdent)
{{
listPos = document.id_list{ts}.data.value.indexOf(cpdIdent);
cpdIdentCell = document.getElementById(cpdIdent+"_{ts}");
if (listPos == -1)
{{
if (document.id_list{ts}.remark.checked == true)
{{
rem = "\\t" + prompt("Remark (Enter for none):", "");
}}
else
{{
rem = "";
}}
document.id_list{ts}.data.value = document.id_list{ts}.data.value + cpdIdent + rem + "\\n";
cpdIdentCell.style.backgroundColor = "yellow";
}}
else
{{
removeStr = cpdIdent;
tempStr2 = document.id_list{ts}.data.value;
if (listPos > 0) {{
tempStr1 = tempStr2.substring(0, listPos);
tempStr2 = tempStr2.substring(listPos, tempStr2.length);
}} else {{
tempStr1 = "";
}}
listPos = tempStr2.indexOf("\\n");
if (listPos < tempStr2.length - 1) {{
tempStr1 = tempStr1 + tempStr2.substring(listPos+1, tempStr2.length)
}}
document.id_list{ts}.data.value = tempStr1;
cpdIdentCell.style.backgroundColor = "{bgcolor}";
}}
show_number_selected();
}}
function show_number_selected() {{
// display the number of selected compounds:
var count = (document.id_list{ts}.data.value.match(/\\n/g) || []).length;
document.getElementById("selection_title{ts}").innerHTML = "Selection (" + count + "):";
}}
function highlight_cpds() {{
// highlights compounds that were pasted into the selection list
// and keeps those that could be found
var lines = document.id_list{ts}.data.value.split("\\n");
var found = "";
for (var idx = 0; idx < lines.length; idx++) {{
var cpd = lines[idx];
var cpdIdentCell = document.getElementById(cpd+"_{ts}");
if (cpdIdentCell != null) {{
cpdIdentCell.style.backgroundColor = "yellow";
found = found + cpd + "\\n";
}}
}}
// set the value of the selection list to the found compound Ids
document.id_list{ts}.data.value = found;
show_number_selected();
}}
function myShowSelection() {{
document.location.hash = "#SelectionList";
}}
</script>
'''
ID_LIST = """<br><b><a name="SelectionList" id="selection_title{ts}">Selection (0):</a></b>
<form name="id_list{ts}">
<input type="checkbox" name="remark" value="prompt" > Prompt for Remarks<br>
<textarea name="data" cols="70" rows="10"></textarea>
<input type="button" name="highlight" value="highlight compounds" onclick="highlight_cpds()"
title="Paste a list of Compound_Ids here and press this button. The compounds will be highlighted in the report above. Compounds which were not found are removed from the list.">
</form>
"""
JSME_FORM = '''<script type="text/javascript" src="{jsme_loc}/jsme/jsme.nocache.js"></script>
<script type="text/javascript">
function jsmeOnLoad() {{
//arguments: HTML id, width, height (must be string not number!)
jsmeApplet{ts} = new JSApplet.JSME("appletContainer{ts}", "380px", "340px", {{
//optional parameters
"options" : "query,hydrogens"
}});
}}
function onSubmit() {{
var drawing = jsmeApplet{ts}.molFile();
// document.getElementById('jsme_smiles{ts}').value = drawing;
var command = '{var_name} = Chem.MolFromMolBlock("""' + drawing + '""")';
console.log("Executing Command: " + command);
var kernel = IPython.notebook.kernel;
kernel.execute(command);
}}
</script>
<table align="left" style="border: none;">
<tr style="border: none;">
<td id="appletContainer{ts}" style="border: none;"></td>
<td style="vertical-align: bottom; border: none;">
<button onclick="onSubmit()">done !</button>
</td>
</tr>
</table>
'''
class NoFieldTypes(Exception):
def __str__(self):
return repr("FieldTypeError: field types could not be extracted from Mol_List")
class Mol_List(list):
"""Enables display of molecule lists as HTML tables in IPython notebook just by-call
(via _repr_html)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.order = None
self.ia = False # whether the table and grid views are interactive or not
self.plot_tool = PLOT_TOOL
self.id_prop = None
self.recalc_needed = {}
self._set_recalc_needed()
def _pass_properties(self, new_list):
new_list.order = self.order
new_list.ia = self.ia
new_list.plot_tool = self.plot_tool
def __getitem__(self, item):
result = list.__getitem__(self, item)
try:
new_list = type(self)(result)
# pass on properties
self._pass_properties(new_list)
return new_list
except TypeError:
return result
def new(self, *args):
new_list = type(self)(*args)
# pass on properties
self._pass_properties(new_list)
return new_list
def _repr_html_(self):
id_prop = guess_id_prop(list_fields(self)) if self.ia else None
return mol_table(self, id_prop=id_prop, order=self.order)
def _set_recalc_needed(self):
"""Make sure that the expensive calculations are not done too often."""
self.len = len(self)
self.recalc_needed["plot_tool"] = PLOT_TOOL
keys = ["d", "fields", "field_types", "id_prop"]
for k in keys:
self.recalc_needed[k] = True
def _get_field_types(self):
"""Detect all the property field types.
Returns:
Dict with the property names as keys and the types as values."""
print(" > detecting field types...")
field_types = {}
if len(self) > 100:
sdf_sample = random.sample(self, len(self) // 2)
else:
sdf_sample = self
for mol in sdf_sample:
prop_names = mol.GetPropNames()
for prop in prop_names:
prop_type = "number"
prop_str = mol.GetProp(prop)
try:
float(prop_str)
if prop.lower().endswith("id"):
prop_type = "key"
except ValueError:
prop_type = "str"
if prop in field_types:
if field_types[prop] in ["number", "key"] and prop_type == "str":
# "str" overrides everything: if one string is among the values
# of a property, all values become type "str"
field_types[prop] = prop_type
else:
field_types[prop] = prop_type
if not field_types:
raise NoFieldTypes()
return field_types
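# Illustrative result (hypothetical property names): a Mol_List whose molecules carry
# "Compound_Id", "Supplier" and "pIC50" SD properties would yield something like
# {"Compound_Id": "key", "Supplier": "str", "pIC50": "number"}.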
def _calc_d(self):
self._d = {x: [] for x in self.fields}
self._d["mol"] = []
for mol in self:
if not mol: continue
if self.plot_tool == "bokeh":
img_tag = b64_img(mol)
else:
img_tag = '<img src="data:image/png;base64,{}" alt="Mol"/>'.format(b64_img(mol))
self._d["mol"].append(img_tag)
for prop in self.fields:
if mol.HasProp(prop):
self._d[prop].append(get_value(mol.GetProp(prop)))
else:
self._d[prop].append(np.nan)
def append(self, other):
self._set_recalc_needed()
super().append(other)
def extend(self, other):
self._set_recalc_needed()
super().extend(other)
def align(self, mol_or_smiles=None, in_place=True):
"""Align the Mol_list to the common substructure provided as Mol or Smiles.
Args:
mol_or_smiles (bool): The substructure to which to align.
If None, then the method uses rdFMCS to determine the MCSS
of the Mol_List."""
self.recalc_needed["d"] = True
if in_place:
align(self, mol_or_smiles)
else:
new_list = self.new()
for mol in self:
new_list.append(mol)
align(new_list, mol_or_smiles)
return new_list
def add_id(self, id_prop="molid"):
"""Add an Id property ``id_prop`` to the Mol_List.
By default, "molid" is used."""
for idx, mol in enumerate(self, 1): # start at index 1
mol.SetProp(id_prop, str(idx))
def write_sdf(self, fn, conf_id=-1):
"""Write Mol_List instance as SD File"""
writer = Chem.SDWriter(fn)
# try to save the column order
first_mol = True
for mol in self:
if first_mol:
order = None
try:
order = self.order
except AttributeError:
pass
if order:
mol.SetProp("order", ";".join(order))
try:
mol.GetConformer()
except ValueError: # no 2D coords... calculate them
mol.Compute2DCoords()
writer.write(mol, confId=conf_id)
# remove the order property again from mol_list
if first_mol:
first_mol = False
mol.ClearProp("order")
writer.close()
def write_csv(self, fn="mols.csv", props=None, include_smiles=True, isomeric=True):
"""Writes the Mol_List as a csv file to disk.
Parameters:
fn (str): Filename.
props (list[string]): An optional list of molecule properties to write.
If `props` is None, all props are written.
include_smiles (bool): If true, the Smiles will be calculated on the fly
and written to the csv.
isomeric (bool): If True, the generated Smiles will be isomeric."""
if props is None:
props = self.fields
if not isinstance(props, list):
props = [props]
csv_fields = props.copy()
if include_smiles:
csv_fields.append("Smiles")
with open(fn, "w") as f:
writer = csv.DictWriter(f, csv_fields, dialect="excel-tab")
writer.writeheader()
for mol in self:
row = {}
if include_smiles:
smi = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
row["Smiles"] = smi
for prop in props:
if mol.HasProp(prop):
val = mol.GetProp(prop)
if val != "":
row[prop] = val
writer.writerow(row)
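# Usage sketch (hypothetical file and property names); note that despite the .csv extension
# the file is written with the "excel-tab" dialect, i.e. tab-separated:
#
#   mol_list.write_csv("hits.csv", props=["Compound_Id", "pIC50"], include_smiles=True)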
offset_x, offset_y):
self.center = (self.center[0] + offset_x, self.center[1] + offset_y)
self.start = (self.start[0] + offset_x, self.start[1] + offset_y)
self.end = (self.end[0] + offset_x, self.end[1] + offset_y)
def rotate(self, angle, center=(0, 0)):
self.start_angle += angle
self.end_angle += angle
self.center = rotate_point(self.center, angle, center)
self.start = rotate_point(self.start, angle, center)
self.end = rotate_point(self.end, angle, center)
self.angle_regions = _normalize_angle(self.start_angle, self.end_angle)
def intersections_with_halfline(self, point_from, point_to, error_range):
intersection = \
_intersections_of_line_and_circle(
point_from, point_to, self.center, self.radius, error_range)
if intersection is None:
return []
else:
p1, p2, p1_angle, p2_angle, p1_t, p2_t = intersection
if is_equal_point(p1, self.start, error_range):
p1 = None
elif p2 is not None and is_equal_point(p2, self.start, error_range):
p2 = None
def is_contained(angle, region, error):
if angle >= region[0] - error and angle <= region[1] + error:
return True
if angle < 0 and region[1] > 0:
angle = angle + 2 * pi
elif angle > 0 and region[0] < 0:
angle = angle - 2 * pi
return angle >= region[0] - error and angle <= region[1] + error
aerror = error_range * self.radius
pts = []
if p1 is not None and p1_t >= 0 and not is_equal_point(p1, self.start, error_range):
for region in self.angle_regions:
if is_contained(p1_angle, region, aerror):
pts.append(p1)
break
if p2 is not None and p2_t >= 0 and not is_equal_point(p2, self.start, error_range):
for region in self.angle_regions:
if is_contained(p2_angle, region, aerror):
pts.append(p2)
break
return pts
def intersections_with_arc(self, center, radius, angle_regions, error_range):
x1 = center[0] - self.center[0]
y1 = center[1] - self.center[1]
r1 = self.radius
r2 = radius
cd_sq = x1 * x1 + y1 * y1
cd = sqrt(cd_sq)
rd = abs(r1 - r2)
if (cd >= 0 and cd <= rd) or cd >= r1 + r2:
return []
A = (cd_sq + r1 * r1 - r2 * r2) / 2
scale = sqrt(cd_sq * r1 * r1 - A * A) / cd_sq
xl = A * x1 / cd_sq
xr = y1 * scale
yl = A * y1 / cd_sq
yr = x1 * scale
pt1_x = xl + xr
pt1_y = yl - yr
pt2_x = xl - xr
pt2_y = yl + yr
pt1_angle1 = atan2(pt1_y, pt1_x)
pt1_angle2 = atan2(pt1_y - y1, pt1_x - x1)
pt2_angle1 = atan2(pt2_y, pt2_x)
pt2_angle2 = atan2(pt2_y - y1, pt2_x - x1)
aerror = error_range * self.radius
pts=[]
for region in self.angle_regions:
if pt1_angle1 >= region[0] and pt1_angle1 <= region[1]:
for region in angle_regions:
if pt1_angle2 >= region[0] - aerror and pt1_angle2 <= region[1] + aerror:
pts.append((pt1_x + self.center[0], pt1_y + self.center[1]))
break
break
for region in self.angle_regions:
if pt2_angle1 >= region[0] and pt2_angle1 <= region[1]:
for region in angle_regions:
if pt2_angle2 >= region[0] - aerror and pt2_angle2 <= region[1] + aerror:
pts.append((pt2_x + self.center[0], pt2_y + self.center[1]))
break
break
return pts
class DxfPolylineStatement(DxfStatement):
def __init__(self, entity):
super(DxfPolylineStatement, self).__init__(entity)
self.start = (self.entity.points[0][0], self.entity.points[0][1])
self.is_closed = self.entity.is_closed
if self.is_closed:
self.end = self.start
else:
self.end = (self.entity.points[-1][0], self.entity.points[-1][1])
def disassemble(self):
class Item:
pass
def ptseq():
for i in range(1, len(self.entity.points)):
yield i
if self.entity.is_closed:
yield 0
x0 = self.entity.points[0][0]
y0 = self.entity.points[0][1]
b = self.entity.bulge[0]
for idx in ptseq():
pt = self.entity.points[idx]
x1 = pt[0]
y1 = pt[1]
if b == 0:
item = Item()
item.dxftype = 'LINE'
item.start = (x0, y0)
item.end = (x1, y1)
item.is_closed = False
yield DxfLineStatement.from_entity(item)
else:
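# A non-zero bulge encodes an arc segment: in DXF, bulge = tan(included_angle / 4),
# so the included angle is 4*atan(b). The centre lies on the perpendicular bisector of
# the chord (x0,y0)-(x1,y1), offset from the chord midpoint by cot(ang/2)/2 times the
# chord vector rotated 90 degrees; the radius is the distance from that centre to the
# start point.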
ang = 4 * atan(b)
xm = x0 + x1
ym = y0 + y1
t = 1 / tan(ang / 2)
xc = (xm - t * (y1 - y0)) / 2
yc = (ym + t * (x1 - x0)) / 2
r = sqrt((x0 - xc)*(x0 - xc) + (y0 - yc)*(y0 - yc))
rx0 = x0 - xc
ry0 = y0 - yc
rc = max(min(rx0 / r, 1.0), -1.0)
start_angle = acos(rc) if ry0 > 0 else 2 * pi - acos(rc)
start_angle *= 180 / pi
end_angle = start_angle + ang * 180 / pi
item = Item()
item.dxftype = 'ARC'
item.start = (x0, y0)
item.end = (x1, y1)
item.start_angle = start_angle
item.end_angle = end_angle
item.radius = r
item.center = (xc, yc)
item.is_closed = end_angle - start_angle >= 360
yield DxfArcStatement(item)
x0 = x1
y0 = y1
b = self.entity.bulge[idx]
def to_inch(self):
self.start = (inch(self.start[0]), inch(self.start[1]))
self.end = (inch(self.end[0]), inch(self.end[1]))
for idx in range(0, len(self.entity.points)):
self.entity.points[idx] = (
inch(self.entity.points[idx][0]), inch(self.entity.points[idx][1]))
def to_metric(self):
self.start = (metric(self.start[0]), metric(self.start[1]))
self.end = (metric(self.end[0]), metric(self.end[1]))
for idx in range(0, len(self.entity.points)):
self.entity.points[idx] = (
metric(self.entity.points[idx][0]), metric(self.entity.points[idx][1]))
def offset(self, offset_x, offset_y):
for idx in range(len(self.entity.points)):
self.entity.points[idx] = (
self.entity.points[idx][0] + offset_x, self.entity.points[idx][1] + offset_y)
def rotate(self, angle, center=(0, 0)):
for idx in range(len(self.entity.points)):
self.entity.points[idx] = rotate_point(self.entity.points[idx], angle, center)
class DxfStatements(object):
def __init__(self, statements, units, dcode=10, draw_mode=None, fill_mode=None):
if draw_mode is None:
draw_mode = DxfFile.DM_LINE
if fill_mode is None:
fill_mode = DxfFile.FM_TURN_OVER
self._units = units
self.dcode = dcode
self.draw_mode = draw_mode
self.fill_mode = fill_mode
self.pitch = inch(1) if self._units == 'inch' else 1
self.width = 0
self.error_range = inch(ACCEPTABLE_ERROR) if self._units == 'inch' else ACCEPTABLE_ERROR
self.statements = list(filter(
lambda i: not (isinstance(i, DxfLineStatement) and \
is_equal_point(i.start, i.end, self.error_range)),
statements
))
self.close_paths, self.open_paths = generate_paths(self.statements, self.error_range)
self.sorted_close_paths = []
self.polarity = True # True means dark, False means clear
@property
def units(self):
return self._units
def _polarity_command(self, polarity=None):
if polarity is None:
polarity = self.polarity
return '%LPD*%' if polarity else '%LPC*%'
def _prepare_sorted_close_paths(self):
if self.sorted_close_paths:
return
for i in range(0, len(self.close_paths)):
for j in range(i + 1, len(self.close_paths)):
containee, container = judge_containment(
self.close_paths[i], self.close_paths[j], self.error_range)
if containee is not None:
containee.containers.append(container)
self.sorted_close_paths = sorted(self.close_paths, key=lambda path: len(path.containers))
def to_gerber(self, settings=FileSettings()):
def gerbers():
yield 'G75*'
yield self._polarity_command()
yield 'D{0}*'.format(self.dcode)
if self.draw_mode == DxfFile.DM_FILL:
yield 'G36*'
if self.fill_mode == DxfFile.FM_TURN_OVER:
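# "Turn over" filling: closed paths are ordered by how many other closed paths contain
# them (their nesting level), and the layer polarity (%LPD*/%LPC*) is flipped every time
# the nesting level increases, so inner contours become clear "holes" inside the darker
# outer fill, alternating level by level.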
self._prepare_sorted_close_paths()
polarity = self.polarity
level = 0
for path in self.sorted_close_paths:
if len(path.containers) > level:
level = len(path.containers)
polarity = not polarity
yield 'G37*'
yield self._polarity_command(polarity)
yield 'G36*'
yield path.to_gerber(settings)
else:
for path in self.close_paths:
yield path.to_gerber(settings)
yield 'G37*'
else:
pitch = self.pitch if self.draw_mode == DxfFile.DM_MOUSE_BITES else 0
for path in self.open_paths:
yield path.to_gerber(settings, pitch=pitch, width=self.width)
for path in self.close_paths:
yield path.to_gerber(settings, pitch=pitch, width=self.width)
return '\n'.join(gerbers())
def to_excellon(self, settings=FileSettings()):
if self.draw_mode == DxfFile.DM_FILL:
return
def drills():
pitch = self.pitch if self.draw_mode == DxfFile.DM_MOUSE_BITES else 0
for path in self.open_paths:
yield path.to_excellon(settings, pitch=pitch, width=self.width)
for path in self.close_paths:
yield path.to_excellon(settings, pitch=pitch, width=self.width)
return '\n'.join(drills())
def to_inch(self):
if self._units == 'metric':
self._units = 'inch'
self.pitch = inch(self.pitch)
self.width = inch(self.width)
self.error_range = inch(self.error_range)
for path in self.open_paths:
path.to_inch()
for path in self.close_paths:
path.to_inch()
def to_metric(self):
if self._units == 'inch':
self._units = 'metric'
self.pitch = metric(self.pitch)
self.width = metric(self.width)
self.error_range = metric(self.error_range)
for path in self.open_paths:
path.to_metric()
for path in self.close_paths:
path.to_metric()
def offset(self, offset_x, offset_y):
for path in self.open_paths:
path.offset(offset_x, offset_y)
for path in self.close_paths:
path.offset(offset_x, offset_y)
def rotate(self, angle, center=(0, 0)):
for path in self.open_paths:
path.rotate(angle, center)
for path in self.close_paths:
path.rotate(angle, center)
class DxfFile(CamFile):
DM_LINE = 0
DM_FILL = 1
DM_MOUSE_BITES = 2
FM_SIMPLE = 0
FM_TURN_OVER = 1
FT_RX274X = 0
FT_EXCELLON = 1
@classmethod
def from_dxf(cls, dxf, settings=None, draw_mode=None, filename=None):
fsettings = settings if settings else \
FileSettings(zero_suppression='leading')
if dxf.header['$INSUNITS'] == 1:
fsettings.units = 'inch'
if not settings:
fsettings.format = (2, 5)
else:
fsettings.units = 'metric'
if not settings:
fsettings.format = (3, 4)
statements = []
for entity in dxf.entities:
if entity.dxftype == 'LWPOLYLINE':
statements.append(DxfPolylineStatement(entity))
elif entity.dxftype == 'LINE':
statements.append(DxfLineStatement.from_entity(entity))
elif entity.dxftype == 'CIRCLE':
statements.append(DxfArcStatement(entity))
elif entity.dxftype == 'ARC':
statements.append(DxfArcStatement(entity))
return cls(statements, fsettings, draw_mode, filename)
@classmethod
def rectangle(cls, width, height, left=0, bottom=0, units='metric', draw_mode=None, filename=None):
if units == 'metric':
settings = FileSettings(units=units, zero_suppression='leading', format=(3,4))
else:
settings = FileSettings(units=units, zero_suppression='leading', format=(2,5))
statements = [
DxfLineStatement(None, (left, bottom), (left + width, bottom)),
DxfLineStatement(None, (left + width, bottom), (left + width, bottom + height)),
DxfLineStatement(None, (left + width, bottom + height), (left, bottom + height)),
DxfLineStatement(None, (left, bottom + height), (left, bottom)),
]
"""Primary tests."""
import copy
import functools
import pickle
from typing import Any, Callable, Dict, List, Optional, Tuple
import warnings
import numpy as np
import pytest
import scipy.optimize
from pyblp import (
Agents, CustomMoment, DemographicCovarianceMoment, Formulation, Integration, Iteration, Optimization, Problem,
Products, Simulation, build_ownership, data_to_dict, parallel
)
from pyblp.utilities.basics import Array, Options, update_matrices, compute_finite_differences
from .conftest import SimulatedProblemFixture
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'method': '2s'}, id="two-step"),
pytest.param({'scale_objective': True}, id="scaled objective"),
pytest.param({'center_moments': False, 'W_type': 'unadjusted', 'se_type': 'clustered'}, id="complex covariances"),
pytest.param({'delta_behavior': 'last'}, id="faster starting delta values"),
pytest.param({'fp_type': 'linear'}, id="non-safe linear fixed point"),
pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
pytest.param({'fp_type': 'nonlinear'}, id="non-safe nonlinear fixed point"),
pytest.param(
{'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="linear Newton fixed point"
),
pytest.param(
{'fp_type': 'safe_nonlinear', 'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="nonlinear Newton fixed point"
)
])
def test_accuracy(simulated_problem: SimulatedProblemFixture, solve_options_update: Options) -> None:
"""Test that starting parameters that are half their true values give rise to errors of less than 10%."""
simulation, _, problem, solve_options, _ = simulated_problem
# skip different iteration configurations when they won't matter
if simulation.K2 == 0 and {'delta_behavior', 'fp_type', 'iteration'} & set(solve_options_update):
return pytest.skip("A different iteration configuration has no impact when there is no heterogeneity.")
if simulation.epsilon_scale != 1 and 'nonlinear' in solve_options_update.get('fp_type', 'safe_linear'):
return pytest.skip("Nonlinear fixed point configurations are not supported when epsilon is scaled.")
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
results = problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['sigma', 'pi', 'rho', 'beta']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(results, key), atol=0, rtol=0.1, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('compute_options', [
pytest.param({'method': 'approximate'}, id="approximation"),
pytest.param({'method': 'normal'}, id="normal distribution"),
pytest.param({'method': 'empirical'}, id="empirical distribution")
])
def test_optimal_instruments(simulated_problem: SimulatedProblemFixture, compute_options: Options) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 10% under
optimal instruments.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# compute optimal instruments and update the problem (only use a few draws to speed up the test)
compute_options = copy.deepcopy(compute_options)
compute_options.update({
'draws': 5,
'seed': 0
})
new_problem = problem_results.compute_optimal_instruments(**compute_options).to_problem()
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.1, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_importance_sampling(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 20% under
importance sampling.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# importance sampling is only relevant when there are agent data
if problem.K2 == 0:
return pytest.skip("There are no agent data.")
# it suffices to test importance sampling for problems without demographics
if problem.D > 0:
return pytest.skip("Testing importance sampling is hard with demographics.")
# compute a more precise delta
delta = problem_results.compute_delta(integration=simulation.integration)
# do importance sampling and verify that the mean utility didn't change if precise integration isn't used
sampling_results = problem_results.importance_sampling(
draws=500,
ar_constant=2,
seed=0,
delta=delta,
integration=Integration('mlhs', 50000, {'seed': 0}),
)
# solve the new problem
new_problem = sampling_results.to_problem()
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.2, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that post-estimation output medians are within 5% parametric bootstrap confidence intervals."""
_, _, problem, solve_options, problem_results = simulated_problem
# create bootstrapped results (use only a few draws and don't iterate for speed)
bootstrapped_results = problem_results.bootstrap(draws=100, seed=0, iteration=Iteration('return'))
# test that post-estimation outputs are within 95% confidence intervals
t = problem.products.market_ids[0]
merger_ids = np.where(problem.products.firm_ids == 1, 0, problem.products.firm_ids)
merger_ids_t = merger_ids[problem.products.market_ids == t]
method_mapping = {
"aggregate elasticities": lambda r: r.compute_aggregate_elasticities(),
"consumer surpluses": lambda r: r.compute_consumer_surpluses(),
"approximate prices": lambda r: r.compute_approximate_prices(merger_ids),
"own elasticities": lambda r: r.extract_diagonals(r.compute_elasticities()),
"aggregate elasticity in t": lambda r: r.compute_aggregate_elasticities(market_id=t),
"consumer surplus in t": lambda r: r.compute_consumer_surpluses(market_id=t),
"approximate prices in t": lambda r: r.compute_approximate_prices(merger_ids_t, market_id=t)
}
for name, method in method_mapping.items():
values = method(problem_results)
bootstrapped_values = method(bootstrapped_results)
median = np.median(values)
bootstrapped_medians = np.nanmedian(bootstrapped_values, axis=range(1, bootstrapped_values.ndim))
lb, ub = np.percentile(bootstrapped_medians, [2.5, 97.5])
np.testing.assert_array_less(np.squeeze(lb), np.squeeze(median) + 1e-14, err_msg=name)
np.testing.assert_array_less(np.squeeze(median), np.squeeze(ub) + 1e-14, err_msg=name)
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap_se(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that bootstrapped SEs are close to analytic ones. Or at least the same order of magnitude -- especially for
large numbers of RCs they may not necessarily be very close to each other.
"""
_, _, _, _, problem_results = simulated_problem
# compute bootstrapped results (ignore supply side iteration because we will only use the parameter draws)
bootstrapped_results = problem_results.bootstrap(draws=1000, seed=0, iteration=Iteration('return'))
# compare SEs
for key in ['sigma', 'pi', 'rho', 'beta', 'gamma']:
analytic_se = np.nan_to_num(getattr(problem_results, f'{key}_se'))
bootstrapped_se = getattr(bootstrapped_results, f'bootstrapped_{key}').std(axis=0)
np.testing.assert_allclose(analytic_se, bootstrapped_se, atol=0.001, rtol=0.5, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_result_serialization(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that result objects can be serialized and that their string representations are the same when they are
unpickled.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
originals = [
Formulation('x + y', absorb='C(z)', absorb_method='lsmr', absorb_options={'tol': 1e-10}),
Integration('halton', size=10, specification_options={'seed': 0, 'scramble': True}),
Iteration('lm', method_options={'max_evaluations': 100}, compute_jacobian=True),
Optimization('nelder-mead', method_options={'xatol': 1e-5}, compute_gradient=False, universal_display=False),
problem,
simulation,
simulation_results,
problem_results,
problem_results.compute_optimal_instruments(),
problem_results.bootstrap(draws=1, seed=0),
data_to_dict(simulation_results.product_data),
solve_options['micro_moments'],
]
for original in originals:
unpickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(unpickled), str(original)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'costs_bounds': (-1e10, 1e10)}, id="non-binding costs bounds"),
pytest.param({'check_optimality': 'both'}, id="Hessian computation")
])
def test_trivial_changes(simulated_problem: SimulatedProblemFixture, solve_options_update: Dict) -> None:
"""Test that solving a problem with arguments that shouldn't give rise to meaningful differences doesn't give rise
to any differences.
"""
simulation, _, problem, solve_options, results = simulated_problem
# solve the problem with the updated options
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_results = problem.solve(**updated_solve_options)
# test that all arrays in the results are essentially identical
for key, result in results.__dict__.items():
if isinstance(result, np.ndarray) and result.dtype != np.object:
if 'hessian' not in key:
np.testing.assert_allclose(result, getattr(updated_results, key), atol=1e-14, rtol=0, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_parallel(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that solving problems and computing results in parallel gives rise to the same results as when using serial
processing.
"""
_, _, problem, solve_options, results = simulated_problem
# compute marginal costs as a test of results (everything else has already been computed without parallelization)
costs = results.compute_costs()
# solve the problem and compute costs in parallel
with parallel(2):
parallel_results = problem.solve(**solve_options)
parallel_costs = parallel_results.compute_costs()
# test that all arrays in the results are essentially identical
for key, result in results.__dict__.items():
if isinstance(result, np.ndarray) and result.dtype != np.object:
np.testing.assert_allclose(result, getattr(parallel_results, key), atol=1e-14, rtol=0, err_msg=key)
# test that marginal costs are essentially equal
np.testing.assert_allclose(costs, parallel_costs, atol=1e-14, rtol=0)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize(['ED', 'ES', 'absorb_method', 'absorb_options'], [
pytest.param(1, 0, None, None, id="1 demand FE, default method"),
pytest.param(0, 1, None, None, id="1 supply FE, default method"),
pytest.param(1, 1, None, None, id="1 demand- and 1 supply FE, default method"),
pytest.param(2, 0, None, None, id="2 demand FEs, default method"),
pytest.param(0, 2, 'sw', None, id="2 supply FEs, SW"),
pytest.param(3, 1, 'lsmr', None, id="3 demand- and 1 supply FEs, LSMR"),
pytest.param(1, 3, 'map', {'transform': 'cimmino', 'acceleration': 'cg'}, id="1 demand- and 3 supply FEs, MAP-CG"),
])
def test_fixed_effects(
simulated_problem: SimulatedProblemFixture, ED: int, ES: int, absorb_method: Optional[str],
absorb_options: Optional[dict]) -> None:
"""Test that absorbing different numbers of demand- and supply-side fixed effects gives rise to essentially
identical first-stage results as does including indicator variables. Also test that optimal instruments results,
marginal costs, and test statistics remain unchanged.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
# there cannot be supply-side fixed effects if there isn't a supply side
if problem.K3 == 0:
ES = 0
if ED == ES == 0:
return pytest.skip("There are no fixed effects to test.")
# configure the optimization routine to only do a few iterations to save time and never get to the point where small
# numerical differences between methods build up into noticeable differences
solve_options = copy.deepcopy(solve_options)
solve_options['optimization'] = Optimization('l-bfgs-b', {'maxfun': | |
crashing with minions and bombs!", 45, (40, 430), (255, 0, 0))
setText("5. Points Awarded: 1, 3, 5 and 10 depending up on the kind of egg.", 45, (40, 500), (255, 0, 255))
pygame.draw.rect(instruction_window, (0, 128, 255), (display_width / 2 - 180, 560, 160, 80))
setText("Back", 60, (display_width / 2 - 160, 570), (255, 255, 255))
while instruction_clicked:
for event in pygame.event.get():
mouse = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if display_width / 2 - 20 > mouse[0] > display_width / 2 - 180 and 640 > mouse[1] > 560:
pygame.draw.rect(instruction_window, (255, 255, 255), (display_width / 2 - 180, 560, 160, 80))
setText("Back", 60, (display_width / 2 - 160, 570), (0, 128, 255))
setText("Best of Luck!!", 60, (display_width / 2, 560), (185, 0, 0), None, "Forte")
if event.type == pygame.MOUSEBUTTONDOWN:
instruction_clicked = False
game_play()
else:
pygame.draw.rect(instruction_window, (0, 128, 255), (display_width / 2 - 180, 560, 160, 80))
setText("Back", 60, (display_width / 2 - 160, 570), (255, 255, 255))
instruction_window.fill((69, 250, 245), (display_width / 2, 570, 500, 70))
pygame.display.update()
instruction_clock.tick(30)
# Shows previous best high score stored in files in the same directory where the game lies
if best_scores_clicked:
score_window = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('High Score')
score_clock = pygame.time.Clock()
score_window.fill((255, 255, 255))
setText("High Score", 120, (display_width / 2 - 320, 50), (125, 20, 220), None, "Elephant")
pygame.draw.line(score_window, (255, 0, 0), (display_width / 2 - 320, 180), (display_width / 2 + 340, 180), 5)
pygame.draw.line(score_window, (0, 255, 0), (display_width / 2 - 320, 185), (display_width / 2 + 340, 185), 5)
pygame.draw.line(score_window, (0, 0, 255), (display_width / 2 - 320, 190), (display_width / 2 + 340, 190), 5)
file_object, high_score = file_open_read()
setText(high_score, 100, (display_width / 2, display_height / 2), (0, 255, 0))
pygame.draw.rect(score_window, (212, 3, 108), (display_width / 2 - 130, display_height / 2 + 160, 300, 130))
setText("Go Back", 80, (display_width / 2 - 120, display_height / 2 + 180), (255, 255, 255))
file_close(file_object)
while best_scores_clicked:
for event in pygame.event.get():
mouse_position = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if display_width / 2 + 170 > mouse_position[0] > display_width / 2 - 130 and display_height / 2 + 290 > mouse_position[1] > display_height / 2 + 160:
pygame.draw.rect(score_window, (212, 3, 108), (display_width / 2 - 130, display_height / 2 + 160, 300, 130))
setText("Go Back", 80, (display_width / 2 - 120, display_height / 2 + 180), (255, 255, 255))
if event.type == pygame.MOUSEBUTTONDOWN:
best_scores_clicked = False
game_play()
else:
pygame.draw.rect(score_window, (212, 108, 3), (display_width / 2 - 130, display_height / 2 + 160, 300, 130))
setText("Go Back", 80, (display_width / 2 - 120, display_height / 2 + 180), (255, 255, 255))
pygame.display.update()
score_clock.tick(30)
# X and Y co-ordinates of the basket.
basket_x = display_width / 2 - 200
basket_y = display_height - 270
# For changing the X co-ordinate of the basket when appropriate keys are pressed.
x_change = 0
# Random positions for eggs
x = random.randint(0, display_width - 250)
y = -150
# Randomly loading Egg images
random_images = egg_images[random.randint(0, 9)]
random_eggs = pygame.image.load(random_images)
# Game in action !!!
while play_clicked:
# New game window is created on clicking the play button with the same dimensions.
play_window = pygame.display.set_mode((1250, 680))
pygame.display.set_caption('Play')
play_window.fill((255, 255, 255))
play_clock = pygame.time.Clock()
play_window.blit(random_eggs, (x, y))
# Horizontal line carrying the basket.
pygame.draw.line(play_window, (175, 115, 0), (0, display_height - 20), (display_width, display_height - 20))
# Color beneath the line.
pygame.draw.rect(play_window, (243, 128, 12), (0, display_height - 18, display_width, display_height))
for event in pygame.event.get():
# Event handling
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
x_change = unit
elif event.key == pygame.K_LEFT:
x_change = -unit
elif event.key == pygame.K_DOWN:
x_change = 0
basket_x += x_change
y += image_speed
# Placing the Basket in position
play_window.blit(basket, (basket_x, basket_y))
# Placing score on the game window.
setText("Your Score:" + str(score), 40, (0, 0), (107, 20, 99), (128, 255, 255))
# Checking egg and basket crossover.
if y + 80 >= basket_y and y + 80 <= basket_y + 15:
if x >= basket_x - 40 and x + 100 <= basket_x + display_width / 2 - 240:
# Checks collision with bomb and minion image
if random_images == egg_images[9] or random_images == egg_images[7]:
score -= 5
setText("Your Score:" + str(score), 40, (0, 0), (107, 20, 99), (128, 255, 255))
# Checking whether the current score is greater than the best score.
file, current_best_score = file_open_read()
file_close(file)
if score > int(current_best_score):
file = file_open_write(str(score))
file_close(file)
setText("Crashed", 150, (display_width / 2 - 240, 35), (0, 0, 0))
setText(None, 40, (0, 0), (255, 255, 255))
play_window.blit(explosion, (basket_x, basket_y - 80))
pygame.display.update()
time.sleep(3)
for k in range(0, display_width + 1, 5):
setText("Your Score:" + str(score), 40, (k, 0), (107, 20, 99), (128, 255, 255))
pygame.time.wait(20)
time.sleep(2)
game_over = True
play_clicked = False
# Makes the egg disappear !
y = display_height
# Incrementing the score appropriately.
if random_images == egg_images[6]:
score += 1
elif random_images == egg_images[0] or random_images == egg_images[1] or random_images == egg_images[3]:
score += 3
elif random_images == egg_images[4] or random_images == egg_images[5] or random_images == egg_images[8]:
score += 5
elif random_images == egg_images[2]:
score += 10
        # Checking whether the egg image has crossed the floor.
if y >= display_height + 200:
# Random positions for eggs
x = random.randint(0, display_width - 250)
y = -150
# Randomly loading Egg images
random_images = egg_images[random.randint(0, 9)]
random_eggs = pygame.image.load(random_images)
            # Increasing the speed at which the basket moves in both directions.
if unit != 15:
unit += 1
            # Increasing the speed at which the images move down.
if image_speed != 16:
image_speed += 1
# Restricting the basket within the width of the Game window
if basket_x <= 0:
basket_x = 0
elif basket_x >= display_width - 300:
basket_x = display_width - 300
pygame.display.update()
play_clock.tick(60)
# Game Over window
if game_over:
game_over_window = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Game Over Buddy!")
game_over_clock = pygame.time.Clock()
game_over_window.fill((188, 7, 116))
setText("G", 90, (180, 250), (255, 255, 255), None, "Elephant")
pygame.time.wait(400)
setText("A", 90, (270, 250), (255, 255, 255), None, "Elephant")
pygame.time.wait(400)
setText("M", 90, (360, 250), (255, 255, 255), None, "Elephant")
pygame.time.wait(400)
setText("E", 90, (460, 250), (255, 255, 255), None, "Elephant")
pygame.time.wait(400)
setText("O", 90, (630, 250), (5, 96, 196), None, "Elephant")
pygame.time.wait(400)
setText("V", 90, (720, 250), (5, 96, 196), None, "Elephant")
pygame.time.wait(400)
setText("E", 90, (810, 250), (5, 96, 196), None, "Elephant")
pygame.time.wait(400)
setText("R", 90, (900, 250), (5, 96, 196), None, "Elephant")
pygame.time.wait(400)
pygame.draw.rect(game_over_window, (244, 122, 11), (display_width / 2 - 200, 420, 420, 90))
setText("Back to Main Menu", 50, (display_width / 2 - 190, 430), (255, 255, 255))
pygame.draw.rect(game_over_window, (255, 255, 128), (display_width / 2 - 110, 540, 180, 95))
setText("Credits", 60, (display_width / 2 - 110, 550), (0, 128, 127), None, "Forte")
while game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
mouse = pygame.mouse.get_pos()
if display_width / 2 + 220 > mouse[0] > display_width / 2 - 200 and 510 > mouse[1] > 420:
pygame.draw.rect(game_over_window, (128, 128, 255), (display_width / 2 - 200, 420, 420, 90))
setText("Back to Main Menu", 50, (display_width / 2 - 190, 430), (255, 0, 0))
if event.type == pygame.MOUSEBUTTONDOWN:
game_over = False
game_play()
else:
pygame.draw.rect(game_over_window, (244, 122, 11), (display_width / 2 - 200, 420, 420, 90))
setText("Back to Main Menu", 50, (display_width / 2 - 190, 430), (255, 255, 255))
if display_width / 2 + 70 > mouse[0] > display_width / 2 - 110 and 635 > mouse[1] > 540:
pygame.draw.rect(game_over_window, (235, 125, 255), (display_width / 2 - 110, 540, 180, 95))
setText("Credits", 60, (display_width / 2 - 110, 550), (0, 128, 127), None, "Forte")
if event.type == pygame.MOUSEBUTTONDOWN:
credit_clicked = True
game_over = False
else:
pygame.draw.rect(game_over_window, (255, 255, 128), (display_width / 2 - 110, 540, 180, 95))
setText("Credits", 60, (display_width / 2 - 110, 550), (0, | |
<gh_stars>1-10
"""
A module for simple MPI communication.
The SimpleComm class is designed to provide a simplified MPI-based
communication strategy using the MPI4Py module.
To accomplish this task, the SimpleComm object provides a single communication
pattern with a simple, light-weight API. The communication pattern is a
common 'manager'/'worker' pattern, with the 0th rank assumed to be the
'manager' rank. The SimpleComm API provides a way of sending data out from the
'manager' rank to the 'worker' ranks, and for collecting the data from the
'worker' ranks back on the 'manager' rank.
**PARTITIONING:**
Within the SimpleComm paradigm, the 'manager' rank is assumed to be responsible
for partitioning (or distributing) the necessary work to the 'worker' ranks.
The *partition* method provides this functionality. Using a *partition
function*, the *partition* method takes data known on the 'manager' rank and
gives each 'worker' rank a part of the data according to the algorithm of the
partition function.
The *partition* method is *synchronous*, meaning that every rank (from the
'manager' rank to all of the 'worker' ranks) must be in sync when the method
is called. This means that every rank must participate in the call, and
every rank will wait until all of the data has been partitioned before
continuing. Remember, whenever the 'manager' rank speaks, all of the
'worker' ranks listen! And they continue to listen until dismissed by the
'manager' rank.
Additionally, the 'manager' rank can be considered *involved* or *uninvolved*
in the partition process. If the 'manager' rank is *involved*, then the
'manager' rank will take a part of the data for itself. If the 'manager' is
*uninvolved*, then the data will be partitioned only across the 'worker' ranks.
*Partitioning* is a *synchronous* communication call that implements a
*static partitioning* algorithm.
**RATIONING:**
An alternative approach to the *partitioning* communication method is the
*rationing* communication method. This method involves the individual
'worker' ranks requesting data to work on. In this approach, each 'worker'
rank, when the 'worker' rank is ready, asks the 'manager' rank for a new
piece of data on which to work. The 'manager' rank receives the request
and gives the next piece of data for processing out to the requesting
'worker' rank. It doesn't matter what order the ranks request data, and
they do not all have to request data at the same time. However, it is
critical to understand that if a 'worker' requests data when the 'manager'
rank does not listen for the request, or the 'manager' expects a 'worker'
to request work but the 'worker' never makes the request, the entire
process will hang and wait forever!
*Rationing* is an *asynchronous* communication call that allows the 'manager'
to implement a *dynamic partitioning* algorithm.
**COLLECTING:**
Once each 'worker' has received its assigned part of the data, the 'worker'
will perform some work pertaining to the data it received. In such a case,
the 'worker' may (though not necessarily) return one or more results back to
the 'manager'. The *collect* method provides this functionality.
The *collect* method is *asynchronous*, meaning that each 'worker' rank can send
its data back to the 'manager' at any time and in any order. Since the 'manager'
rank does not care where the data came from, the 'manager' rank simply receives
the result from the 'worker' rank and processes it. Hence, all that matters
is that for every *collect* call made by all of the 'worker' ranks, a *collect*
call must also be made by the 'manager' rank.
The *collect* method is a *handshake* method, meaning that while the 'manager'
rank doesn't care which 'worker' rank sends it data, the 'manager' rank does
acknowledge the 'worker' rank and record the 'worker' rank's identity.
**REDUCING:**
In general, it is assumed that each 'worker' rank works independently from the
other 'worker' ranks. However, it may be occasionally necessary for the
'worker' ranks to know something about the work being done on (or the data
given to) each of the other ranks. The only allowed communication of this
type is provided by the *allreduce* method.
The *allreduce* method allows for *reductions* of the data distributed across
all of the ranks to be made available to every rank. Reductions are operations
such as 'max', 'min', 'sum', and 'prod', which compute and distribute to the
ranks the 'maximum', 'minimum', 'sum', or 'product' of the data distributed
across the ranks. Since the *reduction* computes a reduced quantity of data
distributed across all ranks, the *allreduce* method is a *synchronous* method
(i.e., all ranks must participate in the call, including the 'manager').
**DIVIDING:**
It can be occasionally useful to subdivide the 'worker' ranks into different
groups to perform different tasks in each group. When this is necessary, the
'manager' rank will assign itself and each 'worker' rank a *color* ID. Then,
the 'manager' will assign each rank (including itself) to 2 new groups:
1. Each rank with the same color ID will be assigned to the same group, and
within this new *color* group, each rank will be given a new rank ID
ranging from 0 (identifying the color group's 'manager' rank) to the number
of 'worker' ranks in the color group. This is called
the *monocolor* grouping.
2. Each rank with the same new rank ID across all color groups will be assigned
to the same group. Hence, all ranks with rank ID 0 (but different color
IDs) will be in the same group, all ranks with rank ID 1 (but different
color IDs) will be the in another group, etc. This is called the
*multicolor* grouping. NOTE: This grouping will look like grouping (1)
except with the rank ID and the color ID swapped.
The *divide* method provides this functionality, and it returns 2 new
SimpleComm objects for each of the 2 groupings described above. This means
that within each group, the same *partition*, *collecting*, and *reducing*
operations can be performed in the same way as described above for the *global*
group.
Copyright 2020 University Corporation for Atmospheric Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from functools import partial, reduce  # noqa: UnusedImport -- used via eval() in _OP_MAP (reduce is needed on Python 3)
# Define the supported reduction operators
OPERATORS = ['sum', 'prod', 'max', 'min']
# Define the reduction operators map (maps operator names to function names).
# The 'py' function names are passed to 'eval(*)' and executed as python code.
# The 'np' function names are passed to 'getattr(numpy,*)' and executed as
# numpy code. The 'mpi' function names are passed to 'getattr(mpi4py,*)'
# and return an MPI operator object which is passed as an argument to MPI
# reduce functions.
_OP_MAP = {
'sum': {'py': 'sum', 'np': 'sum', 'mpi': 'SUM'},
'prod': {'py': 'partial(reduce, lambda x, y: x * y)', 'np': 'prod', 'mpi': 'PROD'},
'max': {'py': 'max', 'np': 'max', 'mpi': 'MAX'},
'min': {'py': 'min', 'np': 'min', 'mpi': 'MIN'},
}
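# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# It shows how the entries of _OP_MAP are intended to be turned into
# callables, following the comment above: 'py' names are evaluated as Python
# code and 'np' names are looked up on the numpy module.  Where exactly the
# classes below do this is not shown here, so treat this helper purely as an
# illustration.
def _example_resolve_operator(name, numpy_module=None):
    """Return a callable implementing the named reduction operator."""
    if numpy_module is not None:
        # e.g. 'sum' -> numpy.sum, 'prod' -> numpy.prod
        return getattr(numpy_module, _OP_MAP[name]['np'])
    # e.g. 'prod' -> partial(reduce, lambda x, y: x * y), built via eval()
    return eval(_OP_MAP[name]['py'])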
def create_comm(serial=False):
"""
This is a factory function for creating SimpleComm objects.
Depending on the argument given, it returns an instance of a serial or
parallel SimpleComm object.
Keyword Arguments:
serial (bool): A boolean flag with True indicating the desire for a
            serial SimpleComm instance, and False indicating the
desire for a parallel SimpleComm instance.
Returns:
SimpleComm: An instance of a SimpleComm object, either serial
(if serial == True) or parallel (if serial == False)
Raises:
TypeError: if the serial argument is not a bool.
Examples:
>>> sercomm = create_comm(serial=True)
>>> type(sercomm)
<class 'simplecomm.SimpleComm'>
>>> parcomm = create_comm()
>>> type(parcomm)
<class 'simplecomm.SimpleCommMPI'>
"""
if type(serial) is not bool:
raise TypeError('Serial parameter must be a bool')
if serial:
return SimpleComm()
else:
return SimpleCommMPI()
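# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# It strings together the 'manager'/'worker' pattern described in the module
# docstring.  The partition() and allreduce() call signatures used below are
# assumptions made only for illustration -- check the real method
# definitions before relying on them.
def _example_manager_worker_pattern():
    """Partition a list across ranks and reduce the per-rank sums."""
    comm = create_comm(serial=False)
    data = list(range(100))
    # Partitioning is synchronous: every rank participates in the call, and
    # each rank receives only its own part of the data (assumed signature).
    my_part = comm.partition(data)
    local_sum = sum(my_part)
    # Every rank, including the 'manager', learns the global sum
    # (assumed signature).
    return comm.allreduce(local_sum, 'sum')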
class SimpleComm(object):
"""
Simple Communicator for serial operation.
Attributes:
_numpy: Reference to the Numpy module, if found
_color: The color associated with the communicator, if colored
_group: The group ID associated with the communicator's color
"""
def __init__(self):
"""
Constructor.
"""
# Try importing the Numpy module
try:
import numpy
        except ImportError:
            numpy = None
        # Keep a reference to the Numpy module, if found
self._numpy = numpy
# The color ID associated with this | |
'Tooraweenah'},
'61268608':{'en': 'Trangie'},
'61268609':{'en': 'Tyrie'},
'61268610':{'en': 'Bruie Plains'},
'61268611':{'en': 'Parkes'},
'61268612':{'en': 'Parkes'},
'61268613':{'en': 'Alectown'},
'61268614':{'en': 'Bindogundra'},
'61268615':{'en': 'Bogan Gate'},
'61268616':{'en': 'Mandagery'},
'61268617':{'en': 'Peak Hill'},
'61268618':{'en': 'Yarrabandai'},
'61268619':{'en': 'Mungery'},
'6126862':{'en': 'Parkes'},
'6126863':{'en': 'Parkes'},
'6126864':{'en': 'Bogan Gate'},
'61268642':{'en': 'Yarrabandai'},
'61268650':{'en': 'Bruie Plains'},
'61268651':{'en': 'Mandagery'},
'61268652':{'en': 'Alectown'},
'61268653':{'en': 'Alectown'},
'61268654':{'en': 'Mungery'},
'61268655':{'en': 'Parkes'},
'61268656':{'en': 'Peak Hill'},
'61268657':{'en': 'Yarrabandai'},
'61268658':{'en': 'Gollan'},
'61268659':{'en': 'Stuart Town'},
'61268660':{'en': 'Mandagery'},
'61268661':{'en': 'Mandagery'},
'61268662':{'en': 'Bindogundra'},
'61268663':{'en': 'Bindogundra'},
'61268664':{'en': 'Bindogundra'},
'61268665':{'en': 'Wellington'},
'61268666':{'en': 'Yeoval'},
'61268667':{'en': 'Baradine'},
'61268668':{'en': 'Binnaway'},
'61268669':{'en': 'Coalbaggie'},
'61268670':{'en': 'Mandagery'},
'61268671':{'en': 'Mandagery'},
'61268672':{'en': 'Dubbo'},
'61268673':{'en': 'Dubbo'},
'61268674':{'en': 'Dubbo'},
'61268675':{'en': 'Berkley Downs'},
'61268676':{'en': 'Berkley Downs'},
'61268677':{'en': 'Bourke'},
'61268678':{'en': 'Bourke'},
'61268679':{'en': 'Dubbo'},
'61268680':{'en': 'Yarrabandai'},
'61268681':{'en': 'Yarrabandai'},
'61268682':{'en': 'Cobar'},
'61268683':{'en': 'Cobar'},
'61268684':{'en': 'Lake Cargelligo'},
'61268685':{'en': 'Lake Cargelligo'},
'61268686':{'en': 'Stuart Town'},
'61268687':{'en': 'Stuart Town'},
'61268688':{'en': 'Gilgandra'},
'61268689':{'en': 'Gilgandra'},
'6126869':{'en': 'Peak Hill'},
'61268697':{'en': 'Mungery'},
'61268698':{'en': 'Mungery'},
'61268699':{'en': 'Bruie Plains'},
'6126870':{'en': 'Bourke'},
'61268710':{'en': 'Barrier'},
'61268711':{'en': 'Barrinford'},
'61268712':{'en': 'Bourke'},
'61268713':{'en': 'Brewarrina'},
'61268714':{'en': 'Cobar'},
'61268715':{'en': 'Cuttaburra'},
'61268716':{'en': 'Narran'},
'61268717':{'en': 'Albert'},
'61268718':{'en': 'Banar'},
'61268719':{'en': 'Bobadah'},
'61268720':{'en': 'Bourke'},
'61268721':{'en': 'Bourke'},
'61268722':{'en': 'Bourke'},
'61268723':{'en': 'Bourke'},
'61268724':{'en': 'Bourke'},
'61268725':{'en': 'Cuttaburra'},
'61268726':{'en': 'Cuttaburra'},
'61268727':{'en': 'Cuttaburra'},
'61268728':{'en': 'Cuttaburra'},
'61268729':{'en': 'Cuttaburra'},
'61268730':{'en': 'Boona Mountain'},
'61268731':{'en': 'Condobolin'},
'61268732':{'en': 'Double Peaks'},
'61268733':{'en': 'Fairholme'},
'61268734':{'en': 'Kiacatoo'},
'61268735':{'en': 'Lake Cargelligo'},
'61268736':{'en': '<NAME>'},
'61268737':{'en': 'Myamley'},
'61268738':{'en': 'Naradhan'},
'61268739':{'en': 'Tottenham'},
'6126874':{'en': 'Cuttaburra'},
'61268744':{'en': 'Narran'},
'61268745':{'en': 'Trundle'},
'61268750':{'en': 'Binnaway'},
'61268751':{'en': 'Coalbaggie'},
'61268752':{'en': 'Collie'},
'61268753':{'en': 'Coonabarabran'},
'61268754':{'en': 'Curban'},
'61268755':{'en': 'Dandaloo'},
'61268756':{'en': 'Dubbo'},
'61268757':{'en': 'Farrendale'},
'61268758':{'en': 'Geurie'},
'61268759':{'en': 'Gilgandra'},
'61268760':{'en': 'Gulargambone'},
'61268761':{'en': 'Magometon'},
'61268762':{'en': 'Quambone'},
'61268763':{'en': 'Teridgerie'},
'61268764':{'en': 'Warrington'},
'61268765':{'en': 'Warrumbungle'},
'61268766':{'en': 'Airlands'},
'61268767':{'en': 'Balladoran'},
'61268768':{'en': 'Ballimore'},
'61268769':{'en': 'Baradine'},
'61268770':{'en': 'Myamley'},
'61268771':{'en': 'Naradhan'},
'61268772':{'en': 'Tottenham'},
'61268773':{'en': 'Trundle'},
'61268774':{'en': 'Tullamore'},
'61268775':{'en': 'Buckinguy'},
'61268776':{'en': 'Carinda'},
'61268777':{'en': 'Coonamble'},
'61268778':{'en': 'Gilgooma'},
'61268779':{'en': 'Ginghet'},
'61268780':{'en': 'Tullamore'},
'61268781':{'en': 'Buckinguy'},
'61268782':{'en': 'Carinda'},
'61268783':{'en': 'Coonamble'},
'61268784':{'en': 'Gilgooma'},
'61268785':{'en': 'Ginghet'},
'61268786':{'en': 'Gulargambone'},
'61268787':{'en': 'Magometon'},
'61268788':{'en': 'Quambone'},
'61268789':{'en': 'Teridgerie'},
'61268790':{'en': 'Boona Mountain'},
'61268791':{'en': 'Condobolin'},
'61268792':{'en': 'Double Peaks'},
'61268793':{'en': 'Fairholme'},
'61268794':{'en': 'Kiacatoo'},
'61268795':{'en': 'Lake Cargelligo'},
'61268796':{'en': '<NAME>'},
'61268797':{'en': 'Cobar'},
'61268798':{'en': 'Cobar'},
'61268799':{'en': 'Cobar'},
'61268800':{'en': 'Yarragrin'},
'61268801':{'en': 'Curban'},
'61268802':{'en': 'Ballimore'},
'61268803':{'en': 'Warrington'},
'61268804':{'en': 'Geurie'},
'61268805':{'en': 'Gilgandra'},
'61268806':{'en': 'Mendooran'},
'61268807':{'en': 'Tooraweenah'},
'61268808':{'en': 'Trangie'},
'61268809':{'en': 'Tyrie'},
'6126881':{'en': 'Dubbo'},
'6126882':{'en': 'Dubbo'},
'6126883':{'en': 'Dubbo'},
'61268830':{'en': 'Dandaloo'},
'61268837':{'en': 'Warren'},
'61268838':{'en': 'Coalbaggie'},
'61268839':{'en': 'Collie'},
'6126884':{'en': 'Dubbo'},
'6126885':{'en': 'Dubbo'},
'61268860':{'en': 'Mendooran'},
'61268861':{'en': 'Mendooran'},
'61268862':{'en': 'Mendooran'},
'61268863':{'en': 'Neilrex'},
'61268864':{'en': 'Neilrex'},
'61268865':{'en': 'Ballimore'},
'61268866':{'en': 'Ballimore'},
'61268867':{'en': 'Ballimore'},
'61268868':{'en': 'Warrumbungle'},
'61268869':{'en': 'Airlands'},
'61268870':{'en': 'Geurie'},
'61268871':{'en': 'Geurie'},
'61268872':{'en': 'Dubbo'},
'61268873':{'en': 'Dubbo'},
'61268874':{'en': 'Dubbo'},
'61268875':{'en': 'Dubbo'},
'61268876':{'en': 'Coalbaggie'},
'61268877':{'en': 'Geurie'},
'61268878':{'en': 'Geurie'},
'61268879':{'en': 'Coalbaggie'},
'61268880':{'en': 'Balladoran'},
'61268881':{'en': 'Balladoran'},
'61268882':{'en': 'Ballimore'},
'61268883':{'en': 'Tyrie'},
'61268884':{'en': 'Dandaloo'},
'61268885':{'en': 'Dubbo'},
'61268886':{'en': 'Trangie'},
'61268887':{'en': 'Trangie'},
'61268888':{'en': 'Trangie'},
'61268889':{'en': 'Trangie'},
'6126889':{'en': 'Narromine'},
'61268890':{'en': 'Farrendale'},
'61268898':{'en': 'Wyanga'},
'61268900':{'en': 'Naradhan'},
'61268901':{'en': 'Condobolin'},
'61268902':{'en': 'Baradine'},
'61268903':{'en': 'Bobadah'},
'61268904':{'en': 'Condobolin'},
'61268905':{'en': 'Condobolin'},
'61268906':{'en': 'Fairholme'},
'61268907':{'en': 'Kiacatoo'},
'61268908':{'en': 'Lake Cargelligo'},
'61268909':{'en': 'Double Peaks'},
'61268910':{'en': 'Condobolin'},
'61268911':{'en': 'Mount Herring'},
'61268912':{'en': 'Condobolin'},
'61268913':{'en': 'Tullamore'},
'61268914':{'en': 'Boona Mountain'},
'61268915':{'en': 'Myamley'},
'61268916':{'en': 'Tottenham'},
'61268917':{'en': 'Trundle'},
'61268918':{'en': 'Binnaway'},
'61268919':{'en': 'Condobolin'},
'61268920':{'en': 'Tullamore'},
'61268921':{'en': 'Trundle'},
'61268922':{'en': 'Trundle'},
'61268923':{'en': 'Tullamore'},
'61268924':{'en': 'Tottenham'},
'61268925':{'en': 'Tullamore'},
'61268926':{'en': 'Tullamore'},
'61268927':{'en': 'Trundle'},
'61268928':{'en': 'Albert'},
'61268929':{'en': 'Myamley'},
'61268930':{'en': 'Myamley'},
'61268931':{'en': 'Myamley'},
'61268932':{'en': 'Myamley'},
'61268933':{'en': 'Myamley'},
'61268934':{'en': 'Tottenham'},
'61268935':{'en': 'Tullamore'},
'61268936':{'en': 'Myamley'},
'61268937':{'en': '<NAME>'},
'61268938':{'en': '<NAME>'},
'61268939':{'en': '<NAME>'},
'61268940':{'en': 'Coalbaggie'},
'61268941':{'en': 'Collie'},
'61268942':{'en': 'Coonabarabran'},
'61268943':{'en': 'Curban'},
'61268944':{'en': 'Dandaloo'},
'61268945':{'en': 'Farrendale'},
'61268946':{'en': 'Geurie'},
'61268947':{'en': 'Gilgandra'},
'61268948':{'en': 'Goorianawa'},
'61268949':{'en': 'Gwabegar'},
'6126895':{'en': 'Condobolin'},
'61268957':{'en': 'Banar'},
'61268958':{'en': 'Mendooran'},
'61268959':{'en': 'Narromine'},
'61268960':{'en': '<NAME>'},
'61268961':{'en': 'Mount Herring'},
'61268962':{'en': 'Mount Herring'},
'61268963':{'en': 'Bobadah'},
'61268964':{'en': 'Fairholme'},
'61268965':{'en': 'Kiacatoo'},
'61268966':{'en': 'Lake Cargelligo'},
'61268967':{'en': 'Double Peaks'},
'61268968':{'en': 'Double Peaks'},
'61268969':{'en': 'Naradhan'},
'61268970':{'en': 'Fairholme'},
'61268971':{'en': 'Fairholme'},
'61268972':{'en': 'Fairholme'},
'61268973':{'en': 'Fairholme'},
'61268974':{'en': 'Bobadah'},
'61268975':{'en': 'Fairholme'},
'61268976':{'en': 'Lake Cargelligo'},
'61268977':{'en': 'Double Peaks'},
'61268978':{'en': 'Double Peaks'},
'61268979':{'en': 'Double Peaks'},
'6126898':{'en': 'Lake Cargelligo'},
'61268987':{'en': 'Naradhan'},
'61268988':{'en': 'Naradhan'},
'61268989':{'en': 'Naradhan'},
'61268990':{'en': 'Barrier'},
'61268991':{'en': 'Barrinford'},
'61268992':{'en': 'Bourke'},
'61268993':{'en': 'Brewarrina'},
'61268994':{'en': 'Cobar'},
'61268995':{'en': 'Cuttaburra'},
'61268996':{'en': 'Narran'},
'61268997':{'en': 'Albert'},
'61268998':{'en': 'Banar'},
'61268999':{'en': 'Bobadah'},
'61269000':{'en': '<NAME>'},
'61269001':{'en': '<NAME>'},
'61269002':{'en': '<NAME>'},
'61269003':{'en': 'Adelong'},
'61269004':{'en': 'Alleena'},
'61269005':{'en': 'Ardlethan'},
'61269006':{'en': '<NAME>'},
'61269007':{'en': 'Bambilla'},
'61269008':{'en': 'Barellan'},
'61269009':{'en': 'Barmedman'},
'61269010':{'en': 'Barmedman East'},
'61269011':{'en': 'Batlow'},
'61269012':{'en': 'Bethungra'},
'61269013':{'en': 'Bidgeemia'},
'61269014':{'en': 'Black Stump'},
'61269015':{'en': 'Booroorban'},
'61269016':{'en': 'Boree Creek'},
'61269017':{'en': 'Bunda'},
'61269018':{'en': 'Bundure'},
'61269019':{'en': 'Burcher'},
'61269020':{'en': 'Burra'},
'61269021':{'en': 'Carabost'},
'61269022':{'en': 'Carrathool'},
'61269023':{'en': 'Coleambally'},
'61269024':{'en': 'Coolac'},
'61269025':{'en': 'Coolamon'},
'61269026':{'en': 'Cootamundra'},
'61269027':{'en': 'Cowabbie'},
'61269028':{'en': 'Currawarna'},
'61269029':{'en': '<NAME>'},
'61269030':{'en': 'Egansford'},
'61269031':{'en': '<NAME>'},
'61269032':{'en': 'Galore'},
'61269033':{'en': 'Ganmain'},
'61269034':{'en': 'Goolgowi'},
'61269035':{'en': 'Griffith'},
'61269036':{'en': 'Grong Grong'},
'61269037':{'en': 'Gunbar'},
'61269038':{'en': 'Gundagai'},
'61269039':{'en': 'Hay'},
'61269040':{'en': 'Henty'},
'61269041':{'en': 'Hillston'},
'61269042':{'en': 'Humula'},
'61269043':{'en': 'Ivanhoe'},
'61269044':{'en': 'Junee'},
'61269045':{'en': '<NAME>'},
'61269046':{'en': 'Kikoira'},
'61269047':{'en': 'Kyeamba'},
'61269048':{'en': 'Lachlan'},
'61269049':{'en': 'Landervale'},
'61269050':{'en': 'Leeton'},
'61269051':{'en': 'Lockhart'},
'61269052':{'en': 'Mangoplah'},
'61269053':{'en': 'Mannus'},
'61269054':{'en': 'Marsden'},
'61269055':{'en': 'Maude'},
'61269056':{'en': 'Melbergen'},
'61269057':{'en': 'Merriwagga'},
'61269058':{'en': 'Milbrulong'},
'61269059':{'en': 'Morundah'},
'61269060':{'en': 'Nangus'},
'61269061':{'en': 'Narraburra'},
'61269062':{'en': 'Narrandera'},
'61269063':{'en': 'Rankins Springs'},
'61269064':{'en': 'Rannock'},
'61269065':{'en': 'Sandigo'},
'61269066':{'en': 'Springdale'},
'61269067':{'en': 'Stanbridge'},
'61269068':{'en': 'Stockinbingal'},
'61269069':{'en': 'Talbingo'},
'61269070':{'en': 'Tallimba'},
'61269071':{'en': 'Tarcutta'},
'61269072':{'en': 'Temora'},
'61269073':{'en': 'The Rock'},
'61269074':{'en': 'Tooma'},
'61269075':{'en': 'Tullibigeal'},
'61269076':{'en': 'Tumbarumba'},
'61269077':{'en': 'Tumorrama'},
'61269078':{'en': 'Tumut'},
'61269079':{'en': 'Ungarie'},
'61269080':{'en': 'Urana'},
'61269081':{'en': 'Wallanthery'},
'61269082':{'en': 'Wallendbeen'},
'61269083':{'en': 'Wantabadgery'},
'61269084':{'en': 'Warralonga'},
'61269085':{'en': 'Warrawidgee'},
'61269086':{'en': 'Wee Elwah'},
'61269087':{'en': 'Weethalle'},
'61269088':{'en': 'West Wyalong'},
'61269089':{'en': 'Winchendon Vale'},
'6126909':{'en': 'Griffith'},
'61269100':{'en': 'Yaven Creek'},
'61269101':{'en': 'Yenda'},
'61269102':{'en': 'Griffith'},
'61269103':{'en': 'Wagga Wagga'},
'61269104':{'en': 'Wagga Wagga'},
'61269105':{'en': 'Griffith'},
'61269106':{'en': 'Stockinbingal'},
'61269107':{'en': 'Talbingo'},
'61269108':{'en': 'Tooma'},
'61269109':{'en': 'Tumbarumba'},
'61269110':{'en': 'Wagga Wagga'},
'61269111':{'en': 'Tumorrama'},
'61269112':{'en': 'Tumut'},
'61269113':{'en': 'Wallendbeen'},
'61269114':{'en': 'Yaven Creek'},
'61269115':{'en': 'Adelong'},
'61269116':{'en': 'Batlow'},
'61269117':{'en': 'Bethungra'},
'61269118':{'en': 'Burra'},
'61269119':{'en': 'Carabost'},
'61269120':{'en': 'Coolac'},
'61269121':{'en': 'Cootamundra'},
'61269122':{'en': 'Gundagai'},
'61269123':{'en': 'Mannus'},
'61269124':{'en': 'Nangus'},
'61269125':{'en': 'Melbergen'},
'61269126':{'en': 'Merriwagga'},
'61269127':{'en': 'Rankins Springs'},
'61269128':{'en': 'Wallanthery'},
'61269129':{'en': 'Warrawidgee'},
'61269130':{'en': '<NAME>'},
'61269131':{'en': 'Yenda'},
'61269132':{'en': 'Bunda'},
'61269133':{'en': '<NAME>'},
'61269134':{'en': 'Goolgowi'},
'61269135':{'en': 'Griffith'},
'61269136':{'en': 'Gunbar'},
'61269137':{'en': 'Hillston'},
'61269138':{'en': 'Barellan'},
'61269139':{'en': '<NAME>'},
'61269140':{'en': 'Hay'},
'61269141':{'en': 'Ivanhoe'},
'61269142':{'en': 'Lachlan'},
'61269143':{'en': 'Maude'},
'61269144':{'en': 'Bambilla'},
'61269145':{'en': 'Booroorban'},
'61269146':{'en': 'Carrathool'},
'61269147':{'en': 'Leeton'},
'61269148':{'en': 'Morundah'},
'61269149':{'en': 'Narrandera'},
'61269150':{'en': 'Sandigo'},
'61269151':{'en': 'Stanbridge'},
'61269152':{'en': 'Bundure'},
'61269153':{'en': 'Coleambally'},
'61269154':{'en': 'Egansford'},
'61269155':{'en': '<NAME>'},
'61269156':{'en': '<NAME>'},
'61269157':{'en': 'Landervale'},
'61269158':{'en': 'Ardlethan'},
'61269159':{'en': 'Ariah Park'},
'61269160':{'en': 'Barmedman'},
'61269161':{'en': 'Barmedman East'},
'61269162':{'en': 'Narraburra'},
'61269163':{'en': 'Springdale'},
'61269164':{'en': 'Temora'},
'61269165':{'en': 'Mangoplah'},
'61269166':{'en': 'Milbrulong'},
'61269167':{'en': 'Rannock'},
'61269168':{'en': 'Tarcutta'},
'61269169':{'en': 'The Rock'},
'61269170':{'en': 'Urana'},
'61269171':{'en': '<NAME>'},
'61269172':{'en': 'Wantabadgery'},
'61269173':{'en': '<NAME>'},
'61269174':{'en': 'Cowabbie'},
'61269175':{'en': 'Currawarna'},
'61269176':{'en': 'Galore'},
'61269177':{'en': 'Ganmain'},
'61269178':{'en': 'Henty'},
'61269179':{'en': 'Humula'},
'61269180':{'en': 'Junee'},
'61269181':{'en': 'Junee Reefs'},
'61269182':{'en': 'Kyeamba'},
'61269183':{'en': 'Lockhart'},
'61269184':{'en': 'Bidgeemia'},
'61269185':{'en': 'Boree Creek'},
'61269186':{'en': 'Coolamon'},
'61269187':{'en': 'Burcher'},
'61269188':{'en': 'Kikoira'},
'61269189':{'en': 'Marsden'},
'61269190':{'en': 'Tallimba'},
'61269191':{'en': 'Tullibigeal'},
'61269192':{'en': 'Ungarie'},
'61269193':{'en': 'Warralonga'},
'61269194':{'en': 'Weethalle'},
'61269195':{'en': 'West Wyalong'},
'61269196':{'en': 'Alleena'},
'61269197':{'en': 'Weethalle'},
'612691980':{'en': 'Rannock'},
'612691981':{'en': 'Rannock'},
'612691982':{'en': 'Rannock'},
'612691983':{'en': 'Rannock'},
'612691984':{'en': 'Wagga Wagga'},
'612691985':{'en': 'Rannock/Wagga Wagga'},
'612691986':{'en': 'Rannock/Wagga Wagga'},
'612691987':{'en': 'Rannock'},
'612691988':{'en': 'Rannock/Wagga Wagga'},
'612691989':{'en': 'Rannock/Wagga Wagga'},
'61269199':{'en': 'Tarcutta'},
'61269200':{'en': 'The Rock'},
'61269201':{'en': 'The Rock'},
'61269202':{'en': 'The Rock'},
'61269203':{'en': 'The Rock'},
'61269204':{'en': 'Lockhart'},
'61269205':{'en': 'Lockhart'},
'61269206':{'en': 'Milbrulong'},
'61269207':{'en': 'Bidgeemia'},
'61269208':{'en': 'Urana'},
'61269209':{'en': 'Urana'},
'6126921':{'en': 'Wagga Wagga'},
'6126922':{'en': 'Wagga Wagga'},
'6126923':{'en': 'Wagga Wagga'},
'6126924':{'en': 'Junee'},
'61269247':{'en': 'Junee Reefs'},
'6126925':{'en': 'Wagga Wagga'},
'6126926':{'en': 'Wagga Wagga'},
'61269270':{'en': 'Wagga Wagga'},
'61269271':{'en': 'Boree Creek'},
'61269272':{'en': 'Coolamon'},
'61269273':{'en': 'Coolamon'},
'61269274':{'en': 'Coolamon'},
'61269275':{'en': 'Winchendon Vale'},
'61269276':{'en': 'Ganmain'},
'61269277':{'en': 'Ganmain'},
'61269278':{'en': 'Rannock'},
'61269279':{'en': 'Cowabbie'},
'61269280':{'en': 'Kyeamba'},
'61269281':{'en': 'Kyeamba'},
'61269282':{'en': 'Currawarna'},
'61269283':{'en': 'Galore'},
'61269284':{'en': 'Wantabadgery'},
'61269285':{'en': 'Mangoplah'},
'61269286':{'en': 'Mangoplah'},
'61269287':{'en': 'Tarcutta'},
'61269288':{'en': 'Tarcutta'},
'61269289':{'en': 'Humula'},
'6126929':{'en': 'Henty'},
'61269290':{'en': 'Currawarna'},
'61269291':{'en': 'Currawarna'},
'61269295':{'en': 'Lockhart'},
'612693':{'en': 'Wagga Wagga'},
'61269300':{'en': '<NAME>'},
'61269301':{'en': 'Coolamon'},
'61269302':{'en': 'Cowabbie'},
'61269303':{'en': 'Galore'},
'61269304':{'en': 'Ganmain'},
'61269305':{'en': 'Junee'},
'61269306':{'en': 'Junee'},
'61269307':{'en': 'Lockhart'},
'61269308':{'en': 'Rannock'},
'61269309':{'en': 'Urana'},
'61269340':{'en': 'Adelong'},
'61269341':{'en': 'Batlow'},
'61269342':{'en': 'Springdale'},
'61269343':{'en': 'Bidgeemia'},
'61269344':{'en': 'Coolamon'},
'61269345':{'en': 'Ganmain'},
'61269346':{'en': 'Junee'},
'61269347':{'en': 'Lockhart'},
'61269348':{'en': '<NAME>'},
'61269349':{'en': 'Wantabadgery'},
'61269350':{'en': 'Bethungra'},
'61269351':{'en': 'Burra'},
'61269352':{'en': 'Carabost'},
'61269353':{'en': 'Coolac'},
'61269354':{'en': 'Cootamundra'},
'61269355':{'en': 'Gundagai'},
'61269356':{'en': 'Mannus'},
'61269357':{'en': 'Nangus'},
'61269358':{'en': 'Stockinbingal'},
'61269359':{'en': 'Talbingo'},
'61269380':{'en': 'Kyeamba'},
'61269387':{'en': 'Henty'},
'61269388':{'en': 'Humula'},
'61269389':{'en': '<NAME>'},
'61269390':{'en': 'Currawarna'},
'61269391':{'en': 'Mangoplah'},
'61269392':{'en': 'Milbrulong'},
'61269393':{'en': 'Tarcutta'},
'61269394':{'en': 'The Rock'},
'61269396':{'en': 'Wantabadgery'},
'61269398':{'en': 'Bidgeemia'},
'61269399':{'en': 'Boree Creek'},
'61269400':{'en': 'Tooma'},
'61269401':{'en': 'Cootamundra'},
'61269402':{'en': 'Cootamundra'},
'61269403':{'en': 'Bethungra'},
'61269404':{'en': 'Stockinbingal'},
'61269405':{'en': 'Wallendbeen'},
'61269406':{'en': 'Gundagai'},
'61269407':{'en': 'Burra'},
'61269408':{'en': 'Coolac'},
'61269409':{'en': 'Nangus'},
'61269410':{'en': 'Mannus'},
'61269411':{'en': 'Tumut'},
'61269412':{'en': 'Tumut'},
'61269413':{'en': 'Adelong'},
'61269414':{'en': 'Batlow'},
'61269415':{'en': 'Talbingo'},
'61269416':{'en': 'Tumorrama'},
'61269417':{'en': 'Yaven Creek'},
'61269418':{'en': 'Tumbarumba'},
'61269419':{'en': 'Carabost'},
'6126942':{'en': 'Cootamundra'},
'61269430':{'en': 'Stockinbingal'},
'61269431':{'en': 'Stockinbingal'},
'61269432':{'en': 'Wallendbeen'},
'61269433':{'en': 'Cootamundra'},
'61269434':{'en': 'Bethungra'},
'61269435':{'en': 'Bethungra'},
'61269436':{'en': 'Coolac'},
'61269437':{'en': 'Coolac'},
'61269438':{'en': 'Coolac'},
'61269439':{'en': 'Coolac'},
'6126944':{'en': 'Gundagai'},
'61269447':{'en': | |
<filename>skrf/media/media.py
'''
.. module:: skrf.media.media
========================================
media (:mod:`skrf.media.media`)
========================================
Contains Media class.
'''
import warnings
import numpy as npy
from scipy import stats
from scipy.constants import c, inch, mil
from ..frequency import Frequency
from ..network import Network, connect
from .. import tlineFunctions as tf
from .. import mathFunctions as mf
from ..mathFunctions import ALMOST_ZERO
class Media(object):
'''
The base-class for all transmission line mediums.
    The :class:`Media` object provides generic methods to produce
    :class:`~skrf.network.Network`'s for any transmission line medium, such
    as :func:`line` and :func:`delay_short`.
The initializer for this class has flexible argument types. This
allows for the important attributes of the :class:`Media` object
to be dynamic. For example, if a Media object's propagation constant
is a function of some attribute of that object, say `conductor_width`,
then the propagation constant will change when that attribute
changes. See :func:`__init__` for details.
The network creation methods build off of each other. For example,
    the special load cases, such as :func:`short` and :func:`open` call
:func:`load` with given arguments for Gamma0, and the delay_ and
shunt_ functions call :func:`line` and :func:`shunt` respectively.
This minimizes re-implementation.
Most methods initialize the :class:`~skrf.network.Network` by
calling :func:`match` to create a 'blank'
:class:`~skrf.network.Network`, and then fill in the s-matrix.
'''
def __init__(self, frequency, propagation_constant,
characteristic_impedance, z0=None):
'''
The Media initializer.
This initializer has flexible argument types. The parameters
        `propagation_constant`, `characteristic_impedance` and `z0` can
all be either static or dynamic. This is achieved by allowing
those arguments to be either:
* functions which take no arguments or
* values (numbers or arrays)
In the case where the media's propagation constant may change
after initialization, because you adjusted a parameter of the
media, then passing the propagation_constant as a function
allows it to change when the media's parameters do.
Parameters
--------------
frequency : :class:`~skrf.frequency.Frequency` object
frequency band of this transmission line medium
propagation_constant : number, array-like, or a function
propagation constant for the medium.
characteristic_impedance : number,array-like, or a function
characteristic impedance of transmission line medium.
z0 : number, array-like, or a function
            the port impedance for the media, IF it is different
            from the characteristic impedance of the transmission
            line medium (None) [a number].
            if z0 is None then it will be set to characteristic_impedance
See Also
---------
:func:`from_csv` : function to create a
Media object from a csv file containing gamma/z0
Notes
------
`propagation_constant` must adhere to the following convention,
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
the z0 parameter is needed in some cases. For example, the
:class:`~skrf.media.rectangularWaveguide.RectangularWaveguide`
is an example where you may need this, because the
characteristic impedance is frequency dependent, but the
        touchstones created by most VNAs have z0=1
'''
self.frequency = frequency.copy()
self.propagation_constant = propagation_constant
self.characteristic_impedance = characteristic_impedance
if z0 is None:
z0 = characteristic_impedance
self.z0 = z0
        # convenience names
self.delay = self.line
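    # Illustrative example (added; not in the original source): any of the
    # three parameters above may be a function of no arguments, which makes
    # the media "dynamic" as described in the __init__ docstring, e.g.
    #
    #     >>> freq = Frequency(1, 10, 101, 'ghz')
    #     >>> med = Media(frequency=freq,
    #     ...             propagation_constant=lambda: 1j * freq.w / c,
    #     ...             characteristic_impedance=50)
    #
    # Here the lambda is re-evaluated on every access of
    # med.propagation_constant, so it tracks later changes to `freq`.
    # (The Frequency constructor arguments shown are an assumption for
    # illustration only.)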
def __getstate__(self):
'''
method needed to allow for pickling
'''
d = self.__dict__.copy()
        del d['delay']  # can't pickle instance methods
return(d)
#return {k: self.__dict__[k] for k in \
# ['frequency','_characteristic_impedance','_propagation_constant','_z0']}
def __eq__(self,other):
'''
test for numerical equality (up to skrf.mathFunctions.ALMOST_ZERO)
'''
if self.frequency != other.frequency:
return False
if max(abs(self.characteristic_impedance - \
other.characteristic_impedance)) > ALMOST_ZERO:
return False
if max(abs(self.propagation_constant - \
other.propagation_constant)) > ALMOST_ZERO:
return False
if max(abs(self.z0 - other.z0)) > ALMOST_ZERO:
return False
return True
def __len__(self):
'''
length of frequency axis
'''
        return len(self.frequency)
## Properties
# note these are made so that a Media type can be constructed with
# propagation_constant, characteristic_impedance, and z0 either as:
# dynamic properties (if they pass a function)
# static ( if they pass values)
@property
def propagation_constant(self):
'''
Propagation constant
The propagation constant can be either a number, array-like, or
        a function. If it is a function, it must take no arguments. The
reason to make it a function is if you want the propagation
constant to be dynamic, meaning changing with some attribute
of the media. See :func:`__init__` for more explanation.
Returns
---------
propagation_constant : :class:`numpy.ndarray`
complex propagation constant for this media
Notes
------
`propagation_constant` must adhere to the following convention,
* positive real(propagation_constant) = attenuation
* positive imag(propagation_constant) = forward propagation
'''
try:
return self._propagation_constant()
except(TypeError):
# _propagation_constant is not a function, so it is
# either a number or a vector. do some
# shape checking and vectorize it if its a number
try:
if len(self._propagation_constant) != \
len(self.frequency):
                    raise(IndexError('frequency and propagation_constant have different lengths'))
except(TypeError):
# _propagation_constant has no len, must be a
# number, return a vectorized copy
return self._propagation_constant *\
npy.ones(len(self.frequency))
return self._propagation_constant
@propagation_constant.setter
def propagation_constant(self, new_propagation_constant):
self._propagation_constant = new_propagation_constant
gamma = propagation_constant
@property
def characteristic_impedance(self):
'''
        Characteristic impedance
The characteristic_impedance can be either a number, array-like, or
        a function. If it is a function, it must take no arguments. The
reason to make it a function is if you want the characterisitc
impedance to be dynamic, meaning changing with some attribute
of the media. See :func:`__init__` for more explanation.
Returns
----------
characteristic_impedance : :class:`numpy.ndarray`
'''
try:
return self._characteristic_impedance()
except(TypeError):
# _characteristic_impedance is not a function, so it is
# either a number or a vector. do some
# shape checking and vectorize it if its a number
try:
if len(self._characteristic_impedance) != \
len(self.frequency):
raise(IndexError('frequency and characteristic_impedance have different lengths '))
except(TypeError):
# _characteristic_impedance has no len, must be a
# number, return a vectorized copy
return self._characteristic_impedance *\
npy.ones(len(self.frequency))
return self._characteristic_impedance
@characteristic_impedance.setter
def characteristic_impedance(self, new_characteristic_impedance):
self._characteristic_impedance = new_characteristic_impedance
Z0 = characteristic_impedance
@property
def z0(self):
'''
Port Impedance
The port impedance is usually equal to the
:attr:`characteristic_impedance`. Therefore, if the port
impedance is `None` then this will return
:attr:`characteristic_impedance`.
However, in some cases such as rectangular waveguide, the port
impedance is traditionally set to 1 (normalized). In such a case
this property may be used.
The Port Impedance can be either a number, array-like, or
        a function. If it is a function, it must take no arguments. The
reason to make it a function is if you want the Port Impedance
to be dynamic, meaning changing with some attribute
of the media. See :func:`__init__` for more explanation.
Returns
----------
port_impedance : :class:`numpy.ndarray`
the media's port impedance
'''
try:
result = self._z0()
return result
except(TypeError):
try:
if len(self._z0) != len(self.characteristic_impedance):
                    raise(IndexError('z0 and characteristic impedance have different shapes'))
except(TypeError):
# z0 has no len, must be a number, so vectorize it
return self._z0 *npy.ones(len(self.characteristic_impedance))
return self._z0
@z0.setter
def z0(self, new_z0):
self._z0 = new_z0
portz0 = z0
@property
def v_p(self):
'''
Complex phase velocity (in m/s)
.. math::
j \cdot \\omega / \\gamma
Notes
-------
The `j` is used so that real phase velocity corresponds to
propagation
where:
* :math:`\\omega` is angular frequency (rad/s),
* :math:`\\gamma` is complex propagation constant (rad/m)
See Also
-----------
        propagation_constant
'''
return 1j*(self.frequency.w/self.propagation_constant)
@property
def v_g(self):
'''
Complex group velocity (in m/s)
.. math::
j \cdot d \\omega / d \\gamma
where:
* :math:`\\omega` is angular frequency (rad/s),
* :math:`\\gamma` is complex propagation constant (rad/m)
See Also
-----------
        propagation_constant
v_p
'''
dw = npy.diff(self.frequency.w)
dk = npy.diff(self.propagation_constant)
return 1j*dw/dk
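    # Note (added for clarity): because v_g is computed with npy.diff, the
    # returned array has one element fewer than the frequency axis -- it is a
    # finite-difference estimate of d(omega)/d(gamma) between adjacent
    # frequency points.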
## Other Functions
def theta_2_d(self,theta,deg=True):
'''
Converts electrical length to physical distance.
The given electrical length is to be at the center frequency.
Parameters
----------
theta : number
electrical length, at band center (see deg for unit)
deg : Boolean
is theta in degrees?
Returns
--------
d : number
physical distance in meters
'''
if deg == True:
theta = mf.degree_2_radian(theta)
gamma = self.propagation_constant
        return 1.0*theta/npy.imag(gamma[gamma.size // 2])  # integer index for Python 3
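    # Worked example (added for clarity): for a lossless line where
    # propagation_constant = 1j * beta, a 90 degree electrical length at band
    # center maps to
    #     d = (pi / 2) / beta = lambda_guided / 4
    # i.e. a quarter of the guided wavelength at the center frequency.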
def electrical_length(self, d,deg=False):
'''
calculates the electrical length for a given distance, at
the center frequency.
Parameters
----------
d: number or array-like
delay distance, in meters
deg: Boolean
return | |
#!/usr/bin/env python
from __future__ import print_function
import os
import os.path
import threading
import sentinel2_metadata
bandList = [
'B01',
'B02',
'B03',
'B04',
'B05',
'B06',
'B07',
'B08',
'B8A',
'B09',
'B10',
'B11',
'B12']
bandList_10m = ['B02', 'B03', 'B04', 'B08']
bandList_20m = ['B05', 'B06', 'B07', 'B8A', 'B11', 'B12']
bandList_60m = ['B01', 'B09', 'B10']
# ==================================================
# Sentinel2 Specs
# ==================================================
# all
# +=BANDS==============================+=Wavelength-(um)====+=Resolution-(m)===+
# | (0) BAND1 - Aerosols | 0.443 | 60 |
# | (1) BAND2 - Blue | 0.490 | 10 |
# | (2) BAND3 - Green | 0.560 | 10 |
# | (3) BAND4 - Red | 0.665 | 10 |
# | (4) BAND5 - narrow1 (red-edge) | 0.705 | 20 |
# | (5) BAND6 - narrow2 (red-edge) | 0.740 | 20 |
# | (6) BAND7 - narrow3 (red-edge) | 0.783 | 20 |
# | (7) BAND8 - NIR | 0.842 | 10 |
# | (8) BAND8b- narrow4 (red-edge) | 0.865 | 20 |
# | (9) BAND9 - Water Vapour | 0.945 | 60 |
# | (10)BAND10- Cirrus | 1.380 | 60 |
# | (11)BAND11- SWIR1 | 1.610 | 20 |
# | (12)BAND12- SWIR2 | 2.190 | 20 |
# +====================================+====================+==================+
# 10M bands
# +==BAND=#====+==ORIGNAL=BAND==+==CW=(nm)===+==SW=(nm)===+==RES=(m)===+
# | (0)1 | B02 | 490 | 65 | 10 |
# | (1)2 | B03 | 560 | 35 | 10 |
# | (2)3 | B04 | 665 | 30 | 10 |
# | (3)4 | B08 | 842 | 115 | 10 |
# +============+================+============+============+============+
# 20M bands
# +==BAND=#====+==ORIGNAL=BAND==+==CW=(nm)===+==SW=(nm)===+==RES=(m)===+
# | (0)1 | B05 | 705 | 15 | 20 |
# | (1)2 | B06 | 740 | 15 | 20 |
# | (2)3 | B07 | 783 | 20 | 20 |
# | (3)4 | B8A | 865 | 20 | 20 |
# | (4)5 | B11 | 1610 | 90 | 20 |
# | (5)6 | B12 | 2190 | 180 | 20 |
# +============+================+============+============+============+
# 60M bands
# +==BAND=#====+==ORIGNAL=BAND==+==CW=(nm)===+==SW=(nm)===+==RES=(m)===+
# | (0)1 | B01 | 443 | 20 | 60 |
# | (1)2 | B09 | 945 | 20 | 60 |
# | (2)3 | B10 | 1375 | 30 | 60 |
# +============+================+============+============+============+
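# ---------------------------------------------------------------------------
# Illustrative sanity check (added; not part of the original script): the
# three resolution groups defined above partition the full band list shown in
# the tables.
def _example_check_band_groups():
    assert set(bandList_10m) | set(bandList_20m) | set(bandList_60m) == set(bandList)
    assert len(bandList_10m) + len(bandList_20m) + len(bandList_60m) == len(bandList)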
def process_saf(context, path, dir=os.getcwd()):
context["aot_window_size"] = 250
#
# find the tiles for this Sentinel 2 product. The French, they call it "Granules"
#
granules = sentinel2_metadata.parse(path, bandList)
# -------------------------------------------------------------------------
# WORKFLOW Sentinel2: OPERA
# -------------------------------------------------------------------------
for granule in granules:
print("processing granule " + granule.base_name)
thread_context = context.copy_self()
sentinel2_granule(granule, thread_context, dir)
def sentinel2_granule(granule, thread_context, dir):
# set defaults from command line
thread_context["max_stages"] = 25
if thread_context["aot"] == "false":
thread_context["max_stages"] -= 3
if thread_context["simec"] == "false":
thread_context["max_stages"] -= 5
if thread_context["watervapor"] == "false":
thread_context["max_stages"] -= 3
if thread_context["keep_intermediate"] == "false":
thread_context["max_stages"] -= 1
input_base_name = granule.base_name.replace(".", "_")
print(input_base_name)
thread_context["name"] = input_base_name
thread_context["prefix_input"] = input_base_name.replace("\\", "/")
granule_working_dir = os.path.join(dir, thread_context["name"])
thread_context["prefix"] = granule_working_dir + "/" + input_base_name
if not os.path.isdir(granule_working_dir):
os.makedirs(granule_working_dir)
thread_context.write_config(granule_working_dir)
# radiance and reflectance filenames
# 60m
radiance_60m = dict()
reflectance_60m = dict()
# 20m
radiance_20m = dict()
reflectance_20m = dict()
# 10m
radiance_10m = dict()
reflectance_10m = dict()
dem_list = dict()
# =========================================================================
thread_context.enter_stage("Convert to scaled radiance")
# =========================================================================
for band in granule.band_list:
reflectance_name = thread_context["prefix"] + \
"_ACRUNNER_Scaled_Reflectance_" + band.name + ".tif"
radiance_name = thread_context["prefix"] + "_ACRUNNER_TOA_Radiance_" + band.name + ".tif"
minimum = 5
if band.name == "B10":
minimum = 0
thread_context.invoke_ac_runner_mine(
"[scale]\n"
"scale.input.location=" + band.location + "\n" +
"scale.output.location=" + reflectance_name + "\n" +
"scale.gain=" + str(band.get_gain()) + "\n" +
"scale.offset=" + str(band.get_offset()) + "\n" +
"scale.invalid.minimum=" + str(minimum) + "\n"
"scale.zero.invalid=true\n"
)
thread_context.invoke_ac_runner_mine(
"[reflectance]\n" +
"reflectance.input.radiance.location=" + reflectance_name + "\n" +
"reflectance.image.dayofyear=94\n" +
"reflectance.bands=0\n" +
"reflectance.lut.bands=" + str(band.get_id()) + "\n"
"reflectance.destination.location=" + radiance_name + "\n" +
"reflectance.override.sza=" + str(granule.mean_solar_zenith) + "\n" +
"reflectance.solarirradiance.location={ac_solar_irradiance}\n" +
"reflectance.response.curves.location={ac_response_curves_all}\n"
"reflectance.invert=true\n"
)
if band.resolution == 60:
reflectance_60m[band.name] = reflectance_name
radiance_60m[band.name] = radiance_name
if band.resolution == 20:
reflectance_20m[band.name] = reflectance_name
radiance_20m[band.name] = radiance_name
if band.resolution == 10:
reflectance_10m[band.name] = reflectance_name
radiance_10m[band.name] = radiance_name
# =========================================================================
thread_context.enter_stage("Generate DEM")
# =========================================================================
# Generate a DEM that matches the size of the input images. A DEM will be
# generated for the 10M, 20M and 60M bands.
dem_list['60'] = thread_context["prefix"] + "_DEM_60M.tif"
thread_context.invoke_ac_runner_mine(
"[dem]\n" +
"dem.reference.location=" + reflectance_60m['B01'] + "\n"
"dem.input.location={dem_world}\n" +
"dem.output.location=" +
thread_context["prefix"] + "_DEM_60M.tif\n"
"dem.conversion.factor=0.001"
)
dem_list['20'] = thread_context["prefix"] + "_DEM_20M.tif"
thread_context.invoke_ac_runner_mine(
"[dem]\n" +
"dem.reference.location=" + reflectance_20m['B05'] + "\n"
"dem.input.location={dem_world}\n" +
"dem.output.location=" +
thread_context["prefix"] + "_DEM_20M.tif\n"
"dem.conversion.factor=0.001"
)
dem_list['10'] = thread_context["prefix"] + "_DEM_10M.tif"
thread_context.invoke_ac_runner_mine(
"[dem]\n" +
"dem.reference.location=" + reflectance_10m['B02'] + "\n"
"dem.input.location={dem_world}\n" +
"dem.output.location=" +
thread_context["prefix"] + "_DEM_10M.tif\n"
"dem.conversion.factor=0.001"
)
# =========================================================================
thread_context.enter_stage("Resize images to 60m")
# =========================================================================
# 20 -> 60
reflectance_60M_ALL = reflectance_60m
radiance_60M_ALL = radiance_60m
# reflectances
input_location_list = ""
output_location_list = ""
for key in reflectance_20m.keys():
input_location_list = input_location_list + reflectance_20m[key] + " "
output_location = thread_context["prefix"] + "_Reflectance_" + key + "_60M.tif "
reflectance_60M_ALL[key] = output_location
output_location_list += output_location
for key in reflectance_10m.keys():
input_location_list = input_location_list + reflectance_10m[key] + " "
output_location = thread_context["prefix"] + "_Reflectance_" + key + "_60M.tif "
reflectance_60M_ALL[key] = output_location
output_location_list += output_location
for key in radiance_20m.keys():
input_location_list = input_location_list + radiance_20m[key] + " "
output_location = thread_context["prefix"] + "_Radiance_" + key + "_60M.tif "
radiance_60M_ALL[key] = output_location
output_location_list += output_location
for key in radiance_10m.keys():
input_location_list = input_location_list + radiance_10m[key] + " "
output_location = thread_context["prefix"] + "_Radiance_" + key + "_60M.tif "
radiance_60M_ALL[key] = output_location
output_location_list += output_location
thread_context.invoke_ac_runner_mine(
"[resize nearest]\n" +
"resize.image.location=" + input_location_list + "\n" +
"resize.reference.location=" + reflectance_60m['B01'] + "\n" +
"resize.destination.location=" + output_location_list + "\n"
)
# =========================================================================
thread_context.enter_stage("Single to MultiBand Radiance - 60M ALL")
# =========================================================================
radiance_mb = ""
radiance_output_multiband_60M_ALL = thread_context["prefix"] + "_Radiance_60M_ALL.tif"
for name in bandList:
radiance_mb += radiance_60M_ALL[name] + " "
thread_context.invoke_ac_runner_mine(
"[singletomulti fast]\n" +
"multiband.input.images=" + radiance_mb + "\n"
"multiband.output.image=" + radiance_output_multiband_60M_ALL + "\n"
)
# =========================================================================
thread_context.enter_stage("Single to MultiBand Radiance - 20M")
# =========================================================================
radiance_mb = ""
radiance_output_multiband_20M = thread_context["prefix"] + "_Radiance_20M_ALL.tif"
for name in bandList_20m:
radiance_mb += radiance_20m[name] + " "
thread_context.invoke_ac_runner_mine(
"[singletomulti fast]\n" +
"multiband.input.images=" + radiance_mb + "\n"
"multiband.output.image=" + radiance_output_multiband_20M + "\n"
)
# =========================================================================
thread_context.enter_stage("Single to MultiBand Radiance - 10M")
# =========================================================================
radiance_mb = ""
radiance_output_multiband_10M = thread_context["prefix"] + "_Radiance_10M_ALL.tif"
for name in bandList_10m:
radiance_mb += radiance_10m[name] + " "
thread_context.invoke_ac_runner_mine(
"[singletomulti fast]\n" +
"multiband.input.images=" + radiance_mb + "\n"
"multiband.output.image=" + radiance_output_multiband_10M + "\n"
)
# =========================================================================
thread_context.enter_stage("Single to MultiBand Reflectance - 60M ALL")
# =========================================================================
reflectance_mb = ""
reflectance_output_multiband_60M_ALL = thread_context["prefix"] + "_Reflectance_60M_ALL.tif"
for name in bandList:
reflectance_mb += reflectance_60M_ALL[name] + " "
thread_context.invoke_ac_runner_mine(
"[singletomulti fast]\n" +
"multiband.input.images=" + reflectance_mb + "\n"
"multiband.output.image=" + reflectance_output_multiband_60M_ALL + "\n"
)
# =========================================================================
thread_context.enter_stage("Cloud detection - 60M")
# =========================================================================
# ALL / 60M
# Do cloud detection on the tile (all bands required)
# Use the blue, NIR and SWIR bands for cloud detection (60M resolution)
# options :
lowband_id = bandList.index(str(thread_context["low_band"]))
cloud_low_id_string = "cloud.low.id=" + str(lowband_id) + "\n"
cloud_low_threshold_string = "cloud.low.trh=" + str(thread_context["low_threshold"]) + "\n"
average_threshold_string = "cloud.avg.trh=" + str(thread_context["average_threshold"]) + "\n"
cirrus_threshold = ""
cirrus_band = ""
if thread_context["cirrus"] == "true":
cirrus_threshold = "cloud.cirrus.threshold=" + thread_context["cirrus_threshold"] + "\n"
cirrus_band = "cloud.cirrus.band=10\n"
cloud_mask_location_60m = thread_context["prefix"] + "_CLOUDMASK_60M.tif"
thread_context.invoke_ac_runner_mine(
"[cloud detection]\n" +
"cloud.input.location=" + reflectance_output_multiband_60M_ALL + "\n" +
cloud_low_id_string +
"cloud.high.id=8\n" +
average_threshold_string +
cloud_low_threshold_string +
"cloud.mask.location=" + cloud_mask_location_60m + "\n" +
"cloud.visible.bands=0 1 2 3 4 5 6 7 8\n" +
cirrus_threshold +
cirrus_band
)
# ==============================================================================
thread_context.enter_stage("Water detection")
# ==============================================================================
water_mask_location_60m = thread_context["prefix"] + "_WATERMASK_60M.tif"
water_mask_location_20m = thread_context["prefix"] + "_WATERMASK_20M.tif"
water_mask_location_10m = thread_context["prefix"] + "_WATERMASK_10M.tif"
band_name = str(thread_context["water_band"])
if band_name in bandList_20m:
water_mask_location = water_mask_location_20m
water_input_location = reflectance_20m[band_name]
water_orig_resolution = 20
elif band_name in bandList_10m:
water_mask_location = water_mask_location_10m
water_input_location = reflectance_10m[band_name]
water_orig_resolution = 10
else:
raise Exception("water detection band resolution should have either 10m or 20m resolution")
water_band_string = "water.nir.band=" + str(0) + "\n"
water_threshold = "water.treshold=" + thread_context["water_threshold"] + "\n"
thread_context.invoke_ac_runner_mine(
"[water detection]\n"
"water.input.location=" + water_input_location + "\n" +
water_band_string +
water_threshold +
"water.mask.location=" + water_mask_location + "\n"
)
thread_context.invoke_ac_runner_mine(
"[resize mask]\n" +
"resize.image.location=" + water_mask_location + "\n" | |
import itertools as it
import math
import os
from enum import IntEnum, auto
import numpy as np
import pygame as pg
import pymunk as pm
from gym import spaces
from gym.utils import EzPickle, seeding
from pymunk import Vec2d
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
from . import constants as const
from . import utils
from .manual_control import manual_control
class CollisionTypes(IntEnum):
PROSPECTOR = auto()
BOUNDARY = auto()
WATER = auto()
BANK = auto()
GOLD = auto()
BANKER = auto()
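# Note (added for clarity; not part of the original file): with IntEnum and
# auto(), the members above take the integer values PROSPECTOR=1, BOUNDARY=2,
# WATER=3, BANK=4, GOLD=5, BANKER=6.  Pymunk shapes are tagged with these
# values via `shape.collision_type`, and collision handlers (defined
# elsewhere in the module) are typically keyed on them.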
class Prospector(pg.sprite.Sprite):
def __init__(self, pos, space, num, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["prospector.png"])
self.id = num
self.rect = self.image.get_rect(center=pos)
self.orig_image = self.image.copy()
# Create the physics body and shape of this object.
moment = pm.moment_for_circle(1, 0, const.AGENT_RADIUS)
self.body = pm.Body(1, moment, body_type=pm.Body.DYNAMIC)
self.body.nugget = None
self.body.sprite_type = "prospector"
self.shape = pm.Circle(self.body, const.AGENT_RADIUS)
self.shape.elasticity = 0.0
self.shape.collision_type = CollisionTypes.PROSPECTOR
self.body.position = utils.flipy(pos)
# Add them to the Pymunk space.
self.space = space
self.space.add(self.body, self.shape)
def reset(self, pos):
self.body.angle = 0
self.body.angular_velocity = 0
self.image = pg.transform.rotozoom(self.orig_image, 0, 1)
self.rect = self.image.get_rect(center=pos)
self.body.position = utils.flipy(pos)
self.body.velocity = Vec2d(0.0, 0.0)
self.body.force = Vec2d(0.0, 0.0)
self.body.nugget = None
@property
def center(self):
return self.rect.center
def update(self, action):
# These actions are performed with the agent's angle in mind
# forward/backward action
y_vel = action[0] * const.PROSPECTOR_SPEED
# left/right action
x_vel = action[1] * const.PROSPECTOR_SPEED
delta_angle = action[2] * const.MAX_SPRITE_ROTATION
self.body.angle += delta_angle
self.body.angular_velocity = 0
move = pm.Vec2d(x_vel, y_vel)
self.body.apply_force_at_local_point(move, point=(0, 0))
def synchronize_center(self):
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotate(self.orig_image, math.degrees(self.body.angle))
self.rect = self.image.get_rect(center=self.rect.center)
def update_gold(self):
if self.body.nugget is not None:
self.body.nugget.update(self.body.position, self.body.angle, False)
def convert_img(self):
self.image = self.image.convert_alpha()
def __str__(self):
return f"prospector_{self.id}"
def __repr__(self):
return self.__str__()
class Banker(pg.sprite.Sprite):
def __init__(self, pos, space, num, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["bankers", f"{num}.png"])
self.id = num
self.rect = self.image.get_rect(center=pos)
self.orig_image = self.image.copy()
moment = pm.moment_for_circle(1, 0, const.AGENT_RADIUS)
self.body = pm.Body(1, moment, body_type=pm.Body.DYNAMIC)
self.body.nugget = None
self.body.sprite_type = "banker"
self.shape = pm.Circle(self.body, const.AGENT_RADIUS)
self.shape.collision_type = CollisionTypes.BANKER
self.body.position = utils.flipy(pos)
# Add them to the Pymunk space.
self.space = space
self.space.add(self.body, self.shape)
def reset(self, pos):
self.body.angle = 0
self.image = pg.transform.rotozoom(self.orig_image, 0, 1)
self.rect = self.image.get_rect(center=pos)
self.body.position = utils.flipy(pos)
self.body.velocity = Vec2d(0.0, 0.0)
self.body.nugget = None
@property
def center(self):
return self.rect.center
def update(self, action):
# up/down action
y_vel = action[0] * const.BANKER_SPEED
# left/right action
x_vel = action[1] * const.BANKER_SPEED
# Subtract math.pi / 2 because sprite starts off with math.pi / 2 rotated
angle_radians = math.atan2(y_vel, x_vel) - (math.pi / 2)
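# Worked example (illustrative values): a pure forward action (y_vel > 0, x_vel == 0)
# gives atan2(y_vel, 0) = pi/2, so angle_radians = 0 and the sprite keeps facing up.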
# Angle is determined only by current trajectory.
if not all(a == 0 for a in action):
self.body.angle = angle_radians
self.body.angular_velocity = 0
# rotate movement backwards with a magnitude of self.body.angle
# so that sprite moves forward in chosen direction
move = pm.Vec2d(x_vel, y_vel).rotated(-self.body.angle)
self.body.apply_force_at_local_point(move, point=(0, 0))
def synchronize_center(self):
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotate(self.orig_image, math.degrees(self.body.angle))
self.rect = self.image.get_rect(center=self.rect.center)
def update_gold(self):
if self.body.nugget is not None:
self.body.nugget.update(
self.body.position, self.body.angle + (math.pi / 2), True
)
def convert_img(self):
self.image = self.image.convert_alpha()
def __str__(self):
return f"banker_{self.id}"
def __repr__(self):
return self.__str__()
class Fence(pg.sprite.Sprite):
def __init__(self, w_type, sprite_pos, body_pos, verts, space, *sprite_groups):
super().__init__(sprite_groups)
self.rects = []
if w_type == "top":
self.tile = utils.load_image(["fence_horiz_tile.png"])
size = self.tile.get_rect().size
x = 15
y = 0
while x <= 1230:
rect = pg.Rect(x, y, *size)
self.rects.append(rect)
x += 50
elif w_type in ["right", "left"]:
self.tile = utils.load_image(["fence_vert_tile.png"])
size = self.tile.get_rect().size
x = 6 if w_type == "left" else 1265
y = 0
while y <= const.VERT_FENCE_HEIGHT:
rect = pg.Rect(x, y, *size)
self.rects.append(rect)
y += 33
else:
raise ValueError("Fence image not found! Check the spelling")
self.body = pm.Body(body_type=pm.Body.STATIC)
# Transform pygame vertices to fit Pymunk body
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.elasticity = 0.0
self.shape.collision_type = CollisionTypes.BOUNDARY
self.body.position = utils.flipy(body_pos)
space.add(self.body, self.shape)
def full_draw(self, screen):
for rect in self.rects:
screen.blit(self.tile, rect)
def convert_img(self):
self.tile = self.tile.convert_alpha()
class Bank(pg.sprite.Sprite):
def __init__(self, pos, verts, space, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["bank.png"])
self.rect = self.image.get_rect(topleft=pos)
self.body = pm.Body(body_type=pm.Body.STATIC)
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.collision_type = CollisionTypes.BANK
self.body.position = utils.flipy(pos)
self.space = space
self.space.add(self.body, self.shape)
def convert_img(self):
self.image = self.image.convert_alpha()
class Gold(pg.sprite.Sprite):
ids = it.count(0)
def __init__(self, pos, body, space, *sprite_groups):
super().__init__(sprite_groups)
self.id = next(self.ids)
self.image = utils.load_image(["gold.png"])
self.orig_image = self.image
self.rect = self.image.get_rect()
self.moment = pm.moment_for_circle(1, 0, const.GOLD_RADIUS)
self.body = pm.Body(1, self.moment, body_type=pm.Body.KINEMATIC)
self.body.position = body.position
self.shape = pm.Circle(self.body, const.GOLD_RADIUS)
self.shape.collision_type = CollisionTypes.GOLD
# only triggers collision callbacks, doesn't create real collisions
self.shape.sensor = True
self.shape.id = self.id
self.space = space
self.space.add(self.body, self.shape)
self.initial_angle = body.angle - Vec2d(0, -1).angle
self.parent_body = body
def update(self, pos, angle, banker: bool):
if banker:
new_angle = angle
else:
new_angle = angle - self.initial_angle
new_pos = pos + Vec2d(const.AGENT_RADIUS + 9, 0).rotated(new_angle)
self.body.position = new_pos
self.body.angular_velocity = 0
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotozoom(
self.orig_image, math.degrees(self.body.angle), 1
)
self.rect = self.image.get_rect(center=self.rect.center)
def convert_img(self):
self.image = self.image.convert_alpha()
class Water:
def __init__(self, pos, verts, space, rng):
self.num_cols = math.ceil(const.SCREEN_WIDTH / const.TILE_SIZE)
self.num_rows = math.ceil(const.WATER_HEIGHT / const.TILE_SIZE)
self.top_tile = utils.load_image(["river_to_sand_tile.png"])
self.tile = utils.load_image(["river_tile.png"])
self.debris_tile = utils.load_image(["debris", "seaweed_water.png"])
tile_size = self.tile.get_size()
self.rects = []
for row in range(self.num_rows):
new_row = []
for col in range(self.num_cols):
rect = pg.Rect(
col * const.TILE_SIZE, pos[1] + (row * const.TILE_SIZE), *tile_size
)
new_row.append(rect)
self.rects.append(new_row)
self.body = pm.Body(body_type=pm.Body.STATIC)
# Transform pygame vertices to fit Pymunk body
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.collision_type = CollisionTypes.WATER
self.body.position = utils.flipy(pos)
self.space = space
self.space.add(self.body, self.shape)
def generate_debris(self, rng):
self.debris = []
for col in range(1, self.num_cols - 1, 3):
if rng.random() >= 0.5:
y = rng.integers(0, 2)
x = col + rng.integers(0, 3)
rect = self.rects[y][x].copy()
rect.x += 3
rect.y += 9
self.debris.append([self.debris_tile, rect])
def full_draw(self, screen):
for rect in self.rects[0]:
screen.blit(self.top_tile, rect)
for rect in self.rects[1]:
screen.blit(self.tile, rect)
for pair in self.debris:
screen.blit(pair[0], pair[1])
def draw(self, screen):
self.full_draw(screen)
def convert_img(self):
self.top_tile = self.top_tile.convert_alpha()
self.tile = self.tile.convert_alpha()
self.debris_tile = self.debris_tile.convert_alpha()
class Background:
def __init__(self, rng):
self.num_cols = math.ceil(const.SCREEN_WIDTH / const.TILE_SIZE)
self.num_rows = (
math.ceil((const.SCREEN_HEIGHT - const.WATER_HEIGHT) / const.TILE_SIZE) + 1
)
self.tile = utils.load_image(["sand_tile.png"])
self.debris_tiles = {
0: utils.load_image(["debris", "0.png"]),
1: utils.load_image(["debris", "1.png"]),
2: utils.load_image(["debris", "2.png"]),
3: utils.load_image(["debris", "3.png"]),
}
# Used when updating environment and drawing
self.dirty_rects = []
self.rects = []
# same as (const.TILE_SIZE, const.TILE_SIZE)
tile_size = self.tile.get_size()
for row in range(self.num_rows):
new_row = []
for col in range(self.num_cols):
rect = pg.Rect(col * const.TILE_SIZE, row * const.TILE_SIZE, *tile_size)
new_row.append(rect)
self.rects.append(new_row)
def generate_debris(self, rng):
self.debris = {}
for row in range(1, self.num_rows - 1, 3):
for col in range(1, self.num_cols - 1, 3):
y = row + rng.integers(0, 3)
if y == self.num_rows - 2:
y += -1
x = col + rng.integers(0, 3)
choice = rng.integers(0, 4)
self.debris[self.rects[y][x].topleft] = self.debris_tiles[choice]
def full_draw(self, screen):
for row in self.rects:
for rect in row:
screen.blit(self.tile, rect)
debris = self.debris.get(rect.topleft, None)
if debris is not None:
screen.blit(debris, rect)
def draw(self, screen):
# self.full_draw(screen)
for rect in self.dirty_rects:
screen.blit(self.tile, rect)
debris = self.debris.get(rect.topleft, None)
if debris is not None:
screen.blit(debris, rect)
self.dirty_rects.clear()
def update(self, sprite_rect: pg.Rect):
top_y = int(sprite_rect.top // const.TILE_SIZE)
bottom_y = int(sprite_rect.bottom // const.TILE_SIZE)
left_x = int(sprite_rect.left // const.TILE_SIZE)
right_x = int(sprite_rect.right // const.TILE_SIZE)
self.dirty_rects.append(self.rects[top_y][left_x])
self.dirty_rects.append(self.rects[top_y][right_x])
self.dirty_rects.append(self.rects[bottom_y][left_x])
self.dirty_rects.append(self.rects[bottom_y][right_x])
def convert_img(self):
self.tile = self.tile.convert_alpha()
for i in self.debris_tiles:
# convert_alpha() returns a new Surface, so the result must be stored back
self.debris_tiles[i] = self.debris_tiles[i].convert_alpha()
def env(**kwargs):
env = raw_env(**kwargs)
env = wrappers.ClipOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
def __init__(
self,
ind_reward=0.8,
group_reward=0.1,
other_group_reward=0.1,
prospec_find_gold_reward=1,
prospec_handoff_gold_reward=1,
banker_receive_gold_reward=1,
banker_deposit_gold_reward=1,
max_cycles=450,
):
EzPickle.__init__(
self,
ind_reward,
group_reward,
other_group_reward,
prospec_find_gold_reward,
prospec_handoff_gold_reward,
banker_receive_gold_reward,
banker_deposit_gold_reward,
max_cycles,
)
total_reward_factor = ind_reward + group_reward + other_group_reward
if not math.isclose(total_reward_factor, 1.0, rel_tol=1e-09):
raise ValueError(
"The sum of the individual reward, group reward, and other "
"group reward should add up to approximately 1.0"
)
self.agents = []
self.sprite_list = [
"bankers/0.png",
"bankers/1.png",
"bankers/2.png",
"prospector.png",
]
self.max_cycles = max_cycles
pg.init()
self.seed()
self.closed = False
self.background = Background(self.rng)
self.space = pm.Space()
self.space.gravity = Vec2d(0.0, 0.0)
self.space.iterations = 20 # for decreasing bounciness
self.space.damping = 0.0
self.all_sprites = pg.sprite.RenderUpdates()
self.gold = []
self.water = Water(
const.WATER_INFO[0], const.WATER_INFO[1], self.space, self.rng
)
# Generate random positions for each
from csp.decorators import csp_update
from django.utils.decorators import method_decorator
from rest_framework.authentication import SessionAuthentication
from rest_framework_simplejwt.authentication import JWTAuthentication
from django.views.generic import TemplateView, DetailView, FormView
from rest_framework.generics import CreateAPIView, ListAPIView, RetrieveAPIView, UpdateAPIView, DestroyAPIView
from .serializers import ReportSerializer, ReportGeoSerializer, ReportImagesSerializer, ReportFinalizeSerializer, \
OnlineReportFinalizeSerializer, OnlineReportSerializer, ReportCreateUpdateSerializer, \
OnlineReportCreateUpdateSerializer, ReportCommentThreadSerializer, OnlineReportCommentThreadSerializer,\
CreateReportCommentSerializer, CreateOnlineReportCommentSerializer
from .models import Report, ReportImage, OnlineReport, ReportCommentThread, OnlineReportCommentThread, ReportComment, \
OnlineReportComment
from .models import LocationType, CRIMETYPE_CHOICES, OnlineTypes
from django.db.models import Q
from .forms import NewReportForm, NewOnlineReportForm
from django.contrib.contenttypes.models import ContentType
from rest_framework import status
from rest_framework.response import Response
from apps.profiles.models import LevelTypes
from apps.notifications.models import Notification
from apps.profiles.decorator_tests import *
from django.contrib.auth.mixins import UserPassesTestMixin
from django_countries import countries
from rest_framework.permissions import AllowAny, IsAuthenticated
class CreateReportAPIView(CreateAPIView):
"""
Creates a new report and returns the new report's ID
"""
serializer_class = ReportCreateUpdateSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
class UpdateReportAPIView(UserPassesTestMixin, UpdateAPIView):
"""
Updates a report
"""
serializer_class = ReportCreateUpdateSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
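# Access-control note (descriptive comment; the URL pattern supplying the 'pk', 'type' and
# 'token' kwargs is defined elsewhere and assumed here): authenticated contributors may edit,
# while anonymous clients must present the report's 32-character edit_token; edits are further
# restricted to non-finalized (read_only=False) reports in get_queryset below.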
def test_func(self):
u = self.request.user
if u.is_authenticated:
return is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)
else:
obj_type = self.kwargs['type']
obj_id = self.kwargs['pk']
if obj_type == ContentType.objects.get_for_model(Report).id:
report = Report.objects.filter(id=obj_id).first()
elif obj_type == ContentType.objects.get_for_model(OnlineReport).id:
report = OnlineReport.objects.filter(id=obj_id).first()
else:
return False
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_queryset(self):
report = Report.objects.get(id=self.kwargs['pk'])
if report.author:
current_user_profile = self.request.user.profile
if report.author == current_user_profile and not report.read_only:
return Report.objects.filter(id=report.id, read_only=False)
else:
return Report.objects.none()
else:
correct_token = report.edit_token
transmitted_token = self.kwargs['token']
if len(correct_token) == 32 and len(
transmitted_token) == 32 and correct_token == transmitted_token and not report.author:
return Report.objects.filter(id=report.id, read_only=False)
else:
return Report.objects.none()
class CreateOnlineReportAPIView(CreateAPIView):
"""
Creates a new online report and returns the new report's ID
"""
serializer_class = OnlineReportCreateUpdateSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
class UpdateOnlineReportAPIView(UserPassesTestMixin, UpdateAPIView):
"""
Updates an online report
"""
serializer_class = OnlineReportCreateUpdateSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
def test_func(self):
u = self.request.user
if u.is_authenticated:
return is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)
else:
obj_type = self.kwargs['type']
obj_id = self.kwargs['pk']
if obj_type == ContentType.objects.get_for_model(Report).id:
report = Report.objects.filter(id=obj_id).first()
elif obj_type == ContentType.objects.get_for_model(OnlineReport).id:
report = OnlineReport.objects.filter(id=obj_id).first()
else:
return False
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_queryset(self):
report = OnlineReport.objects.get(id=self.kwargs['pk'])
if report.author:
current_user_profile = self.request.user.profile
if report.author == current_user_profile and not report.read_only:
return OnlineReport.objects.filter(id=report.id, read_only=False)
else:
return OnlineReport.objects.none()
else:
correct_token = report.edit_token
transmitted_token = self.kwargs['token']
if len(correct_token) == 32 and len(
transmitted_token) == 32 and correct_token == transmitted_token and not report.author:
return OnlineReport.objects.filter(id=report.id, read_only=False)
else:
return OnlineReport.objects.none()
class CreateReportChoiceEntryPoint(TemplateView):
template_name = 'newreportchoice.html'
def get_context_data(self, **kwargs):
context = super(CreateReportChoiceEntryPoint, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['authenticated'] = self.request.user.is_authenticated
return context
@method_decorator(csp_update(STYLE_SRC=("'unsafe-inline'",)), name='dispatch')
class CreateReportView(FormView):
"""
Creates a new report and returns the new report's ID
"""
form_class = NewReportForm
template_name = "newreport.html"
def get_context_data(self, **kwargs):
context = super(CreateReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['this_type'] = "report"
context['mode'] = 'new'
context['authenticated'] = self.request.user.is_authenticated
return context
# def get_success_url(self):
# return reverse('report-add-images', kwargs={'pk': self.object.pk, 'type': ContentType.objects.get_for_model(Report).id})
class CreateOnlineReportView(FormView):
"""
Creates a new online report and returns the new report's ID
"""
form_class = NewOnlineReportForm
template_name = "newonlinereport.html"
def get_context_data(self, **kwargs):
context = super(CreateOnlineReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['this_type'] = "onlinereport"
context['mode'] = 'new'
context['authenticated'] = self.request.user.is_authenticated
return context
# def get_success_url(self):
# return reverse('onlinereport-add-images', kwargs={'pk': self.object.pk, 'type': ContentType.objects.get_for_model(OnlineReport).id})
@method_decorator(csp_update(STYLE_SRC=("'unsafe-inline'",)), name='dispatch')
class UpdateReportView(UserPassesTestMixin, FormView):
serializer_class = ReportSerializer
form_class = NewReportForm
template_name = "newreport.html"
def test_func(self):
u = self.request.user
return is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)
def get_context_data(self, **kwargs):
context = super(UpdateReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['this_type'] = "report"
context['mode'] = 'update'
context['authenticated'] = self.request.user.is_authenticated
context['report_id'] = self.kwargs['pk']
return context
def get_object(self, *args, **kwargs):
id_filter = Q(id=self.kwargs['pk'])
author_filter = Q(author=self.request.user.profile)
write_filter = Q(read_only=False)
return Report.objects.get(id_filter & author_filter & write_filter)
# def get_success_url(self):
# return reverse('report-add-images', kwargs={'pk': self.object.pk, 'type': ContentType.objects.get_for_model(Report).id})
class UpdateOnlineReportView(UserPassesTestMixin, FormView):
serializer_class = OnlineReportSerializer
form_class = NewOnlineReportForm
template_name = "newonlinereport.html"
def test_func(self):
u = self.request.user
return is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)
def get_context_data(self, **kwargs):
context = super(UpdateOnlineReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['this_type'] = "onlinereport"
context['mode'] = 'update'
context['authenticated'] = self.request.user.is_authenticated
context['report_id'] = self.kwargs['pk']
return context
def get_object(self, *args, **kwargs):
id_filter = Q(id=self.kwargs['pk'])
author_filter = Q(author=self.request.user.profile)
write_filter = Q(read_only=False)
return OnlineReport.objects.get(id_filter & author_filter & write_filter)
# def get_success_url(self):
# return reverse('onlinereport-add-images', kwargs={'pk': self.object.pk, 'type': ContentType.objects.get_for_model(OnlineReport).id})
@method_decorator(csp_update(STYLE_SRC=("'unsafe-inline'",)), name='dispatch')
class ReportsSearchTemplateView(UserPassesTestMixin, TemplateView):
template_name = 'reports_search.html'
def test_func(self):
u = self.request.user
return is_non_profit_employee(u)
def get_context_data(self, **kwargs):
context = super(ReportsSearchTemplateView, self).get_context_data(**kwargs)
current_user = self.request.user
current_user_profile = current_user.profile
current_user_primary_org = current_user_profile.primary_org
context['categories'] = CRIMETYPE_CHOICES
context['online_categories'] = OnlineTypes.choices
context['locationtypes'] = LocationType.choices
if current_user_primary_org.level_type == LevelTypes.N.value:
context['countries'] = current_user_primary_org.countries
else:
context['countries'] = countries.countries
return context
class FinalizeReportAPIView(UserPassesTestMixin, UpdateAPIView):
"""
Finalizes the report after having uploaded images
"""
serializer_class = ReportFinalizeSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
def test_func(self):
u = self.request.user
obj_id = self.kwargs['pk']
report = Report.objects.filter(id=obj_id).first()
if u.is_authenticated:
return (is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)) and report.author == u.profile
else:
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_object(self):
id_filter = Q(id=self.kwargs['pk'])
writeable_filter = Q(read_only=False)
return Report.objects.get(id_filter & writeable_filter)
def update(self, request, *args, **kwargs):
obj_instance = self.get_object()
if obj_instance is not None and not obj_instance.read_only:
obj_instance.read_only = True
obj_instance.save()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class FinalizeOnlineReportAPIView(UserPassesTestMixin, UpdateAPIView):
"""
Finalizes the online report after having uploaded images
"""
serializer_class = OnlineReportFinalizeSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
def test_func(self):
u = self.request.user
obj_id = self.kwargs['pk']
report = OnlineReport.objects.filter(id=obj_id).first()
if u.is_authenticated:
return (is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)) and report.author == u.profile
else:
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_object(self):
id_filter = Q(id=self.kwargs['pk'])
writeable_filter = Q(read_only=False)
return OnlineReport.objects.get(id_filter & writeable_filter)
def update(self, request, *args, **kwargs):
obj_instance = self.get_object()
if obj_instance is not None and not obj_instance.read_only:
obj_instance.read_only = True
obj_instance.save()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class AddImagesToReportView(UserPassesTestMixin, DetailView):
model = Report
template_name = "addimages.html"
context_object_name = "report"
def test_func(self):
u = self.request.user
obj_id = self.kwargs['pk']
report = Report.objects.filter(id=obj_id).first()
if u.is_authenticated:
return (is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)) and report.author == u.profile
else:
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_context_data(self, **kwargs):
context = super(AddImagesToReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['authenticated'] = self.request.user.is_authenticated
return context
def get_object(self, queryset=None):
report_id = self.kwargs['pk']
return Report.objects.get(id=report_id, read_only=False)
class AddImagesToOnlineReportView(UserPassesTestMixin, DetailView):
model = OnlineReport
template_name = "addimages.html"
context_object_name = "report"
def test_func(self):
u = self.request.user
obj_id = self.kwargs['pk']
report = OnlineReport.objects.filter(id=obj_id).first()
if u.is_authenticated:
return (is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)) and u.profile == report.author
else:
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
def get_context_data(self, **kwargs):
context = super(AddImagesToOnlineReportView, self).get_context_data(**kwargs)
context['type'] = {'report': ContentType.objects.get_for_model(Report).id, 'onlinereport': ContentType.objects.get_for_model(OnlineReport).id}
context['authenticated'] = self.request.user.is_authenticated
return context
def get_object(self, queryset=None):
report_id = self.kwargs['pk']
return OnlineReport.objects.get(id=report_id, read_only=False)
class UploadImageForReportById(UserPassesTestMixin, CreateAPIView):
model = ReportImage
serializer_class = ReportImagesSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
def test_func(self):
u = self.request.user
if u.is_authenticated:
return is_non_profit_employee(u) or is_volunteer(u) or is_commercial_entity_employee(u) or is_anonymous_contributor(u)
else:
obj_type = self.kwargs['type']
obj_id = self.kwargs['pk']
if int(obj_type) == ContentType.objects.get_for_model(Report).id:
report = Report.objects.filter(id=obj_id).first()
elif int(obj_type) == ContentType.objects.get_for_model(OnlineReport).id:
report = OnlineReport.objects.filter(id=obj_id).first()
else:
return False
if not report:
return False
if report.author or report.edit_token == '' or not len(report.edit_token) == 32:
return False
transmitted_token = self.kwargs['token']
if len(transmitted_token) != 32:
return False
return transmitted_token == report.edit_token
class DeleteImageByImageId(UserPassesTestMixin, DestroyAPIView):
model = ReportImage
serializer_class = ReportImagesSerializer
authentication_classes = [JWTAuthentication, SessionAuthentication]
permission_classes = (AllowAny,)
the number of partitions for the RDD to use.
Returns:
avg_ils: the average user's Intra-List Similarity
"""
temp = y_predicted.map(lambda (u,i,p): (i, (u,p))).join(content_array)
user_ils = temp.map(lambda (i,((u,p),c_a)): (u, (i, c_a))).groupByKey()\
.map(lambda (user, item_list):(calc_user_ILS(list(item_list)))).collect()
total_ils = sum(user_ils)
avg_ils = total_ils/float(len(user_ils))
return avg_ils
def calc_user_ILS(item_list):
item_list = list(item_list)
total_ils = 0
total_count = 0
for (i1, i2) in itertools.combinations(item_list, 2):
# get similarity using the attached content (or rating) array
pair_similarity = calc_cosine_distance(i1[1], i2[1])
total_ils += pair_similarity
total_count += 1
#this shouldn't happen but if it does then we want to return zero...
if total_count ==0:
return 0.0
return float(total_ils)/total_count
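# Illustrative example (hypothetical data; assumes calc_cosine_distance returns a cosine
# similarity): item_list = [(1, [1, 0, 1]), (2, [1, 1, 0]), (3, [0, 1, 1])] has three pairs,
# each with similarity 0.5, so calc_user_ILS(item_list) returns 0.5.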
def calculate_catalog_coverage(y_test, y_train, y_predicted):
"""
Calculates the percentage of user-item pairs that were predicted by the algorithm.
The full data is passed in as y_test and y_train to determine the total number of potential user-item pairs
Then the predicted data is passed in to determine how many user-item pairs were predicted.
    It is very important to NOT pass in the sorted and cut prediction RDD, and that the algorithm tries to predict all pairs
    Using the function 'cartesian', as shown in line 25 of content_based.py, is helpful in that regard
Args:
y_test: the data used to test the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
y_train: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ]. It is important that this is not the sorted and cut prediction RDD
Returns:
catalog_coverage: value representing the percentage of user-item pairs that were able to be predicted
"""
y_full_data = y_test.union(y_train)
prediction_count = y_predicted.count()
#obtain the number of potential users and items from the actual array as the algorithms cannot predict something that was not trained
num_users = y_full_data.map(lambda row: row[0]).distinct().count()
num_items = y_full_data.map(lambda row: row[1]).distinct().count()
potential_predict = num_users*num_items
catalog_coverage = prediction_count/float(potential_predict)*100
return catalog_coverage
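# Illustrative arithmetic (hypothetical counts): with 3 distinct users and 4 distinct items in
# y_test.union(y_train) there are 12 potential user-item pairs; if y_predicted contains 6
# predictions, catalog coverage is 6/12 * 100 = 50.0.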
def calculate_item_coverage(y_test, y_train, content_vector, y_predicted):
"""
    Calculates the percentage of items that were predicted by the algorithm.
    The full dataset is passed in as y_test and y_train to determine the total number of potential items
    Then the predicted data is passed in to determine how many items were predicted.
It is very important to NOT pass in the sorted and cut prediction RDD
Args:
y_test: the data used to test the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
y_train: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
content_vector: the content vector in the format of an RDD of [ (item_id, [item_content]) ].
It is passed in because some datasets have items without any ratings
y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ]. It is important that this is not the sorted and cut prediction RDD
Returns:
    item_coverage: value representing the percentage of items that were able to be predicted
"""
predicted_items = y_predicted.map(lambda row: row[1]).distinct().count()
#obtain the number of potential users and items from the actual array as the algorithms cannot predict something that was not trained
interact_items = y_test.union(y_train).map(lambda row: row[1]).distinct()
content_items = content_vector.map(lambda row: row[0]).distinct()
full_potential_items = interact_items.union(content_items)
num_items = full_potential_items.distinct().count()
item_coverage = predicted_items/float(num_items)*100
return item_coverage
def calculate_user_coverage(y_test, y_train, y_predicted):
"""
Calculates the percentage of users that were predicted by the algorithm.
The full dataset is passed in as y_test and y_train to determine the total number of potential users
    Then the predicted data is passed in to determine how many users were predicted.
It is very important to NOT pass in the sorted and cut prediction RDD
Args:
y_test: the data used to test the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
y_train: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ]. It is important that this is not the sorted and cut prediction RDD
Returns:
    user_coverage: value representing the percentage of users that were able to be predicted
"""
y_full_data = y_test.union(y_train)
predicted_users = y_predicted.map(lambda row: row[0]).distinct().count()
#obtain the number of potential users and items from the actual array as the algorithms cannot predict something that was not trained
num_users = y_full_data.map(lambda row: row[0]).distinct().count()
user_coverage = predicted_users/float(num_users)*100
return user_coverage
def calculate_prediction_coverage(y_actual, y_predicted):
"""
Calculates the percentage of known user-item pairs which were predicted by the algorithm.
    It is different from the item_coverage in that only the user's actual ratings are analyzed vs all potential ratings
    In this manner it is likely that very low occurring items or users won't hurt the final metric as much as they would in calculate_item_coverage
It is very important to NOT pass in the sorted and cut prediction RDD
Args:
y_actual: actual ratings in the format of an array of [ (userId, itemId, actualRating) ]
y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ]. It is important that this is not the sorted and cut prediction RDD
Returns:
    prediction_coverage: value representing the percentage of user-item pairs that were able to be predicted
"""
predictionsAndRatings = y_predicted.map(lambda x: ((x[0], x[1]), x[2])) \
.join(y_actual.map(lambda x: ((x[0], x[1]), x[2])))
num_found_predictions = predictionsAndRatings.count()
num_test_set = y_actual.count()
prediction_coverage = num_found_predictions/float(num_test_set)*100
return prediction_coverage
def calculate_serendipity(y_train, y_test, y_predicted, sqlCtx, rel_filter=1):
"""
Calculates the serendipity of the recommendations.
This measure of serendipity in particular is how surprising relevant recommendations are to a user
serendipity = 1/N sum( max(Pr(s)- Pr(S), 0) * isrel(s)) over all items
The central portion of this equation is the difference of probability that an item is rated for a user
and the probability that item would be recommended for any user.
The first ranked item has a probability 1, and last ranked item is zero. prob_by_rank(rank, n) calculates this
Relevance is defined by the items in the hold out set (y_test).
If an item was rated it is relevant, which WILL miss relevant non-rated items.
Higher values are better
    Method derived from the Coursera course: Recommender Systems taught by Prof <NAME> (University of Minnesota)
and Prof <NAME> (Texas State University)
Args:
y_train: actual training ratings in the format of an array of [ (userId, itemId, actualRating) ].
y_test: actual testing ratings to test in the format of an array of [ (userId, itemId, actualRating) ].
y_predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
It is important that this is not the sorted and cut prediction RDD
rel_filter: the threshold of item relevance. So for MovieLens this may be 3.5, LastFM 0.
Ratings/interactions have to be at or above this mark to be considered relevant
Returns:
average_overall_serendipity: the average amount of surprise over all users
average_serendipity: the average user's amount of surprise over their recommended items
"""
full_corpus = y_train.union(y_test).map(lambda (u,i,r): (u,i,float(r)))
fields = [StructField("user", LongType(),True),StructField("item", LongType(), True),\
StructField("rating", FloatType(), True) ]
schema = StructType(fields)
schema_rate = sqlCtx.createDataFrame(full_corpus, schema)
schema_rate.registerTempTable("ratings")
item_ranking = sqlCtx.sql("select item, avg(rating) as avg_rate, row_number() over(ORDER BY avg(rating) desc) as rank \
from ratings group by item order by avg_rate desc")
n = item_ranking.count()
#determine the probability for each item in the corpus
item_ranking_with_prob = item_ranking.map(lambda (item_id, avg_rate, rank): (item_id, avg_rate, rank, prob_by_rank(rank, n)))
    #format the 'relevant' predictions as a queryable table
#these are those predictions for which we have ratings above the threshold
y_test = y_test.filter(lambda (u,i,r): r>=rel_filter).map(lambda (u,i,r): (u,i,float(r)))
predictionsAndRatings = y_predicted.map(lambda x: ((x[0], x[1]), x[2])) \
.join(y_test.map(lambda x: ((x[0], x[1]), x[2])))
temp = predictionsAndRatings.map(lambda (a,b): (a[0], a[1], b[1], b[1]))
fields = [StructField("user", LongType(),True),StructField("item", LongType(), True),\
StructField("prediction", FloatType(), True), StructField("actual", FloatType(), True) ]
schema = StructType(fields)
schema_preds = sqlCtx.createDataFrame(temp, schema)
schema_preds.registerTempTable("preds")
| |
# Source repository: mcx/pinocchio
from .. import pinocchio_pywrap as pin
from ..utils import npToTuple
from . import BaseVisualizer
import os
import warnings
import numpy as np
from distutils.version import LooseVersion
try:
import hppfcl
WITH_HPP_FCL_BINDINGS = True
except ImportError:
WITH_HPP_FCL_BINDINGS = False
def isMesh(geometry_object):
""" Check whether the geometry object contains a Mesh supported by MeshCat """
if geometry_object.meshPath == "":
return False
_, file_extension = os.path.splitext(geometry_object.meshPath)
if file_extension.lower() in [".dae", ".obj", ".stl"]:
return True
return False
def loadMesh(mesh):
import meshcat.geometry as mg
if isinstance(mesh,hppfcl.HeightFieldOBBRSS):
heights = mesh.getHeights()
x_grid = mesh.getXGrid()
y_grid = mesh.getYGrid()
min_height = mesh.getMinHeight()
X, Y = np.meshgrid(x_grid,y_grid)
nx = len(x_grid)-1
ny = len(y_grid)-1
num_cells = (nx) * (ny) * 2 + (nx+ny)*4 + 2
num_vertices = X.size
num_tris = num_cells
faces = np.empty((num_tris,3),dtype=int)
vertices = np.vstack((np.stack((X.reshape(num_vertices),Y.reshape(num_vertices),heights.reshape(num_vertices)),axis=1),
np.stack((X.reshape(num_vertices),Y.reshape(num_vertices),np.full(num_vertices,min_height)),axis=1)))
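# Vertex layout (as built above): the first num_vertices entries are the sampled top surface
# (grid points at their heights); the next num_vertices entries are the same grid points at
# min_height, forming the underside. The faces below stitch the top surface, the four
# boundary walls, and finally the bottom cap.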
face_id = 0
for y_id in range(ny):
for x_id in range(nx):
p0 = x_id + y_id * (nx+1)
p1 = p0 + 1
p2 = p1 + nx + 1
p3 = p2 - 1
faces[face_id] = np.array([p0,p3,p1])
face_id += 1
faces[face_id] = np.array([p3,p2,p1])
face_id += 1
if y_id == 0:
p0_low = p0 + num_vertices
p1_low = p1 + num_vertices
faces[face_id] = np.array([p0,p1_low,p0_low])
face_id += 1
faces[face_id] = np.array([p0,p1,p1_low])
face_id += 1
if y_id == ny-1:
p2_low = p2 + num_vertices
p3_low = p3 + num_vertices
faces[face_id] = np.array([p3,p3_low,p2_low])
face_id += 1
faces[face_id] = np.array([p3,p2_low,p2])
face_id += 1
if x_id == 0:
p0_low = p0 + num_vertices
p3_low = p3 + num_vertices
faces[face_id] = np.array([p0,p3_low,p3])
face_id += 1
faces[face_id] = np.array([p0,p0_low,p3_low])
face_id += 1
if x_id == nx-1:
p1_low = p1 + num_vertices
p2_low = p2 + num_vertices
faces[face_id] = np.array([p1,p2_low,p2])
face_id += 1
faces[face_id] = np.array([p1,p1_low,p2_low])
face_id += 1
# Last face
p0 = num_vertices
p1 = p0 + nx
p2 = 2*num_vertices-1
p3 = p2 - nx
faces[face_id] = np.array([p0,p1,p2])
face_id += 1
faces[face_id] = np.array([p0,p2,p3])
face_id += 1
elif isinstance(mesh,(hppfcl.Convex,hppfcl.BVHModelBase)):
if isinstance(mesh,hppfcl.BVHModelBase):
num_vertices = mesh.num_vertices
num_tris = mesh.num_tris
call_triangles = mesh.tri_indices
call_vertices = mesh.vertices
elif isinstance(mesh,hppfcl.Convex):
num_vertices = mesh.num_points
num_tris = mesh.num_polygons
call_triangles = mesh.polygons
call_vertices = mesh.points
faces = np.empty((num_tris,3),dtype=int)
for k in range(num_tris):
tri = call_triangles(k)
faces[k] = [tri[i] for i in range(3)]
if LooseVersion(hppfcl.__version__) >= LooseVersion("1.7.7"):
vertices = call_vertices()
else:
vertices = np.empty((num_vertices,3))
for k in range(num_vertices):
vertices[k] = call_vertices(k)
vertices = vertices.astype(np.float32)
if num_tris > 0:
mesh = mg.TriangularMeshGeometry(vertices, faces)
else:
mesh = mg.Points(
mg.PointsGeometry(vertices.T, color=np.repeat(np.ones((3,1)),num_vertices,axis=1)),
mg.PointsMaterial(size=0.002))
return mesh
def createCapsule(length, radius, radial_resolution = 30, cap_resolution = 10):
nbv = np.array([max(radial_resolution, 4), max(cap_resolution, 4)])
h = length
r = radius
position = 0
vertices = np.zeros((nbv[0] * (2 * nbv[1]) + 2, 3))
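# Construction sketch: for each of the nbv[0] azimuthal angles we generate nbv[1] latitude
# samples on the bottom hemispherical cap (z = -h/2 - sin(theta)*r) immediately followed by
# nbv[1] samples on the top cap (z = +h/2 + sin(theta)*r); the last two vertices are the two
# cap poles. The index buffer built below stitches the caps and the cylindrical side together.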
for j in range(nbv[0]):
phi = (( 2 * np.pi * j) / nbv[0])
for i in range(nbv[1]):
theta = ((np.pi / 2 * i) / nbv[1])
vertices[position + i, :] = np.array([np.cos(theta) * np.cos(phi) * r,
np.cos(theta) * np.sin(phi) * r,
-h / 2 - np.sin(theta) * r])
vertices[position + i + nbv[1], :] = np.array([np.cos(theta) * np.cos(phi) * r,
np.cos(theta) * np.sin(phi) * r,
h / 2 + np.sin(theta) * r])
position += nbv[1] * 2
vertices[-2, :] = np.array([0, 0, -h / 2 - r])
vertices[-1, :] = np.array([0, 0, h / 2 + r])
indexes = np.zeros((nbv[0] * (4 * (nbv[1] - 1) + 4), 3))
index = 0
stride = nbv[1] * 2
last = nbv[0] * (2 * nbv[1]) + 1
for j in range(nbv[0]):
j_next = (j + 1) % nbv[0]
indexes[index + 0] = np.array([j_next * stride + nbv[1], j_next * stride, j * stride])
indexes[index + 1] = np.array([j * stride + nbv[1], j_next * stride + nbv[1], j * stride])
indexes[index + 2] = np.array([j * stride + nbv[1] - 1, j_next * stride + nbv[1] - 1, last - 1])
indexes[index + 3] = np.array([j_next * stride + 2 * nbv[1] - 1, j * stride + 2 * nbv[1] - 1, last])
for i in range(nbv[1]-1):
indexes[index + 4 + i * 4 + 0] = np.array([j_next * stride + i, j_next * stride + i + 1, j * stride + i])
indexes[index + 4 + i * 4 + 1] = np.array([j_next * stride + i + 1, j * stride + i + 1, j * stride + i])
indexes[index + 4 + i * 4 + 2] = np.array([j_next * stride + nbv[1] + i + 1, j_next * stride + nbv[1] + i, j * stride + nbv[1] + i])
indexes[index + 4 + i * 4 + 3] = np.array([j_next * stride + nbv[1] + i + 1, j * stride + nbv[1] + i, j * stride + nbv[1] + i + 1])
index += 4 * (nbv[1] - 1) + 4
import meshcat.geometry
return meshcat.geometry.TriangularMeshGeometry(vertices, indexes)
class MeshcatVisualizer(BaseVisualizer):
"""A Pinocchio display using Meshcat"""
def getViewerNodeName(self, geometry_object, geometry_type):
"""Return the name of the geometry object inside the viewer."""
if geometry_type is pin.GeometryType.VISUAL:
return self.viewerVisualGroupName + '/' + geometry_object.name
elif geometry_type is pin.GeometryType.COLLISION:
return self.viewerCollisionGroupName + '/' + geometry_object.name
def initViewer(self, viewer=None, open=False, loadModel=False):
"""Start a new MeshCat server and client.
Note: the server can also be started separately using the "meshcat-server" command in a terminal:
this enables the server to remain active after the current script ends.
"""
import meshcat
self.viewer = meshcat.Visualizer() if viewer is None else viewer
if open:
self.viewer.open()
if loadModel:
self.loadViewerModel()
def loadPrimitive(self, geometry_object):
import meshcat.geometry
# Cylinders need to be rotated
R = np.array([[1., 0., 0., 0.],
[0., 0., -1., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.]])
RotatedCylinder = type("RotatedCylinder", (meshcat.geometry.Cylinder,), {"intrinsic_transform": lambda self: R })
geom = geometry_object.geometry
if isinstance(geom, hppfcl.Capsule):
if hasattr(meshcat.geometry, 'TriangularMeshGeometry'):
obj = createCapsule(2. * geom.halfLength, geom.radius)
else:
obj = RotatedCylinder(2. * geom.halfLength, geom.radius)
elif isinstance(geom, hppfcl.Cylinder):
obj = RotatedCylinder(2. * geom.halfLength, geom.radius)
elif isinstance(geom, hppfcl.Box):
obj = meshcat.geometry.Box(npToTuple(2. * geom.halfSide))
elif isinstance(geom, hppfcl.Sphere):
obj = meshcat.geometry.Sphere(geom.radius)
elif isinstance(geom, hppfcl.ConvexBase):
obj = loadMesh(geom)
else:
msg = "Unsupported geometry type for %s (%s)" % (geometry_object.name, type(geom) )
warnings.warn(msg, category=UserWarning, stacklevel=2)
obj = None
return obj
def loadMesh(self, geometry_object):
import meshcat.geometry
# Mesh path is empty if Pinocchio is built without HPP-FCL bindings
if geometry_object.meshPath == "":
msg = "Display of geometric primitives is supported only if pinocchio is build with HPP-FCL bindings."
warnings.warn(msg, category=UserWarning, stacklevel=2)
return None
# Get file type from filename extension.
_, file_extension = os.path.splitext(geometry_object.meshPath)
if file_extension.lower() == ".dae":
obj = meshcat.geometry.DaeMeshGeometry.from_file(geometry_object.meshPath)
elif file_extension.lower() == ".obj":
obj = meshcat.geometry.ObjMeshGeometry.from_file(geometry_object.meshPath)
elif file_extension.lower() == ".stl":
obj = meshcat.geometry.StlMeshGeometry.from_file(geometry_object.meshPath)
else:
msg = "Unknown mesh file format: {}.".format(geometry_object.meshPath)
warnings.warn(msg, category=UserWarning, stacklevel=2)
obj = None
return obj
def loadViewerGeometryObject(self, geometry_object, geometry_type, color=None):
"""Load a single geometry object"""
import meshcat.geometry
viewer_name = self.getViewerNodeName(geometry_object, geometry_type)
is_mesh = False
try:
if WITH_HPP_FCL_BINDINGS and isinstance(geometry_object.geometry, hppfcl.ShapeBase):
obj = self.loadPrimitive(geometry_object)
elif isMesh(geometry_object):
obj = self.loadMesh(geometry_object)
is_mesh = True
elif WITH_HPP_FCL_BINDINGS and isinstance(geometry_object.geometry, (hppfcl.BVHModelBase,hppfcl.HeightFieldOBBRSS)):
obj = loadMesh(geometry_object.geometry)
else:
msg = "The geometry object named " + geometry_object.name + " is not supported by Pinocchio/MeshCat for vizualization."
warnings.warn(msg, category=UserWarning, stacklevel=2)
return
if obj is None:
return
except Exception as e:
msg = "Error while loading geometry object: %s\nError message:\n%s" % (geometry_object.name, e)
warnings.warn(msg, category=UserWarning, stacklevel=2)
return
if isinstance(obj, meshcat.geometry.Object):
self.viewer[viewer_name].set_object(obj)
elif isinstance(obj, meshcat.geometry.Geometry):
material = meshcat.geometry.MeshPhongMaterial()
# Set material color from URDF, converting from a triplet of doubles to a single int.
if color is None:
meshColor = geometry_object.meshColor
else:
meshColor = color
material.color = int(meshColor[0] * 255) * 256**2 + int(meshColor[1] * 255) * 256 + int(meshColor[2] * 255)
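# Illustrative example: meshColor = [1.0, 0.5, 0.0, 1.0] gives
# 255*256**2 + 127*256 + 0 = 16744192 (0xFF7F00), an orange material.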
# Add transparency, if needed.
if float(meshColor[3]) != 1.0:
material.transparent = True
material.opacity = float(meshColor[3])
self.viewer[viewer_name].set_object(obj, material)
if is_mesh: # Apply the scaling
scale = list(np.asarray(geometry_object.meshScale).flatten())
self.viewer[viewer_name].set_property("scale",scale)
def loadViewerModel(self, rootNodeName="pinocchio", color = None):
"""Load the robot in a MeshCat viewer.
Parameters:
rootNodeName: name to give to the robot in the viewer
color: optional, color to give to the robot. This overwrites the color present in the urdf.
| |
len(all_orfs)
# For the Bonferroni correction, we only correct for the number of tests
# we actually consider; that is, we only correct for ORFs which pass
# the base filter
corrected_significance_level = chisq_alpha / M
msg = "Corrected significance level: {}".format(corrected_significance_level)
logger.debug(msg)
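# Illustrative arithmetic (hypothetical values): with chisq_alpha = 0.01 and M = 20000 ORFs
# passing the base filter, the corrected threshold is 0.01 / 20000 = 5e-07.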
m_chisq_pval = all_orfs['chi_square_p'] < corrected_significance_level
predicted_orfs = all_orfs[m_chisq_pval]
else:
m_bf = get_bf_filter(all_orfs, min_bf_mean, max_bf_var, min_bf_likelihood)
predicted_orfs = all_orfs[m_bf]
if select_longest_by_stop:
all_orfs = bed_utils.get_longest_features_by_end(all_orfs)
predicted_orfs = bed_utils.get_longest_features_by_end(predicted_orfs)
return (all_orfs, predicted_orfs)
###
# Defaults for b-tea scripts
###
default_perm_test_min_rpkm_mean = 1
default_perm_test_max_rpkm_var_power = 1
###
# Field names for b-tea files
###
field_map = {
"ribo": "Riboseq",
"rna": "RNA-seq",
"te": "Translational Efficiency"
}
fields = sorted(field_map.keys())
field_name_order = [field_map[f] for f in fields]
def get_field_name(field):
""" This function maps from the field to a human-readable name.
"""
return field_map[field]
mean_field_map = {
"ribo": "ribo_abundance_mean_loc",
"rna": "rna_abundance_mean_loc",
"te": "log_translational_efficiency_loc"
}
var_field_map = {
"ribo": "ribo_abundance_var_loc",
"rna": "rna_abundance_var_loc",
"te": "log_translational_efficiency_scale"
}
###
# The following functions are all related. They are used to estimate p-values
# for the KL-divergence values calculated for translational efficiency (only).
###
def get_basic_filter(kl, condition_1, condition_2, field):
""" Mask kl to filter on the conditions and field. """
m_condition_1 = kl['condition_1'] == condition_1
m_condition_2 = kl['condition_2'] == condition_2
m_field = kl['field'] == field
m_basic = m_condition_1 & m_condition_2 & m_field
return m_basic
def get_rpkm_mean_filter(kl, min_rpkm_mean):
""" Mask kl to filter on the estimated means. """
m_min_rpkm_mean_1 = kl['mean_1'] > min_rpkm_mean
m_min_rpkm_mean_2 = kl['mean_2'] > min_rpkm_mean
m_min_rpkm_mean = m_min_rpkm_mean_1 & m_min_rpkm_mean_2
return m_min_rpkm_mean
def get_rpkm_var_power_filter(kl, max_rpkm_var_power):
""" Mask kl to filter on the variances as a power of the means. """
import numpy as np
m_max_rpkm_var_1 = kl['var_1'] < np.power(kl['mean_1'], max_rpkm_var_power)
m_max_rpkm_var_2 = kl['var_2'] < np.power(kl['mean_2'], max_rpkm_var_power)
m_max_rpkm_var = m_max_rpkm_var_1 & m_max_rpkm_var_2
return m_max_rpkm_var
def get_basic_and_rpkm_filter(kl, condition_1, condition_2, field,
min_rpkm_mean, max_rpkm_var_power):
""" Mask kl using all of the indicated filters. This handles TE as the
combination of both riboseq and rnaseq.
"""
if field == "te":
# first, get the genes which meet the rpkm requirements
m_ribo = get_basic_and_rpkm_filter(
kl,
condition_1,
condition_2,
"ribo",
min_rpkm_mean,
max_rpkm_var_power
)
m_rna = get_basic_and_rpkm_filter(
kl,
condition_1,
condition_2,
"rna",
min_rpkm_mean,
max_rpkm_var_power
)
# find the gene ids that meet both filters
ribo_gene_ids = set(kl.loc[m_ribo, 'gene_id'].unique())
rna_gene_ids = set(kl.loc[m_rna, 'gene_id'].unique())
gene_ids = ribo_gene_ids & rna_gene_ids
# get all te rows for these conditions
m_basic = get_basic_filter(kl, condition_1, condition_2, "te")
# and only keep the genes which met both rpkm requirements
m_gene_ids = kl['gene_id'].isin(gene_ids)
m_all = m_basic & m_gene_ids
else:
m_basic = get_basic_filter(kl, condition_1, condition_2, field)
m_min_rpkm_mean = get_rpkm_mean_filter(kl, min_rpkm_mean)
m_max_rpkm_var = get_rpkm_var_power_filter(kl, max_rpkm_var_power)
m_all = m_basic & m_min_rpkm_mean & m_max_rpkm_var
return m_all
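# Example usage (condition names illustrative, following the "sham.cm"-style names used
# elsewhere in this module):
#   m = get_basic_and_rpkm_filter(kl, "sham.cm", "mcao.cm", "te",
#                                 min_rpkm_mean=1, max_rpkm_var_power=1)
#   kl_te = kl[m]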
###
# These functions are all based on the old "wide" data frame format. Thus, they
# have all been deprecated.
###
mean_format_map = {
"te": "log_translational_efficiency_loc_{1}",
"ribo": "{}_abundance_mean_loc_{}",
"rna": "{}_abundance_mean_loc_{}"
}
var_format_map = {
"te": "log_translational_efficiency_scale_{1}",
"ribo": "{}_abundance_var_loc_{}",
"rna": "{}_abundance_var_loc_{}"
}
# decorator to raise the deprecated warning
def ribo_deprecated(func):
""" Issue a warning that the given function uses the "wide" df format and
should be replaced with the easier to work with "long" format.
"""
def wrapper(*args, **kwargs):
msg = ("[ribo_utils.{}]: This function has been deprecated. It uses "
"the old \"wide\" df format. Please replace it with the "
"respective \"long\" df format function.".format(func.__name__))
logger.warning(msg)
return func(*args, **kwargs)
return wrapper
@ribo_deprecated
def get_mean_var_column_names(field, condition):
""" This function returns the name of the columns containing the mean and
variance for the given field and condition.
Parameters
----------
field : string
The name of the field in question. Valid values are:
* te
* ribo
* rna
condition : string
The name of the condition (e.g., "sham.cm")
Returns
-------
mean_column : string
The name of the column containing the means for this field
var_column : string
The name of the column containing the variances for this field
"""
mean_field = mean_format_map[field].format(field, condition)
var_field = var_format_map[field].format(field, condition)
return (mean_field, var_field)
kl_format_map = {
"te": "log_translational_efficiency_{}_{}_kl_divergence",
"ribo": "ribo_abundance_{}_{}_kl_divergence",
"rna": "rna_abundance_{}_{}_kl_divergence"
}
pvalue_format_map = {
"te": "log_translational_efficiency_{}_{}_pvalue",
"ribo": "ribo_abundance_{}_{}_pvalue",
"rna": "rna_abundance_{}_{}_pvalue"
}
@ribo_deprecated
def get_kl_pvalue_column_name(field, condition_1, condition_2):
""" This function returns the names of the columns containing the estimated
KL-divergence and pvalues for the two conditions and field.
Parameters
----------
field : string
The name of the field in question. Valid values are:
* te
* ribo
* rna
condition_{1,2} : string
The name of the condition (e.g., "sham.cm")
Returns
-------
kl_column : string
The name of the column containing the KL-divergence for this field
pvalue_column : string
The name of the column containing the means-values for this field
"""
kl_field = kl_format_map[field].format(condition_1, condition_2)
pvalue_field = pvalue_format_map[field].format(condition_1, condition_2)
return (kl_field, pvalue_field)
significant_pvalue_format_map = {
"te": "significant_te_{}_{}",
"ribo": "significant_ribo_{}_{}",
"rna": "significant_rna_{}_{}"
}
@ribo_deprecated
def get_significant_pvalue_column_name(field, condition_1, condition_2):
""" Column name indicating the specified estimates significantly differ
Parameters
----------
field : string
The name of the field in question. Valid values are:
* te
* ribo
* rna
condition_{1,2} : string
The name of the conditions (e.g., "sham.cm")
Returns
-------
significant_column: string
Name of the column indicating significance
"""
sig_pval_col = significant_pvalue_format_map[field]
sig_pval_col = sig_pval_col.format(condition_1, condition_2)
return sig_pval_col
@ribo_deprecated
def get_micropeptide_overlap_column_name(condition):
""" Column name indicating an overlap with a micropeptide
Parameters
----------
condition: string
The name of the condition (e.g., "sham.cm")
Returns
-------
Name of the column indicating an overlap
"""
return "has_micropeptide_overlap_{}".format(condition)
log_fold_change_map = {
"te": "log_translational_efficiency_{}_{}_log_fold_change",
"ribo": "ribo_abundance_{}_{}_log_fold_change",
"rna": "rna_abundance_{}_{}_log_fold_change"
}
@ribo_deprecated
def get_log_fold_change_field_name(field, condition_1, condition_2):
lfc_field = log_fold_change_map[field].format(condition_1, condition_2)
return lfc_field
@ribo_deprecated
def get_log_fold_changes(df, condition_pairs):
""" This function creates a new data frame which includes all of the log
fold changes (TE, riboseq and RNA-seq) for each of the condition
pairs in the given list.
The returned data frame could be joined to the original df with a
command like:
pd.concat([df, log_fold_changes_df], axis=1)
Parameters
----------
df : pd.DataFrame
A data frame containing the "mean" fields
condition_pairs : list of 2-tuple-likes of strings
The pairs of conditions for which the log fold changes will be
included in the returns data frame
Returns
-------
log_fold_changes_df : pd.DataFrame
A data frame containing all of the requested log fold changes
"""
import numpy as np
import pandas as pd
log_fold_changes_df = pd.DataFrame()
for (condition_1, condition_2) in condition_pairs:
field = 'te'
field_1 = mean_format_map[field].format(field, condition_1)
field_2 = mean_format_map[field].format(field, condition_2)
lfc_field = log_fold_change_map[field].format(condition_1, condition_2)
log_fold_changes_df[lfc_field] = df[field_2] - df[field_1]
field = 'ribo'
field_1 = mean_format_map[field].format(field, condition_1)
field_2 = mean_format_map[field].format(field, condition_2)
lfc_field = log_fold_change_map[field].format(condition_1, condition_2)
log_fold_changes_df[lfc_field] = np.log(df[field_2]) - np.log(df[field_1])
field = 'rna'
field_1 = mean_format_map[field].format(field, condition_1)
field_2 = mean_format_map[field].format(field, condition_2)
lfc_field = log_fold_change_map[field].format(condition_1, condition_2)
log_fold_changes_df[lfc_field] = np.log(df[field_2]) - np.log(df[field_1])
return log_fold_changes_df
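# Hedged usage sketch: `df` is assumed to already contain the per-condition
# "mean" columns produced upstream (via mean_format_map); the result is joined
# back onto the original frame exactly as the docstring above suggests.
#
#   condition_pairs = [("sham.cm", "mcao.cm")]
#   log_fold_changes_df = get_log_fold_changes(df, condition_pairs)
#   df = pd.concat([df, log_fold_changes_df], axis=1)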
@ribo_deprecated
def get_variance_power_filter(kl_df, condition_1, condition_2, field, power=0.5):
import numpy as np
# first, get the field names for which we want significances
if field == "log_translational_efficiency":
# filter by both rna_abundance and ribo_abundance in both samples
ribo_var_1_f = "ribo_abundance_var_loc_{}".format(condition_1)
ribo_var_2_f = "ribo_abundance_var_loc_{}".format(condition_2)
rna_var_1_f = "rna_abundance_var_loc_{}".format(condition_1)
rna_var_2_f = "rna_abundance_var_loc_{}".format(condition_2)
# filter by both rna_abundance and ribo_abundance in both samples
ribo_mean_1_f = "ribo_abundance_mean_loc_{}".format(condition_1)
ribo_mean_2_f = "ribo_abundance_mean_loc_{}".format(condition_2)
rna_mean_1_f = "rna_abundance_mean_loc_{}".format(condition_1)
rna_mean_2_f = "rna_abundance_mean_loc_{}".format(condition_2)
m_ribo_1 = abs(kl_df[ribo_var_1_f]) < np.power(abs(kl_df[ribo_mean_1_f]), power)
m_ribo_2 = abs(kl_df[ribo_var_2_f]) < np.power(abs(kl_df[ribo_mean_2_f]), power)
m_rna_1 = abs(kl_df[rna_var_1_f]) < np.power(abs(kl_df[rna_mean_1_f]), power)
m_rna_2 = abs(kl_df[rna_var_2_f]) < np.power(abs(kl_df[rna_mean_2_f]), power)
m_filter = (m_ribo_1 & m_ribo_2 & m_rna_1 & m_rna_2)
else:
var_1_f = "{}_var_loc_{}".format(field, condition_1)
var_2_f = "{}_var_loc_{}".format(field, condition_2)
mean_1_f = "{}_mean_loc_{}".format(field, condition_1)
mean_2_f = "{}_mean_loc_{}".format(field, condition_2)
# also get the filter
m_1 = abs(kl_df[var_1_f]) < np.power(abs(kl_df[mean_1_f]), power)
m_2 = abs(kl_df[var_2_f]) < np.power(abs(kl_df[mean_2_f]), power)
m_filter = (m_1 & m_2)
return m_filter
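# Hedged usage sketch: the returned boolean Series is intended as a row mask on
# the same data frame (columns such as "ribo_abundance_var_loc_<condition>" are
# assumed to be present, as in the code above).
#
#   m_var = get_variance_power_filter(kl_df, "sham.cm", "mcao.cm",
#                                     "ribo_abundance", power=0.5)
#   kl_df_filtered = kl_df[m_var]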
@ribo_deprecated
def get_variance_filter(kl_df, condition_1, condition_2, field, max_var=0.5):
# first, get the field names for which we want significances
if field == "log_translational_efficiency":
# filter by both rna_abundance and ribo_abundance in both samples
ribo_var_1_f = "ribo_abundance_var_loc_{}".format(condition_1)
ribo_var_2_f = "ribo_abundance_var_loc_{}".format(condition_2)
rna_var_1_f = "rna_abundance_var_loc_{}".format(condition_1)
rna_var_2_f = "rna_abundance_var_loc_{}".format(condition_2)
m_ribo_1 = abs(kl_df[ribo_var_1_f]) < max_var
m_ribo_2 = abs(kl_df[ribo_var_2_f]) < max_var
m_rna_1 = abs(kl_df[rna_var_1_f]) < max_var
m_rna_2 = abs(kl_df[rna_var_2_f]) < max_var
m_filter = (m_ribo_1 & m_ribo_2 & m_rna_1 & m_rna_2)
else:
var_1_f = "{}_var_loc_{}".format(field, condition_1)
var_2_f = "{}_var_loc_{}".format(field, condition_2)
# also get the filter
m_1 = abs(kl_df[var_1_f]) < max_var
able to parse this type...
# no_type_match_but_ext_match.append(p)
pass
# then the specific
for p in self._specific_parsers:
match, exact_match = p.is_able_to_parse_detailed(desired_type=desired_type,
desired_ext=required_ext,
strict=strict)
if match:
if is_any_type(desired_type):
# special case: don't register as a type match
no_type_match_but_ext_match.append(p)
else:
if exact_match is None or exact_match:
matching_parsers_exact.append(p)
else:
matching_parsers_approx.append(p)
else:
# try to set the type to a supported type to see if that makes a match
if p.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict):
no_type_match_but_ext_match.append(p)
# try to set the ext to a supported ext to see if that makes a match
elif p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
no_ext_match_but_type_match.append(p)
# no match at all
else:
no_match.append(p)
return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), \
no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
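# A minimal sketch (not part of the original module) of how the four buckets
# returned above are typically consumed; it mirrors the selection logic in
# ParserRegistry.__build_parser_for_fileobject_and_desiredtype further below,
# where generic matches come first and exact matches last (the preferred end).
def _sketch_collect_matching_parsers(registry, desired_type, required_ext, strict=True):
    matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
        registry.find_all_matching_parsers(strict=strict, desired_type=desired_type,
                                           required_ext=required_ext)
    matching_parsers_generic, matching_parsers_approx, matching_parsers_exact = matching
    # flatten in order generic -> approximate -> exact; callers usually reverse
    # this list when building a CascadingParser so that exact matches are tried first
    return matching_parsers_generic + matching_parsers_approx + matching_parsers_exact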
class ParserRegistry(ParserCache, ParserFinder, DelegatingParser):
"""
A manager of specific and generic parsers
"""
def __init__(self, pretty_name: str, strict_matching: bool, initial_parsers_to_register: List[Parser] = None):
"""
Constructor. Initializes the dictionary of parsers with the optionally provided initial_parsers, and
inits the lock that will be used for access in multithreading context.
:param initial_parsers_to_register:
"""
super(ParserRegistry, self).__init__()
self.pretty_name = pretty_name
check_var(strict_matching, var_types=bool, var_name='strict_matching')
self.is_strict = strict_matching
# add provided parsers
if initial_parsers_to_register is not None:
self.register_parsers(initial_parsers_to_register)
def __str__(self):
return self.pretty_name
def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger,
log_only_last: bool = False) -> ParsingPlan[T]:
"""
Implementation of Parser API
Relies on the underlying registry of parsers to provide the best parsing plan
:param desired_type:
:param filesystem_object:
:param logger:
:param log_only_last: a flag to only log the last part of the file path (default False)
:return:
"""
# find the parser for this object
t, combined_parser = self.build_parser_for_fileobject_and_desiredtype(filesystem_object, desired_type,
logger=logger)
# ask the parser for the parsing plan
return combined_parser.create_parsing_plan(t, filesystem_object, logger)
def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_type: Type[T],
logger: Logger = None) -> Tuple[Type, Parser]:
"""
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a CascadingParser that will try them in order.
If several alternatives are requested (through a root Union type), this is done independently for each
alternative.
:param obj_on_filesystem:
:param object_type:
:param logger:
:return: a type to use and a parser. The type to use is either directly the one provided, or a resolved one in
case of TypeVar
"""
# First resolve TypeVars and Unions to get a list of compliant types
object_types = get_alternate_types_resolving_forwardref_union_and_typevar(object_type)
if len(object_types) == 1:
# One type: proceed as usual
parsers = self._build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=object_types[0],
logger=logger)
if len(parsers) > 1:
return object_types[0], CascadingParser(parsers)
else:
return next(iter(parsers.items()))
else:
# Several alternate types are supported: try to build a parser for each
parsers = OrderedDict()
errors = OrderedDict()
for typ in object_types:
try:
parsers.update(self._build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=typ,
logger=logger))
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
# Combine if there are remaining, otherwise raise
if len(parsers) > 0:
return object_type, CascadingParser(parsers)
else:
raise NoParserFoundForUnionType.create(obj_on_filesystem, object_type, errors)
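# Note on the branch above: when the requested root type is a Union (or a
# TypeVar), get_alternate_types_resolving_forwardref_union_and_typevar splits it
# into its alternatives, a parser is built independently for each alternative,
# and the results are combined into a single CascadingParser that tries them one
# after another. For example (illustrative types only), requesting
# Union[int, MyClass] for a file builds one parser chain per alternative and
# NoParserFoundForUnionType is raised only if none of them can be built.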
def _build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Dict[Type, Parser]:
"""
Builds a parser for each subtype of object_typ
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
parsers = OrderedDict()
errors = OrderedDict()
try:
p = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=object_typ,
logger=logger)
parsers[object_typ] = p
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
# do not explore subclasses for collections
if is_collection(object_typ, strict=True):
if len(errors) > 0:
raise next(iter(errors.values()))
else:
return parsers
# Finally create one such parser for each subclass
subclasses = get_all_subclasses(object_typ)
# Then for each subclass also try (with a configurable limit in nb of subclasses)
for subclass in subclasses[0:GLOBAL_CONFIG.dict_to_object_subclass_limit]:
try:
parsers[subclass] = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem,
object_typ=subclass,
logger=logger)
except NoParserFoundForObjectExt as e:
logger.warning("{} - {}".format(type(e).__name__, e))
errors[e] = e
except NoParserFoundForObjectType as f:
logger.warning("{} - {}".format(type(f).__name__, f))
errors[f] = f
if len(subclasses) > GLOBAL_CONFIG.dict_to_object_subclass_limit:
warn('Type {} has more than {} subclasses, only {} were tried to convert it, with no success. You '
'can raise this limit by setting the appropriate option with `parsyfiles_global_config()`'
''.format(object_typ, len(subclasses), GLOBAL_CONFIG.dict_to_object_subclass_limit))
return parsers
def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Parser:
"""
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a CascadingParser that will try them in order.
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
# first remove any non-generic customization
object_type = get_base_generic_type(object_typ)
# find all matching parsers for this
matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type,
required_ext=obj_on_filesystem.ext)
matching_parsers = matching[0] + matching[1] + matching[2]
if len(matching_parsers) == 0:
# No match. Do we have a close match ? (correct type, but not correct extension ?)
if len(no_ext_match_but_type_match) > 0:
raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type,
set([ext_ for ext_set in
[p.supported_exts for p in no_ext_match_but_type_match]
for ext_ in ext_set]))
else:
# no, no match at all
raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type,
set([typ_ for typ_set in
[p.supported_types for p in no_type_match_but_ext_match]
for typ_ in typ_set]))
elif len(matching_parsers) == 1:
# return the match directly
return matching_parsers[0]
else:
# return a cascade of all parsers, in reverse order (since last is our preferred one)
# print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.')
return CascadingParser(list(reversed(matching_parsers)))
class AttrConversionException(ConversionException):
"""
Raised whenever parsing fails
"""
def __init__(self, contents):
"""
We actually can't put more than 1 argument in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
That's why we have a helper static method create()
:param contents:
"""
super(AttrConversionException, self).__init__(contents)
@staticmethod
def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return:
"""
base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \
" - parsed value is : '{v}' of type <{tv}>\n" \
"".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att,
tv=get_pretty_type_str(type(parsed_att)))
msg = StringIO()
if len(list(caught_exec.keys())) > 0:
msg.writelines(' - converters tried are : \n * ')
msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()]))
msg.writelines(' \n Caught the following exceptions: \n')
for converter, err in caught_exec.items():
msg.writelines('--------------- From ' + str(converter) + ' caught: \n')
print_error_to_io_stream(err, msg)
msg.write('\n')
return AttrConversionException(base_msg + msg.getvalue())
class NoConverterFoundForObjectType(Exception):
"""
Raised whenever no ConversionFinder has been provided, while a dictionary value needs conversion to be used as an
object constructor attribute
"""
def __init__(self, contents: str):
"""
We actually can't put more than 1 argument in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
That's why we have a helper static method create()
:param contents:
"""
super(NoConverterFoundForObjectType, self).__init__(contents)
@staticmethod
def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parsed_att:
:param attribute_type:
:param conversion_finder:
:return:
"""
if conversion_finder is None:
msg = "No conversion finder provided to find a converter between parsed attribute '{patt}' of type " \
"'{typ}' and expected type '{expt}'.".format(patt=str(parsed_att),
typ=get_pretty_type_str(type(parsed_att)),
expt=get_pretty_type_str(attribute_type))
else:
msg = "No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type " \
"'{expt}' using conversion finder {conv}.".format(patt=parsed_att,
typ=get_pretty_type_str(type(parsed_att)),
expt=get_pretty_type_str(attribute_type),
conv=conversion_finder)
if errors is not None:
msg = msg + ' ' + str(errors)
return NoConverterFoundForObjectType(msg)
# def _handle_from_type_wildcard(desired_from_type: Optional[Type], c: Converter):
# return desired_from_type or c.from_type
#
#
# def _handle_to_type_wildcard(desired_type: Optional[Type], c: Converter):
# return desired_type or c.to_type
class ConversionFinder(metaclass=ABCMeta):
"""
Abstract class for objects able to find
selector with unquoted value, not '
'matching class attribute not beginning with specified '
'substring',
'selector': '#attr-begins [class^= apple]'},
{
'expect': ['attr-ends-a1', 'attr-ends-a3'],
'level': 3,
'name': 'Attribute ends with selector, matching href attributes '
'ending with specified substring',
'selector': '#attr-ends a[href$=".org"]'},
{
'expect': ['attr-ends-div2', 'attr-ends-div4'],
'level': 3,
'name': 'Attribute ends with selector, matching lang attributes '
'ending with specified substring, ',
'selector': '#attr-ends [lang$="-CH"]'},
{
'expect': [],
'level': 3,
'name': 'Attribute ends with selector, not matching class attribute '
'with empty value',
'selector': '#attr-ends [class$=""]'},
{
'expect': [],
'level': 3,
'name': 'Attribute ends with selector, not matching class attribute '
'not ending with specified substring',
'selector': '#attr-ends [class$=apple]'},
{
'expect': ['attr-ends-p1'],
'level': 3,
'name': 'Attribute ends with selector with single-quoted value, '
'matching class attribute ending with specified substring',
'selector': "#attr-ends [class$='apple ']"},
{
'expect': ['attr-ends-p1'],
'level': 3,
'name': 'Attribute ends with selector with double-quoted value, '
'matching class attribute ending with specified substring',
'selector': '#attr-ends [class$="apple "]'},
{
'expect': [],
'level': 3,
'name': 'Attribute ends with selector with unquoted value, not '
'matching class attribute not ending with specified substring',
'selector': '#attr-ends [class$=apple ]'},
{
'expect': ['attr-contains-a1', 'attr-contains-a3'],
'level': 3,
'name': 'Attribute contains selector, matching href attributes '
'beginning with specified substring',
'selector': '#attr-contains a[href*="http://www"]'},
{
'expect': ['attr-contains-a1', 'attr-contains-a2'],
'level': 3,
'name': 'Attribute contains selector, matching href attributes ending '
'with specified substring',
'selector': '#attr-contains a[href*=".org"]'},
{
'expect': ['attr-contains-a1', 'attr-contains-a3'],
'level': 3,
'name': 'Attribute contains selector, matching href attributes '
'containing specified substring',
'selector': '#attr-contains a[href*=".example."]'},
{
'expect': ['attr-contains-div2', 'attr-contains-div6'],
'level': 3,
'name': 'Attribute contains selector, matching lang attributes '
'beginning with specified substring, ',
'selector': '#attr-contains [lang*="en-"]'},
{
'expect': ['attr-contains-div3', 'attr-contains-div5'],
'level': 3,
'name': 'Attribute contains selector, matching lang attributes ending '
'with specified substring, ',
'selector': '#attr-contains [lang*="-CH"]'},
{
'expect': [],
'level': 3,
'name': 'Attribute contains selector, not matching class attribute '
'with empty value',
'selector': '#attr-contains [class*=""]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with single-quoted value, '
'matching class attribute beginning with specified substring',
'selector': "#attr-contains [class*=' apple']"},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with single-quoted value, '
'matching class attribute ending with specified substring',
'selector': "#attr-contains [class*='orange ']"},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with single-quoted value, '
'matching class attribute containing specified substring',
'selector': "#attr-contains [class*='ple banana ora']"},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with double-quoted value, '
'matching class attribute beginning with specified substring',
'selector': '#attr-contains [class*=" apple"]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with double-quoted value, '
'matching class attribute ending with specified substring',
'selector': '#attr-contains [class*="orange "]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with double-quoted value, '
'matching class attribute containing specified substring',
'selector': '#attr-contains [class*="ple banana ora"]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with unquoted value, matching '
'class attribute beginning with specified substring',
'selector': '#attr-contains [class*= apple]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with unquoted value, matching '
'class attribute ending with specified substring',
'selector': '#attr-contains [class*=orange ]'},
{
'expect': ['attr-contains-p1'],
'level': 3,
'name': 'Attribute contains selector with unquoted value, matching '
'class attribute containing specified substring',
'selector': '#attr-contains [class*= banana ]'},
{
'exclude': ['element', 'fragment', 'detached'],
'expect': ['html'],
'level': 3,
'name': ':root pseudo-class selector, matching document root element',
'selector': ':root'},
{
'exclude': ['document'],
'expect': [],
'level': 3,
'name': ':root pseudo-class selector, not matching document root '
'element',
'selector': ':root'},
{
'expect': [
'pseudo-nth-td3',
'pseudo-nth-td9',
'pseudo-nth-tr3',
'pseudo-nth-td15'],
'level': 3,
'name': ':nth-child selector, matching the third child element',
'selector': '#pseudo-nth-table1 :nth-child(3)'},
{
'expect': [
'pseudo-nth-li3',
'pseudo-nth-li6',
'pseudo-nth-li9',
'pseudo-nth-li12'],
'level': 3,
'name': ':nth-child selector, matching every third child element',
'selector': '#pseudo-nth li:nth-child(3n)'},
{
'expect': [
'pseudo-nth-li4',
'pseudo-nth-li6',
'pseudo-nth-li8',
'pseudo-nth-li10',
'pseudo-nth-li12'],
'level': 3,
'name': ':nth-child selector, matching every second child element, '
'starting from the fourth',
'selector': '#pseudo-nth li:nth-child(2n+4)'},
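# Worked check of the an+b arithmetic for the entry above: ':nth-child(2n+4)'
# matches child positions 2*n + 4 for n = 0, 1, 2, ..., i.e. positions
# 4, 6, 8, 10, 12, ..., which is why the expected ids run li4 through li12 in
# steps of two.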
{
'expect': ['pseudo-nth-em2', 'pseudo-nth-span3'],
'level': 3,
'name': ':nth-child selector, matching every fourth child element, '
'starting from the third',
'selector': '#pseudo-nth-p1 :nth-child(4n-1)'},
{
'expect': [
'pseudo-nth-tr1',
'pseudo-nth-td4',
'pseudo-nth-td10',
'pseudo-nth-td16'],
'level': 3,
'name': ':nth-last-child selector, matching the third last child '
'element',
'selector': '#pseudo-nth-table1 :nth-last-child(3)'},
{
'expect': [
'pseudo-nth-li1',
'pseudo-nth-li4',
'pseudo-nth-li7',
'pseudo-nth-li10'],
'level': 3,
'name': ':nth-last-child selector, matching every third child element '
'from the end',
'selector': '#pseudo-nth li:nth-last-child(3n)'},
{
'expect': [
'pseudo-nth-li1',
'pseudo-nth-li3',
'pseudo-nth-li5',
'pseudo-nth-li7',
'pseudo-nth-li9'],
'level': 3,
'name': ':nth-last-child selector, matching every second child '
'element from the end, starting from the fourth last',
'selector': '#pseudo-nth li:nth-last-child(2n+4)'},
{
'expect': ['pseudo-nth-span2', 'pseudo-nth-span4'],
'level': 3,
'name': ':nth-last-child selector, matching every fourth element from '
'the end, starting from the third last',
'selector': '#pseudo-nth-p1 :nth-last-child(4n-1)'},
{
'expect': ['pseudo-nth-em3'],
'level': 3,
'name': ':nth-of-type selector, matching the third em element',
'selector': '#pseudo-nth-p1 em:nth-of-type(3)'},
{
'expect': [
'pseudo-nth-em2',
'pseudo-nth-span2',
'pseudo-nth-span4',
'pseudo-nth-strong2',
'pseudo-nth-em4'],
'level': 3,
'name': ':nth-of-type selector, matching every second element of '
'their type',
'selector': '#pseudo-nth-p1 :nth-of-type(2n)'},
{
'expect': ['pseudo-nth-span1', 'pseudo-nth-span3'],
'level': 3,
'name': ':nth-of-type selector, matching every second element of '
'their type, starting from the first',
'selector': '#pseudo-nth-p1 span:nth-of-type(2n-1)'},
{
'expect': ['pseudo-nth-em2'],
'level': 3,
'name': ':nth-last-of-type selector, matching the third last em '
'element',
'selector': '#pseudo-nth-p1 em:nth-last-of-type(3)'},
{
'expect': [
'pseudo-nth-span1',
'pseudo-nth-em1',
'pseudo-nth-strong1',
'pseudo-nth-em3',
'pseudo-nth-span3'],
'level': 3,
'name': ':nth-last-of-type selector, matching every second last '
'element of their type',
'selector': '#pseudo-nth-p1 :nth-last-of-type(2n)'},
{
'expect': ['pseudo-nth-span2', 'pseudo-nth-span4'],
'level': 3,
'name': ':nth-last-of-type selector, matching every second last '
'element of their type, starting from the last',
'selector': '#pseudo-nth-p1 span:nth-last-of-type(2n-1)'},
{
'expect': ['pseudo-nth-em1'],
'level': 3,
'name': ':first-of-type selector, matching the first em element',
'selector': '#pseudo-nth-p1 em:first-of-type'},
{
'expect': ['pseudo-nth-span1', 'pseudo-nth-em1', 'pseudo-nth-strong1'],
'level': 3,
'name': ':first-of-type selector, matching the first of every type of '
'element',
'selector': '#pseudo-nth-p1 :first-of-type'},
{
'expect': ['pseudo-nth-td1', 'pseudo-nth-td7', 'pseudo-nth-td13'],
'level': 3,
'name': ':first-of-type selector, matching the first td element in '
'each table row',
'selector': '#pseudo-nth-table1 tr :first-of-type'},
{
'expect': ['pseudo-nth-em4'],
'level': 3,
'name': ':last-of-type selector, matching the last em element',
'selector': '#pseudo-nth-p1 em:last-of-type'},
{
'expect': ['pseudo-nth-span4', 'pseudo-nth-strong2', 'pseudo-nth-em4'],
'level': 3,
'name': ':last-of-type selector, matching the last of every type of '
'element',
'selector': '#pseudo-nth-p1 :last-of-type'},
{
'expect': ['pseudo-nth-td6', 'pseudo-nth-td12', 'pseudo-nth-td18'],
'level': 3,
'name': ':last-of-type selector, matching the last td element in each '
'table row',
'selector': '#pseudo-nth-table1 tr :last-of-type'},
{
'expect': ['pseudo-first-child-div1'],
'level': 2,
'name': ':first-child pseudo-class selector, matching first child div '
'element',
'selector': '#pseudo-first-child div:first-child'},
{
'expect': [],
'level': 2,
'name': ":first-child pseudo-class selector, doesn't match "
'non-first-child elements',
'selector': '.pseudo-first-child-div2:first-child, '
'.pseudo-first-child-div3:first-child'},
{
'expect': [
'pseudo-first-child-span1',
'pseudo-first-child-span3',
'pseudo-first-child-span5'],
'level': 2,
'name': ':first-child pseudo-class selector, matching first-child of '
'multiple elements',
'selector': '#pseudo-first-child span:first-child'},
{
'expect': ['pseudo-last-child-div3'],
'level': 3,
'name': ':last-child pseudo-class selector, matching last child div '
'element',
'selector': '#pseudo-last-child div:last-child'},
{
'expect': [],
'level': 3,
'name': ":last-child pseudo-class selector, doesn't match "
'non-last-child elements',
'selector': '.pseudo-last-child-div1:last-child, '
'.pseudo-last-child-div2:first-child'},
{
'expect': [
'pseudo-last-child-span2',
'pseudo-last-child-span4',
'pseudo-last-child-span6'],
'level': 3,
'name': ':last-child pseudo-class selector, matching first-child of '
'multiple elements',
'selector': '#pseudo-last-child span:last-child'},
{
'expect': ['pseudo-only-span1'],
'level': 3,
'name': ':pseudo-only-child pseudo-class selector, matching all '
'only-child elements',
'selector': '#pseudo-only :only-child'},
{
'expect': [],
'level': 3,
'name': ':pseudo-only-child pseudo-class selector, matching '
'only-child em elements',
'selector': '#pseudo-only em:only-child'},
{
'expect': ['pseudo-only-span1', 'pseudo-only-em1'],
'level': 3,
'name': ':pseudo-only-of-type pseudo-class selector, matching all '
'elements with no siblings of the same type',
'selector': '#pseudo-only :only-of-type'},
{
'expect': ['pseudo-only-em1'],
'level': 3,
'name': ':pseudo-only-of-type pseudo-class selector, matching em '
'elements with no siblings of the same type',
'selector': '#pseudo-only em:only-of-type'},
{
'expect': ['pseudo-empty-p1', 'pseudo-empty-p2'],
'level': 3,
'name': ':empty pseudo-class selector, matching empty p elements',
'selector': '#pseudo-empty p:empty'},
{
'expect': ['pseudo-empty-p1', 'pseudo-empty-p2', 'pseudo-empty-span1'],
'level': 3,
'name': ':empty pseudo-class selector, matching all empty elements',
'selector': '#pseudo-empty :empty'},
{
'expect': ['pseudo-link-a1', 'pseudo-link-a2', 'pseudo-link-area1'],
'level': 1,
'name': ':link and :visited pseudo-class selectors, matching a and '
'area elements with href attributes',
'selector': '#pseudo-link :link, #pseudo-link :visited'},
{
'exclude': ['element', 'fragment', 'detached'],
'expect': [],
'level': 1,
'name': ':link and :visited
<gh_stars>0
""" Module providing the Fusion360 CommandBase.
"""
import sys
import os
from collections import deque
import traceback
from typing import List, Dict, Any
import logging
from abc import abstractmethod
import uuid
import adsk.core
import adsk.fusion
from .UiElements import UiItemFactory
from ..utilities import get_values
class HandlerState:
def __init__(self):
self.call_count = 0
def reset(self):
pass
def get_changed(self):
pass
class Fusion360CommandBase():
""" Handles ui element creation and setting up handlers for a Command.
This is used as an abstract base class. The child classes are implementing
the command logic.
"""
def __init__(
self,
fusion_app,
positions: List[UiItemFactory],
logger: logging.Logger,
debug_to_ui: bool = True,
):
self.fusion_app = fusion_app
self.cmd_name = os.path.basename(
sys.modules[self.__class__.__module__].__file__)
self.cmd_path = os.path.dirname(
os.path.abspath(sys.modules[self.__class__.__module__].__file__))
self.cmd_uuid = str(uuid.uuid4())
self.positions_info = positions
# path elements are stored as tuples; the second value indicates
# whether the element was created by the addin
self.position_paths = []
self.debug_to_ui = debug_to_ui
self.logger = logger
self.on_command_created_handler = _CommandCreatedEventHandler(self)
self.command_handlers = []
self.custom_command_handlers = {} # {event_id: customEventHandler}
self.fusion_command = None # needed sometimes for custom events
self.log_info('initialised {0} command'.format(self.cmd_name))
def show_error(self, message: str):
""" Shows an error message according to the command settings.
Args:
message (str): the message to be displayed
"""
self.logger.error('({0}) {1}'.format(self.cmd_name, message))
if self.debug_to_ui:
ui = adsk.core.Application.get().userInterface
ui.messageBox(message)
def log_info(self, message: str):
""" Logs an message according to the command settings.
Args:
message (str): the message to log
"""
self.logger.info('({0}) {1}'.format(self.cmd_name, message))
def register_custom_command_event(self, func):
event_id = func.__name__ + self.cmd_uuid
if event_id in self.custom_command_handlers.keys():
self.log_info(
'function \'{0}\' is already connected to a registered event'.
format(func.__name__))
return event_id
custom_event = adsk.core.Application.get().registerCustomEvent(
event_id)
on_custom_event = _CustomCommandEventHandler(self, func)
custom_event.add(on_custom_event)
self.custom_command_handlers[event_id] = on_custom_event
self.log_info(
'connected function \'{0}\' to a registered event'.format(
func.__name__))
return event_id
def fire_custom_command_event(self, func, args=''):
adsk.core.Application.get().fireCustomEvent(
func.__name__ + self.cmd_uuid, args)
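# Hedged usage sketch from inside a subclass (the callback name is invented):
#
#   def _heavy_work(self, args):
#       ...  # executed when Fusion fires the registered custom event
#
#   event_id = self.register_custom_command_event(self._heavy_work)
#   self.fire_custom_command_event(self._heavy_work, args='payload')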
def on_run(self):
""" Set up the ui elemnts and command definitons for the command.
For each position factors in everx position list of the positions_info
attribute, the in_fusion() method is run. The returned value is saved
as parent and given to the next in_fusion() method. Command definitions
are handled seperately.
Also the 'resources' attribute of the factory are manipulated.
The created/used ui elemnts are stored in the self.position_path attribute.
There are no checks to validate the structure of the given structure.
This should be done before.
"""
for position_count, position_path_info in enumerate(
self.positions_info):
self.log_info('adding position {0}'.format(position_count + 1))
for elem in position_path_info:
if hasattr(elem, 'resources') and elem.resources != '':
if not os.path.isdir(elem.resources):
elem.resources = os.path.abspath(
os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__))),
'resources', elem.resources))
position_path_info = deque(position_path_info)
this_position_path = []
# second value will always indicate if element got created by addin
last_ui_element = (adsk.core.Application.get().userInterface,
False)
while len(position_path_info) > 2:
new_ui_element = position_path_info.popleft().in_fusion(
last_ui_element[0])
self.log_info('positioned into {0} \'{1}\' {2}'.format(
'newly created' if new_ui_element[1] else 'existing',
new_ui_element[0].id,
type(new_ui_element[0]).__name__))
this_position_path.append(new_ui_element)
last_ui_element = new_ui_element
cmd_def = position_path_info.pop().in_fusion()
cmd_def[0].commandCreated.add(self.on_command_created_handler)
final_control = position_path_info.pop().in_fusion(
last_ui_element[0], cmd_def[0])
self.log_info('using {0} for command definition \'{1}\''.format(
final_control[0].objectType, cmd_def[0].id))
this_position_path.append(final_control)
this_position_path.append(cmd_def)
self.position_paths.append(this_position_path)
@abstractmethod
def on_create(self, args: adsk.core.CommandEventArgs,
command: adsk.core.Command, inputs: adsk.core.CommandInputs,
state: HandlerState):
""" Executed when addin button is pressed.
Create the needed input field here.
Args:
args (adsk.core.CommandEventArgs): the native commandEventArgs passed
to the handler
command (adsk.core.Command): reference to the command object
inputs (adsk.core.CommandInputs): quick reference directly to the
commandInputs object
"""
pass
@abstractmethod
def on_preview(self, args: adsk.core.CommandEventArgs,
command: adsk.core.Command, inputs: adsk.core.CommandInputs,
input_values: Dict[str, Any], state: HandlerState):
""" Executed when any inputs have changed, will updated the graphic
Code in this function will cause the graphics to refresh.
Note if your addin is complex it may be useful to only preview a subset
of the full operations
Args:
args (adsk.core.CommandEventArgs): the native commandEventArgs passed
to the handler
command (adsk.core.Command): reference to the command object
inputs (adsk.core.CommandInputs): quick reference directly to the
commandInputs object
input_values (Dict[str,Any]): dictionary of the useful values a user
entered. The key is the command_id.
"""
pass
@abstractmethod
def on_input_changed(self, args: adsk.core.InputChangedEventArgs,
command: adsk.core.Command,
inputs: adsk.core.CommandInputs,
input_values: Dict[str, Any],
changed_input: adsk.core.CommandInput,
state: HandlerState):
"""Executed when any inputs have changed. Useful for updating command UI.
When a user changes anything in the command dialog this method is executed.
Typically used for making changes to the command dialog itself.
Args:
args (adsk.core.InputChangedEventArgs): the native commandEventArgs
passed to the handler
command (adsk.core.Command): reference to the command object
inputs (adsk.core.CommandInputs): quick reference directly to the
commandInputs object
input_values (Dict[str,Any]): dictionary of the useful values a user
entered. The key is the command_id.
changed_input (adsk.core.CommandInput): The specific commandInput
that was modified.
"""
pass
@abstractmethod
def on_execute(self, args: adsk.core.CommandEventArgs,
command: adsk.core.Command, inputs: adsk.core.CommandInputs,
input_values: Dict[str, Any], state: HandlerState):
"""Will be executed when user selects OK in command dialog.
Args:
args (adsk.core.CommandEventArgs): the native commandEventArgs
passed to the handler.
command (adsk.core.Command): reference to the command object
inputs (adsk.core.CommandInputs): quick reference directly to the
commandInputs object.
input_values (Dict[str,Any]): dictionary of the useful values a user
entered. The key is the command_id.
"""
pass
@abstractmethod
def on_destroy(self, args: adsk.core.CommandEventArgs,
command: adsk.core.Command, inputs: adsk.core.CommandInputs,
input_values: Dict[str, Any],
reason: adsk.core.CommandTerminationReason,
state: HandlerState):
""" Executed when the command is done. Sometimes useful to check if a
user hit cancel.
You can use this to do any clean up that may otherwise be difficult until
after the command has completed.
Like firing a second command for example.
Args:
args (adsk.core.CommandEventArgs): the native commandEventArgs
passed to the handler
command (adsk.core.Command): reference to the command object
inputs (adsk.core.CommandInputs): quick reference directly to the
commandInputs object
input_values (Dict[str,Any]): dictionary of the useful values a user
entered. The key is the command_id.
reason (adsk.core.CommandTerminationReason): The reason the command
was terminated. Enumerator defined in adsk.core.CommandTerminationReason
"""
pass
@abstractmethod
def on_key_down(self, args: adsk.core.CommandEventArgs,
command: adsk.core.Command,
inputs: adsk.core.CommandInputs, input_values: Dict[str,
Any],
keycode: int, state: HandlerState):
"""[summary]
Args:
args (adsk.core.CommandEventArgs): [description]
command (adsk.core.Command): [description]
inputs (adsk.core.CommandInputs): [description]
input_values (Dict[str, Any]): [description]
keycode (int): [description]
"""
pass
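# A minimal, hypothetical subclass (not part of this module) showing how the
# abstract handlers above are meant to be overridden; it only uses members
# defined in this file, and the remaining handlers (on_preview,
# on_input_changed, on_destroy, on_key_down) would be overridden the same way.
class _ExampleCommand(Fusion360CommandBase):
    def on_create(self, args, command, inputs, state):
        # command input fields would normally be added to `inputs` here
        self.log_info('dialog created, call #{0}'.format(state.call_count))

    def on_execute(self, args, command, inputs, input_values, state):
        # input_values maps command input ids to the values the user entered
        self.log_info('executing with values: {0}'.format(input_values))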
class _CommandCreatedEventHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized CommandCreatedEventHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.CommandCreatedEventArgs):
""" Gets called if the CommandCreatedEvent is raised.
Args:
args (adsk.core.CommandCreatedEventArgs): the corresponding event arguments
"""
try:
self.state.call_count += 1
cmd = args.command
self.cmd_obj.fusion_command = cmd
on_execute_handler = _CommandExecuteHandler(self.cmd_obj)
cmd.execute.add(on_execute_handler)
self.cmd_obj.command_handlers.append(on_execute_handler)
on_input_changed_handler = _InputChangedHandler(self.cmd_obj)
cmd.inputChanged.add(on_input_changed_handler)
self.cmd_obj.command_handlers.append(on_input_changed_handler)
on_destroy_handler = _DestroyHandler(self.cmd_obj)
cmd.destroy.add(on_destroy_handler)
self.cmd_obj.command_handlers.append(on_destroy_handler)
on_execute_preview_handler = _PreviewHandler(self.cmd_obj)
cmd.executePreview.add(on_execute_preview_handler)
self.cmd_obj.command_handlers.append(on_execute_preview_handler)
on_keydown_handler = _KeyDownHandler(self.cmd_obj)
cmd.keyDown.add(on_keydown_handler)
self.cmd_obj.command_handlers.append(on_keydown_handler)
self.cmd_obj.log_info('executing CommandCreatedEventHandler')
# TODO add more args wrapper when needed
self.cmd_obj.on_create(args, cmd, cmd.commandInputs, self.state)
except:
self.cmd_obj.show_error('Command created failed: {0}'.format(
traceback.format_exc()))
class _PreviewHandler(adsk.core.CommandEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized PreviewHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.CommandEventArgs):
""" Gets called if the CommandPreviewEvent is raised.
Args:
args (adsk.core.CommandEventArgs): the corresponding event arguments
"""
try:
self.state.call_count += 1
cmd = args.firingEvent.sender
self.cmd_obj.log_info('executing PreviewHandler')
# TODO add more args wrapper when needed
self.cmd_obj.on_preview(args, cmd, cmd.commandInputs,
get_values(cmd.commandInputs), self.state)
except:
self.cmd_obj.show_error('Preview event failed: {}'.format(
traceback.format_exc()))
class _InputChangedHandler(adsk.core.InputChangedEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized InputChangedHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.InputChangedEventArgs):
""" Gets called if the InputChangedEvent is raised.
Args:
args (adsk.core.InputChangedEventArgs): the corresponding event arguments
"""
try:
self.state.call_count += 1
cmd = args.firingEvent.sender
self.cmd_obj.log_info('executing InputChangedHandler')
# TODO add more args wrapper when needed
self.cmd_obj.on_input_changed(args, cmd, cmd.commandInputs,
get_values(cmd.commandInputs),
args.input, self.state)
except:
self.cmd_obj.show_error('Input changed event failed: {}'.format(
traceback.format_exc()))
class _CommandExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized CommandExecuteHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.CommandEventArgs):
""" Gets called if the CommandExecuteEvent is raised.
Args:
args (adsk.core.CommandEventArgs): the corresponding event arguments
"""
try:
self.state.call_count += 1
cmd = args.firingEvent.sender
self.cmd_obj.log_info('executing CommandExecuteHandler')
# TODO add more args wrapper when needed
self.cmd_obj.on_execute(args, cmd, cmd.commandInputs,
get_values(cmd.commandInputs), self.state)
except:
self.cmd_obj.show_error('Command execute event failed: {}'.format(
traceback.format_exc()))
class _DestroyHandler(adsk.core.CommandEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized DestroyHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.CommandEventArgs):
""" Gets called if the CommandDestroyEvent is raised.
Args:
args (adsk.core.CommandEventArgs): the corresponding event arguments
"""
try:
self.state.call_count += 1
cmd = args.firingEvent.sender
self.cmd_obj.log_info('executing DestroyHandler')
# TODO add more args wrapper when needed
self.cmd_obj.on_destroy(args, cmd, cmd.commandInputs,
get_values(cmd.commandInputs),
args.terminationReason, self.state)
for event_id in set(self.cmd_obj.custom_command_handlers.keys()):
adsk.core.Application.get().unregisterCustomEvent(event_id)
self.cmd_obj.custom_command_handlers.clear()
except:
self.cmd_obj.show_error('Destroy event failed: {}'.format(
traceback.format_exc()))
class _KeyDownHandler(adsk.core.KeyboardEventHandler):
def __init__(self, cmd_obj):
super().__init__()
self.cmd_obj = cmd_obj
self.cmd_obj.log_info('initialized KeyboardHandler')
self.state = HandlerState()
def notify(self, args: adsk.core.KeyboardEventArgs):
try:
self.state.call_count += 1
cmd | |
<filename>bbp/comps/build_workflow.py
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module takes care of building a workflow using either user
choices interactively, or an option file containing all needed
parameters.
"""
from __future__ import division, print_function
# Import Python modules
import os
import ast
import sys
# Import Broadband modules
import bband_utils
import gmpe_config
import validation_cfg
import velocity_models
from module import Module
from install_cfg import InstallCfg
class ConfigurationError(Exception):
"""
Exception used to indicate that a configuration error was detected
in the platform
"""
pass
class WorkflowBuilder(object):
"""
This class asks the user to select all simulation options and
creates a workflow
"""
def __init__(self, sim_id, expert_mode, opt_obj=None):
"""
Initialize class parameters
"""
self.sim_id = sim_id
self.opt_obj = opt_obj
self.expert_mode = expert_mode
self.validation = None
self.src_file = ""
self.srf_file = None
self.vel_file = None
self.workflow = []
self.install = InstallCfg.getInstance()
self.tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(sim_id))
self.indir = os.path.join(self.install.A_IN_DATA_DIR, str(sim_id))
self.vmodel_name = None
self.vmodel_obj = None
self.val_obj = None
self.stations = None
self.method = None
self.gp_lf_vel_file = None
self.gp_hf_vel_file = None
self.multisegment_validation = False
self.multisegment_src_files = None
def select_simulation_method(self, sim_type):
"""
This function asks the user what method he/she wants to use
"""
while True:
if self.opt_obj is not None:
method = self.opt_obj.get_next_option()
else:
# Print header information about method selection
print("=" * 80)
print()
print("The Broadband Platform includes several scientific"
" methods that can be used to calculate synthetic"
" seismograms.")
print()
method = raw_input("Choose a Method to use in this "
"Broadband %s simulation:\n" % (sim_type) +
"(1) GP (Graves & Pitarka)\n"
"(2) UCSB\n"
"(3) SDSU\n"
"(4) EXSIM\n"
"(5) Song\n"
"(6) Irikura Recipe Method 1 (Irikura1)\n"
"(7) Irikura Recipe Method 2 (Irikura2)\n"
# "(8) CSM (Composite Source Model)"
# " - Beta Version\n"
"? ")
if (method == '1' or method.lower() == "graves & pitarka" or
method.lower() == "gp"):
return "GP"
elif method == '2' or method.lower() == "ucsb":
return "UCSB"
elif method == '3' or method.lower() == "sdsu":
return "SDSU"
elif method == '4' or method.lower() == "exsim":
return "EXSIM"
elif (method == "5" or method.lower() == "song" or
method.lower() == "rmg"):
return "SONG"
elif method == "6" or method.lower() == "irikura1":
return "IRIKURA1"
elif method == "7" or method.lower() == "irikura2":
return "IRIKURA2"
elif method == '8' or method.lower() == "csm":
return "CSM"
else:
print("%s is not a valid choice for method!\n" % (method))
if self.opt_obj is not None:
sys.exit(1)
def get_validation_source_file(self, method):
"""
This function selects a source file from a validation package.
If multiple files exist, it asks the user to select which one
to use for the simulation
"""
src_file = self.val_obj.get_input(method, "source")
if src_file is None or src_file == "":
# We need an src file, cannot proceed
print('*' * 80)
print("The %s validation package does not " %
(self.val_obj.get_print_name()))
print("include a source file for codebase %s" %
(method) +
". Aborting...")
print('*' * 80)
sys.exit(1)
# Only one file
if isinstance(src_file, str):
return src_file
# For multisegment validation events
self.multisegment_validation = True
if method == "SONG" or method == "IRIKURA1":
self.multisegment_src_files = src_file
return src_file[0]
while True:
if self.opt_obj is not None:
src_option = self.opt_obj.get_next_option()
else:
print("=" * 80)
print()
question = ("Please select a src_file from the list"
" below:\n\n")
for i in range(len(src_file)):
question = "%s(%d) %s\n" % (question,
i + 1,
os.path.basename(src_file[i]))
src_option = raw_input("%s? " % question)
try:
choice = int(src_option)
except ValueError:
print("You must enter an integer!")
if self.opt_obj is not None:
# Exit if processing an option file
sys.exit(1)
continue
try:
if choice >= 1 and choice <= len(src_file):
src_file = src_file[choice - 1]
break
else:
print("You must enter an integer from 1 to %d." %
(len(src_file)))
if self.opt_obj is not None:
# Exit if processing an option file
sys.exit(1)
except TypeError:
print("Invalid choice: %s" % (src_option))
if self.opt_obj is not None:
# Exit if processing an option file
sys.exit(1)
# Return src_file
return src_file
def select_source_file(self):
"""
This function asks the user if he/she wants to provide a
custom src file
"""
if self.validation:
while True:
# Ask if user wants to provide a src_file
if not self.expert_mode:
# Unless in expert mode, answer is no
user_src_file = 'n'
elif self.opt_obj is not None:
user_src_file = self.opt_obj.get_next_option()
else:
print("=" * 80)
print()
print("Each validation package includes a default source"
" description (SRC) file for a historical"
" event. Would you like to provide a different"
" file instead of the default file provided?"
" Answer 'no' here if you would like to use"
" the standard source file for this event.")
print()
user_src_file = raw_input("Do you want to provide "
"a custom source file "
"(y/n)? ")
if (user_src_file.lower() == 'y' or
user_src_file.lower() == 'yes'):
# Get custom file from user (note that
# this overrides the selection in the
# validation package)
self.src_file = self.get_input_file("source "
"description",
".src")
# Remember this src_file for later...
self.val_obj.set_input(self.method,
"source",
self.src_file)
if isinstance(self.src_file, list):
self.multisegment_validation = True
if self.method == "SONG" or self.method == "IRIKURA1":
self.multisegment_src_files = self.src_file
self.src_file = self.src_file[0]
break
else:
print("ERROR: Method does not accept "
"multiple SRC files!")
else:
break
elif (user_src_file.lower() == 'n' or
user_src_file.lower() == 'no'):
# The src_file is provided as a "source" parameter
# to the selected rupture generator codebase
self.src_file = self.get_validation_source_file(self.method)
print('=' * 80)
print("SRC file: %s" % (self.src_file))
break
else:
print("Invalid answer!")
if self.opt_obj is not None:
sys.exit(1)
else:
# Need source file
if self.opt_obj is None:
print()
print("=" * 80)
print()
print("The source description (SRC) file contains a"
" description of the hypothetical (or scenario)"
" earthquake, including information like location"
", geometry, magnitude, and mechanism.")
print()
self.src_file = self.get_input_file("source description",
".src")
def infer_site_response(self):
"""
This function infers if the site response should be used in a
validation simulation. The decision comes from (1) having an
active tectonic region and (2) not having correction coefficients
in the validation package.
"""
# Check if validation simulation
if self.validation == False:
# Site response not automatically invoked in
# scenario simulations, use expert mode if needed
return False
if self.val_obj.get_event_type().lower() == "gmpe":
# This is a GMPE validation event, don't use the site
# response module for these events
return False
# Check if this is an active tectonic region
if not self.vmodel_obj.is_active_region():
# Site response not recommended for this region
return False
# Check for correction coefficients
if self.val_obj.get_obs_corrections():
# Found, no need to use site response module
return False
return True
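# Summary of the decision above: site response is inferred only when all four
# conditions hold, namely this is a validation run, the event is not a GMPE
# comparison, the velocity model belongs to an active tectonic region, and the
# validation package ships no observation correction coefficients. Any single
# failing condition short-circuits to False.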
def select_site_response(self):
"""
This function asks the user if he/she wants to run the site
response module
"""
while True:
if self.opt_obj is not None:
site_resp = self.opt_obj.get_next_option()
else:
print("=" * 80)
print()
print("Site Response")
print("=============")
print("Running a site response module is an optional step"
" while running a Broadband Platform simulation. It"
" requires a station list file containing the Vs30"
" values for each station location.")
print()
if not self.vmodel_obj.is_active_region():
print("Warning: The site response module is currently "
"based on Boore et al. (2014)\nas developed for "
"Active Tectonic Regions.\nWe do not recommend "
"its use for CEUS simulations.")
print()
site_resp = raw_input("Do you want to run the "
"site response module (y/n)? ")
if site_resp.lower() == 'y' or site_resp.lower() == 'yes':
return True
elif site_resp.lower() == 'n' or site_resp.lower() == 'no':
return False
else:
print("Invalid answer: %s " % (site_resp))
if self.opt_obj is not None:
sys.exit(1)
def check_velocity_models(self):
"""
This function is used to make sure the needed velocity model
file(s) exist(s)
"""
# List of velocity models to check for...
need_vm = []
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
with pytest.raises(ValueError, match=err_msg):
uvf.select(**select_kwargs)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize("dimension", list(range(1, 4)))
def test_select_antenna_nums(input_uvf, uvf_mode, dimension):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
old_history = copy.deepcopy(uvf.history)
np.random.seed(0)
if uvf.type == "baseline":
unique_ants = np.unique(uvf.ant_1_array.tolist() + uvf.ant_2_array.tolist())
ants_to_keep = np.random.choice(
unique_ants, size=unique_ants.size // 2, replace=False
)
blts_select = [
(a1 in ants_to_keep) & (a2 in ants_to_keep)
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
else:
unique_ants = np.unique(uvf.ant_array)
ants_to_keep = np.random.choice(
unique_ants, size=unique_ants.size // 2, replace=False
)
if dimension == 1:
ants_to_keep = np.atleast_1d(ants_to_keep)
elif dimension == 2:
ants_to_keep = np.atleast_2d(ants_to_keep)
elif dimension == 3:
ants_to_keep = np.atleast_3d(ants_to_keep)
uvf2 = copy.deepcopy(uvf)
uvf2.select(antenna_nums=ants_to_keep)
# make 1-D for the remaining iterators in tests
ants_to_keep = ants_to_keep.squeeze()
assert ants_to_keep.size == uvf2.Nants_data
if uvf2.type == "baseline":
assert Nblts_selected == uvf2.Nblts
for ant in ants_to_keep:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in ants_to_keep
else:
for ant in ants_to_keep:
assert ant in uvf2.ant_array
for ant in np.unique(uvf2.ant_array):
assert ant in ants_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific antennas using pyuvdata.",
uvf2.history,
)
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_antenna_nums_error(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
# also test for error if antenna numbers not present in data
with pytest.raises(ValueError) as cm:
uvf.select(antenna_nums=[708, 709, 710])
assert str(cm.value).startswith("Antenna number 708 is not present")
def sort_bl(p):
"""Sort a tuple that starts with a pair of antennas, and may have stuff after."""
if p[1] >= p[0]:
return p
return (p[1], p[0]) + p[2:]
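# Quick illustration of sort_bl (values chosen arbitrarily): the antenna pair is
# put into ascending order and any trailing items, such as a polarization
# string, are preserved.
#
#   sort_bl((3, 1))        -> (1, 3)
#   sort_bl((1, 3, "xx"))  -> (1, 3, "xx")
#   sort_bl((5, 2, "yy"))  -> (2, 5, "yy")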
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_bls(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type != "baseline":
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1)])
assert str(cm.value).startswith(
'Only "baseline" mode UVFlag '
"objects may select along the "
"baseline axis"
)
else:
old_history = copy.deepcopy(uvf.history)
bls_select = np.random.choice(
uvf.baseline_array, size=uvf.Nbls // 2, replace=False
)
first_ants, second_ants = uvf.baseline_to_antnums(bls_select)
# give the conjugate bls for a few baselines
first_ants[5:8], second_ants[5:8] = (
copy.copy(second_ants[5:8]),
copy.copy(first_ants[5:8]),
)
new_unique_ants = np.unique(first_ants.tolist() + second_ants.tolist())
ant_pairs_to_keep = list(zip(first_ants, second_ants))
sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
blts_select = [
sort_bl((a1, a2)) in sorted_pairs_to_keep
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
uvf2 = copy.deepcopy(uvf)
uvf2.select(bls=ant_pairs_to_keep)
sorted_pairs_object2 = [
sort_bl(p) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert len(new_unique_ants) == uvf2.Nants_data
assert Nblts_selected == uvf2.Nblts
for ant in new_unique_ants:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in new_unique_ants
for pair in sorted_pairs_to_keep:
assert pair in sorted_pairs_object2
for pair in sorted_pairs_object2:
assert pair in sorted_pairs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific baselines using pyuvdata.",
uvf2.history,
)
# Check with polarization too
first_ants, second_ants = uvf.baseline_to_antnums(bls_select)
# conjugate a few bls
first_ants[5:8], second_ants[5:8] = (
copy.copy(second_ants[5:8]),
copy.copy(first_ants[5:8]),
)
pols = ["xx"] * len(first_ants)
new_unique_ants = np.unique(first_ants.tolist() + second_ants.tolist())
ant_pairs_to_keep = list(zip(first_ants, second_ants, pols))
sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
blts_select = [
sort_bl((a1, a2, "xx")) in sorted_pairs_to_keep
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
uvf2 = copy.deepcopy(uvf)
uvf2.select(bls=ant_pairs_to_keep)
sorted_pairs_object2 = [
sort_bl(p) + ("xx",) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert len(new_unique_ants) == uvf2.Nants_data
assert Nblts_selected == uvf2.Nblts
for ant in new_unique_ants:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in new_unique_ants
for pair in sorted_pairs_to_keep:
assert pair in sorted_pairs_object2
for pair in sorted_pairs_object2:
assert pair in sorted_pairs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to "
"specific baselines, polarizations using pyuvdata.",
uvf2.history,
)
# check that you can specify a single pair without errors
assert isinstance(ant_pairs_to_keep[0], tuple)
uvf2.select(bls=ant_pairs_to_keep[0])
sorted_pairs_object2 = [
sort_bl(p) + ("xx",) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert list(set(sorted_pairs_object2)) == [ant_pairs_to_keep[0]]
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize(
"select_kwargs,err_msg",
[
({"bls": [3]}, "bls must be a list of tuples"),
({"bls": [(np.pi, 2 * np.pi)]}, "bls must be a list of tuples of integer"),
(
{"bls": (0, 1, "xx"), "polarizations": [-5]},
"Cannot provide length-3 tuples and also specify polarizations.",
),
(
{"bls": (0, 1, 5)},
"The third element in each bl must be a polarization string",
),
({"bls": (455, 456)}, "Antenna number 455 is not present"),
({"bls": (97, 456)}, "Antenna number 456 is not present"),
(
{"bls": (97, 97)},
r"Antenna pair \(97, 97\) does not have any data associated with it.",
),
],
)
def test_select_bls_errors(input_uvf, uvf_mode, select_kwargs, err_msg):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type != "baseline":
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1)])
assert str(cm.value).startswith(
'Only "baseline" mode UVFlag '
"objects may select along the "
"baseline axis"
)
else:
if select_kwargs["bls"] == (97, 97):
uvf.select(bls=[(97, 104), (97, 105), (88, 97)])
with pytest.raises(ValueError, match=err_msg):
uvf.select(**select_kwargs)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_times(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
unique_times = np.unique(uvf.time_array)
times_to_keep = np.random.choice(
unique_times, size=unique_times.size // 2, replace=False
)
Nblts_selected = np.sum([t in times_to_keep for t in uvf.time_array])
uvf2 = copy.deepcopy(uvf)
uvf2.select(times=times_to_keep)
assert len(times_to_keep) == uvf2.Ntimes
if uvf2.type == "baseline":
n_compare = uvf2.Nblts
else:
n_compare = uvf2.Ntimes
assert Nblts_selected == n_compare
for t in times_to_keep:
assert t in uvf2.time_array
for t in np.unique(uvf2.time_array):
assert t in times_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific times using pyuvdata.",
uvf2.history,
)
# check that it also works with higher dimension array
uvf2 = copy.deepcopy(uvf)
uvf2.select(times=times_to_keep[np.newaxis, :])
assert len(times_to_keep) == uvf2.Ntimes
assert Nblts_selected == n_compare
for t in times_to_keep:
assert t in uvf2.time_array
for t in np.unique(uvf2.time_array):
assert t in times_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific times using pyuvdata.",
uvf2.history,
)
# check for errors associated with times not included in data
with pytest.raises(ValueError) as cm:
bad_time = [np.min(unique_times) - 0.005]
uvf.select(times=bad_time)
assert str(cm.value).startswith(
"Time {t} is not present in" " the time_array".format(t=bad_time[0])
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_frequencies(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
freqs_to_keep = np.random.choice(
uvf.freq_array.squeeze(), size=uvf.Nfreqs // 10, replace=False
)
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep)
assert len(freqs_to_keep) == uvf2.Nfreqs
for f in freqs_to_keep:
assert f in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
assert f in freqs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check that it also works with higher dimension array
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep[np.newaxis, :])
assert len(freqs_to_keep) == uvf2.Nfreqs
for f in freqs_to_keep:
assert f in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
assert f in freqs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check that selecting one frequency works
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep[0])
assert 1 == uvf2.Nfreqs
assert freqs_to_keep[0] in uvf2.freq_array
for f in uvf2.freq_array:
assert f in [freqs_to_keep[0]]
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check for errors associated with frequencies not included in data
with pytest.raises(ValueError) as cm:
bad_freq = [np.max(uvf.freq_array) + 100]
uvf.select(frequencies=bad_freq)
assert str(cm.value).startswith(
"Frequency {f} is not present in the freq_array".format(f=bad_freq[0])
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_freq_chans(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
chans = np.random.choice(uvf.Nfreqs, 2)
c1, c2 = np.sort(chans)
chans_to_keep = np.arange(c1, c2)
uvf2 = copy.deepcopy(uvf)
uvf2.select(freq_chans=chans_to_keep)
assert len(chans_to_keep) == uvf2.Nfreqs
for chan in chans_to_keep:
if uvf2.type != "waterfall":
assert uvf.freq_array[0, | |
},
":older_woman_tone5:": {
"category": "people",
"name": "older woman tone 5",
"unicode": "1f475-1f3ff"
},
":om_symbol:": {
"category": "symbols",
"name": "om symbol",
"unicode": "1f549",
"unicode_alt": "1f549-fe0f"
},
":on:": {
"category": "symbols",
"name": "on with exclamation mark with left right arrow abo",
"unicode": "1f51b"
},
":oncoming_automobile:": {
"category": "travel",
"name": "oncoming automobile",
"unicode": "1f698"
},
":oncoming_bus:": {
"category": "travel",
"name": "oncoming bus",
"unicode": "1f68d"
},
":oncoming_police_car:": {
"category": "travel",
"name": "oncoming police car",
"unicode": "1f694"
},
":oncoming_taxi:": {
"category": "travel",
"name": "oncoming taxi",
"unicode": "1f696"
},
":one:": {
"category": "symbols",
"name": "keycap digit one",
"unicode": "0031-20e3",
"unicode_alt": "0031-fe0f-20e3"
},
":open_file_folder:": {
"category": "objects",
"name": "open file folder",
"unicode": "1f4c2"
},
":open_hands:": {
"category": "people",
"name": "open hands sign",
"unicode": "1f450"
},
":open_hands_tone1:": {
"category": "people",
"name": "open hands sign tone 1",
"unicode": "1f450-1f3fb"
},
":open_hands_tone2:": {
"category": "people",
"name": "open hands sign tone 2",
"unicode": "1f450-1f3fc"
},
":open_hands_tone3:": {
"category": "people",
"name": "open hands sign tone 3",
"unicode": "1f450-1f3fd"
},
":open_hands_tone4:": {
"category": "people",
"name": "open hands sign tone 4",
"unicode": "1f450-1f3fe"
},
":open_hands_tone5:": {
"category": "people",
"name": "open hands sign tone 5",
"unicode": "1f450-1f3ff"
},
":open_mouth:": {
"category": "people",
"name": "face with open mouth",
"unicode": "1f62e"
},
":ophiuchus:": {
"category": "symbols",
"name": "ophiuchus",
"unicode": "26ce"
},
":orange_book:": {
"category": "objects",
"name": "orange book",
"unicode": "1f4d9"
},
":orthodox_cross:": {
"category": "symbols",
"name": "orthodox cross",
"unicode": "2626",
"unicode_alt": "2626-fe0f"
},
":outbox_tray:": {
"category": "objects",
"name": "outbox tray",
"unicode": "1f4e4"
},
":owl:": {
"category": "nature",
"name": "owl",
"unicode": "1f989"
},
":ox:": {
"category": "nature",
"name": "ox",
"unicode": "1f402"
},
":package:": {
"category": "objects",
"name": "package",
"unicode": "1f4e6"
},
":page_facing_up:": {
"category": "objects",
"name": "page facing up",
"unicode": "1f4c4"
},
":page_with_curl:": {
"category": "objects",
"name": "page with curl",
"unicode": "1f4c3"
},
":pager:": {
"category": "objects",
"name": "pager",
"unicode": "1f4df"
},
":paintbrush:": {
"category": "objects",
"name": "lower left paintbrush",
"unicode": "1f58c",
"unicode_alt": "1f58c-fe0f"
},
":palm_tree:": {
"category": "nature",
"name": "palm tree",
"unicode": "1f334"
},
":pancakes:": {
"category": "food",
"name": "pancakes",
"unicode": "1f95e"
},
":panda_face:": {
"category": "nature",
"name": "panda face",
"unicode": "1f43c"
},
":paperclip:": {
"category": "objects",
"name": "paperclip",
"unicode": "1f4ce"
},
":paperclips:": {
"category": "objects",
"name": "linked paperclips",
"unicode": "1f587",
"unicode_alt": "1f587-fe0f"
},
":park:": {
"category": "travel",
"name": "national park",
"unicode": "1f3de",
"unicode_alt": "1f3de-fe0f"
},
":parking:": {
"category": "symbols",
"name": "negative squared latin capital letter p",
"unicode": "1f17f",
"unicode_alt": "1f17f-fe0f"
},
":part_alternation_mark:": {
"category": "symbols",
"name": "part alternation mark",
"unicode": "303d",
"unicode_alt": "303d-fe0f"
},
":partly_sunny:": {
"category": "nature",
"name": "sun behind cloud",
"unicode": "26c5",
"unicode_alt": "26c5-fe0f"
},
":passport_control:": {
"category": "symbols",
"name": "passport control",
"unicode": "1f6c2"
},
":pause_button:": {
"category": "symbols",
"name": "double vertical bar",
"unicode": "23f8",
"unicode_alt": "23f8-fe0f"
},
":peace:": {
"category": "symbols",
"name": "peace symbol",
"unicode": "262e",
"unicode_alt": "262e-fe0f"
},
":peach:": {
"category": "food",
"name": "peach",
"unicode": "1f351"
},
":peanuts:": {
"category": "food",
"name": "peanuts",
"unicode": "1f95c"
},
":pear:": {
"category": "food",
"name": "pear",
"unicode": "1f350"
},
":pen_ballpoint:": {
"category": "objects",
"name": "lower left ballpoint pen",
"unicode": "1f58a",
"unicode_alt": "1f58a-fe0f"
},
":pen_fountain:": {
"category": "objects",
"name": "lower left fountain pen",
"unicode": "1f58b",
"unicode_alt": "1f58b-fe0f"
},
":pencil2:": {
"category": "objects",
"name": "pencil",
"unicode": "270f",
"unicode_alt": "270f-fe0f"
},
":pencil:": {
"category": "objects",
"name": "memo",
"unicode": "1f4dd"
},
":penguin:": {
"category": "nature",
"name": "penguin",
"unicode": "1f427"
},
":pensive:": {
"category": "people",
"name": "pensive face",
"unicode": "1f614"
},
":performing_arts:": {
"category": "activity",
"name": "performing arts",
"unicode": "1f3ad"
},
":persevere:": {
"category": "people",
"name": "persevering face",
"unicode": "1f623"
},
":person_frowning:": {
"category": "people",
"name": "<NAME>",
"unicode": "1f64d"
},
":person_frowning_tone1:": {
"category": "people",
"name": "person frowning tone 1",
"unicode": "1f64d-1f3fb"
},
":person_frowning_tone2:": {
"category": "people",
"name": "person frowning tone 2",
"unicode": "1f64d-1f3fc"
},
":person_frowning_tone3:": {
"category": "people",
"name": "person frowning tone 3",
"unicode": "1f64d-1f3fd"
},
":person_frowning_tone4:": {
"category": "people",
"name": "person frowning tone 4",
"unicode": "1f64d-1f3fe"
},
":person_frowning_tone5:": {
"category": "people",
"name": "person frowning tone 5",
"unicode": "1f64d-1f3ff"
},
":person_with_blond_hair:": {
"category": "people",
"name": "person with blond hair",
"unicode": "1f471"
},
":person_with_blond_hair_tone1:": {
"category": "people",
"name": "person with blond hair tone 1",
"unicode": "1f471-1f3fb"
},
":person_with_blond_hair_tone2:": {
"category": "people",
"name": "person with blond hair tone 2",
"unicode": "1f471-1f3fc"
},
":person_with_blond_hair_tone3:": {
"category": "people",
"name": "person with blond hair tone 3",
"unicode": "1f471-1f3fd"
},
":person_with_blond_hair_tone4:": {
"category": "people",
"name": "person with blond hair tone 4",
"unicode": "1f471-1f3fe"
},
":person_with_blond_hair_tone5:": {
"category": "people",
"name": "person with blond hair tone 5",
"unicode": "1f471-1f3ff"
},
":person_with_pouting_face:": {
"category": "people",
"name": "person with pouting face",
"unicode": "1f64e"
},
":person_with_pouting_face_tone1:": {
"category": "people",
"name": "person with pouting face tone1",
"unicode": "1f64e-1f3fb"
},
":person_with_pouting_face_tone2:": {
"category": "people",
"name": "person with pouting face tone2",
"unicode": "1f64e-1f3fc"
},
":person_with_pouting_face_tone3:": {
"category": "people",
"name": "person with pouting face tone3",
"unicode": "1f64e-1f3fd"
},
":person_with_pouting_face_tone4:": {
"category": "people",
"name": "person with pouting face tone4",
"unicode": "1f64e-1f3fe"
},
":person_with_pouting_face_tone5:": {
"category": "people",
"name": "person with pouting face tone5",
"unicode": "1f64e-1f3ff"
},
":pick:": {
"category": "objects",
"name": "pick",
"unicode": "26cf",
"unicode_alt": "26cf-fe0f"
},
":pig2:": {
"category": "nature",
"name": "pig",
"unicode": "1f416"
},
":pig:": {
"category": "nature",
"name": "pig face",
"unicode": "1f437"
},
":pig_nose:": {
"category": "nature",
"name": "pig nose",
"unicode": "1f43d"
},
":pill:": {
"category": "objects",
"name": "pill",
"unicode": "1f48a"
},
":pineapple:": {
"category": "food",
"name": "pineapple",
"unicode": "1f34d"
},
":ping_pong:": {
"category": "activity",
"name": "table tennis paddle and ball",
"unicode": "1f3d3"
},
":pisces:": {
"category": "symbols",
"name": "pisces",
"unicode": "2653",
"unicode_alt": "2653-fe0f"
},
":pizza:": {
"category": "food",
"name": "slice of pizza",
"unicode": "1f355"
},
":place_of_worship:": {
"category": "symbols",
"name": "place of worship",
"unicode": "1f6d0"
},
":play_pause:": {
"category": "symbols",
"name": "black right-pointing double triangle with double vertical bar",
"unicode": "23ef",
"unicode_alt": "23ef-fe0f"
},
":point_down:": {
"category": "people",
"name": "white down pointing backhand index",
"unicode": "1f447"
},
":point_down_tone1:": {
"category": "people",
"name": "white down pointing backhand index tone 1",
"unicode": "1f447-1f3fb"
},
":point_down_tone2:": {
"category": "people",
"name": "white down pointing backhand index tone 2",
"unicode": "1f447-1f3fc"
},
":point_down_tone3:": {
"category": "people",
"name": "white down pointing backhand index tone 3",
"unicode": "1f447-1f3fd"
},
":point_down_tone4:": {
"category": "people",
"name": "white down pointing backhand index tone 4",
"unicode": "1f447-1f3fe"
},
":point_down_tone5:": {
"category": "people",
"name": "white down pointing backhand index tone 5",
"unicode": "1f447-1f3ff"
},
":point_left:": {
"category": "people",
"name": "white left pointing backhand index",
"unicode": "1f448"
},
":point_left_tone1:": {
"category": "people",
"name": "white left pointing backhand index tone 1",
"unicode": "1f448-1f3fb"
},
":point_left_tone2:": {
"category": "people",
"name": "white left pointing backhand index tone 2",
"unicode": "1f448-1f3fc"
},
":point_left_tone3:": {
"category": "people",
"name": "white left pointing backhand index tone 3",
"unicode": "1f448-1f3fd"
},
":point_left_tone4:": {
"category": "people",
"name": "white left pointing backhand index tone 4",
"unicode": "1f448-1f3fe"
},
":point_left_tone5:": {
"category": "people",
"name": "white left pointing backhand index tone 5",
"unicode": "1f448-1f3ff"
},
":point_right:": {
"category": "people",
"name": "white right pointing backhand index",
"unicode": "1f449"
},
":point_right_tone1:": {
"category": "people",
"name": "white right pointing backhand index tone 1",
"unicode": "1f449-1f3fb"
},
":point_right_tone2:": {
"category": "people",
"name": "white right pointing backhand index tone 2",
"unicode": "1f449-1f3fc"
},
":point_right_tone3:": {
"category": "people",
"name": "white right pointing backhand index tone 3",
"unicode": "1f449-1f3fd"
},
":point_right_tone4:": {
"category": "people",
"name": "white right pointing backhand index tone 4",
"unicode": "1f449-1f3fe"
},
":point_right_tone5:": {
"category": "people",
"name": "white right pointing backhand index tone 5",
"unicode": "1f449-1f3ff"
},
":point_up:": {
"category": "people",
"name": "white up pointing index",
"unicode": "261d",
"unicode_alt": "261d-fe0f"
},
":point_up_2:": {
"category": "people",
"name": "white up pointing backhand index",
"unicode": "1f446"
},
":point_up_2_tone1:": {
"category": "people",
"name": "white up pointing backhand index tone 1",
"unicode": "1f446-1f3fb"
},
":point_up_2_tone2:": {
"category": "people",
"name": "white up pointing backhand index tone 2",
"unicode": "1f446-1f3fc"
},
":point_up_2_tone3:": {
"category": "people",
"name": "white up pointing backhand index tone 3",
"unicode": "1f446-1f3fd"
},
":point_up_2_tone4:": {
"category": "people",
"name": "white up pointing backhand index tone 4",
"unicode": "1f446-1f3fe"
},
":point_up_2_tone5:": {
"category": "people",
"name": "white up pointing backhand index tone 5",
"unicode": "1f446-1f3ff"
},
":point_up_tone1:": {
"category": "people",
| |
0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.38352,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 3.12036,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.107973,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.287496,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.433757,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.49485,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.173556,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.300537,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.181226,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.655319,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.105052,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.205488,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.9347,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.081946,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00629155,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0919908,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0465299,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.173937,
'Execution Unit/Register Files/Runtime Dynamic': 0.0528214,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.251868,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.461671,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.15764,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000232644,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000232644,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000201085,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.69966e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000668405,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00133478,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00228587,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0447303,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.84523,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.085181,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.151924,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.20394,
'Instruction Fetch Unit/Runtime Dynamic': 0.285456,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0144078,
'L2/Runtime Dynamic': 0.00571127,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.58887,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.18794,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0113798,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0113799,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.64282,
'Load Store Unit/Runtime Dynamic': 0.255442,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0280607,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0561217,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00995882,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0101736,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.176906,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0139389,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.352621,
'Memory Management Unit/Runtime Dynamic': 0.0241125,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 17.7102,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.285891,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0123149,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0849999,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
| |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.mixture import GaussianMixture
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from anytree import NodeMixin, LevelOrderIter
from .autogmm import AutoGMMCluster
from .kclust import KMeansCluster
def _check_common_inputs(min_components, max_components, cluster_kws):
if not isinstance(min_components, int):
raise TypeError("min_components must be an int")
elif min_components < 1:
raise ValueError("min_components must be > 0")
if not isinstance(max_components, int):
raise TypeError("max_components must be an int")
elif max_components < 1:
raise ValueError("max_components must be > 0")
elif max_components < min_components:
raise ValueError("max_components must be >= min_components")
if not isinstance(cluster_kws, dict):
raise TypeError("cluster_kws must be a dict")
def _check_fcluster(fcluster, level):
if level is not None:
if not isinstance(level, int):
raise TypeError("level must be an int")
elif level < 1:
raise ValueError("level must be positive")
elif fcluster is False:
msg = "level-specific flat clustering is availble\
only if 'fcluster' is enabled"
raise ValueError(msg)
class DivisiveCluster(NodeMixin, BaseEstimator):
"""
Recursively clusters data based on a chosen clustering algorithm.
This algorithm implements a "divisive" or "top-down" approach.
Parameters
----------
cluster_method : str {"gmm", "kmeans"}, defaults to "gmm".
The underlying clustering method to apply. If "gmm" will use
:class:`~graspologic.cluster.AutoGMMCluster`. If "kmeans", will use
:class:`~graspologic.cluster.KMeansCluster`.
min_components : int, defaults to 1.
The minimum number of mixture components/clusters to consider
for the first split if "gmm" is selected as ``cluster_method``;
and is set to 1 for later splits.
If ``cluster_method`` is "kmeans", it is set to 2 for all splits.
max_components : int, defaults to 2.
The maximum number of mixture components/clusters to consider
at each split.
min_split : int, defaults to 1.
The minimum size of a cluster for it to be considered to be split again.
max_level : int, defaults to 4.
The maximum number of times to recursively cluster the data.
delta_criter : float, non-negative, defaults to 0.
The smallest difference between selection criterion values of a new
model and the current model that is required to accept the new model.
Applicable only if ``cluster_method`` is "gmm".
cluster_kws : dict, defaults to {}
Keyword arguments (except ``min_components`` and ``max_components``) for chosen
clustering method.
Attributes
----------
model_ : GaussianMixture or KMeans object
Fitted clustering object based on which ``cluster_method`` was used.
See Also
--------
graspologic.cluster.AutoGMMCluster
graspologic.cluster.KMeansCluster
anytree.node.nodemixin.NodeMixin
Notes
-----
This class inherits from :class:`anytree.node.nodemixin.NodeMixin`, a lightweight
class for doing various simple operations on trees.
This algorithm was strongly inspired by maggotcluster, a divisive
clustering algorithm in https://github.com/neurodata/maggot_models and the
algorithm for estimating a hierarchical stochastic block model presented in [2]_.
References
----------
.. [1] <NAME>., & <NAME>. (2019).
AutoGMM: Automatic Gaussian Mixture Modeling in Python.
arXiv preprint arXiv:1909.02688.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>
(2016). Community detection and classification in hierarchical
stochastic blockmodels. IEEE Transactions on Network Science and
Engineering, 4(1), 13-26.
"""
def __init__(
self,
cluster_method="gmm",
min_components=1,
max_components=2,
cluster_kws={},
min_split=1,
max_level=4,
delta_criter=0,
):
_check_common_inputs(min_components, max_components, cluster_kws)
if cluster_method not in ["gmm", "kmeans"]:
msg = "clustering method must be one of"
msg += "{gmm, kmeans}"
raise ValueError(msg)
if delta_criter < 0:
raise ValueError("delta_criter must be non-negative")
self.parent = None
self.min_components = min_components
self.max_components = max_components
self.cluster_method = cluster_method
self.cluster_kws = cluster_kws
self.min_split = min_split
self.max_level = max_level
self.delta_criter = delta_criter
def fit(self, X):
"""
        Fits clustering models to the data and, recursively, to the resulting clusters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
self : object
Returns an instance of self.
"""
self.fit_predict(X)
return self
def fit_predict(self, X, fcluster=False, level=None):
"""
        Fits clustering models to the data and, recursively, to the resulting
        clusters, then uses the fitted models to predict a hierarchy of labels
Parameters
----------
X : array-like, shape (n_samples, n_features)
fcluster: bool, default=False
if True, returned labels will be re-numbered so that each column
of labels represents a flat clustering at current level,
and each label corresponds to a cluster indexed the same as
the corresponding node in the overall clustering dendrogram
level: int, optional (default=None)
the level of a single flat clustering to generate
only available if ``fcluster`` is True
Returns
-------
        labels : array-like, shape (n_samples, n_levels)
if no level specified; otherwise, shape (n_samples,)
"""
X = check_array(X, dtype=[np.float64, np.float32], ensure_min_samples=1)
_check_fcluster(fcluster, level)
if self.max_components > X.shape[0]:
msg = "max_components must be >= n_samples, but max_components = "
msg += "{}, n_samples = {}".format(self.max_components, X.shape[0])
raise ValueError(msg)
labels = self._fit(X)
# delete the last column if predictions at the last level
# are all zero vectors
if (labels.shape[1] > 1) and (np.max(labels[:, -1]) == 0):
labels = labels[:, :-1]
if level is not None:
if level > labels.shape[1]:
msg = "input exceeds max level = {}".format(labels.shape[1])
raise ValueError(msg)
if fcluster:
labels = self._relabel(labels, level)
return labels
def _cluster_and_decide(self, X):
if self.is_root:
min_components = self.min_components
else:
min_components = 1
if self.cluster_method == "gmm":
cluster = AutoGMMCluster(
min_components=min_components,
max_components=self.max_components,
**self.cluster_kws
)
cluster.fit(X)
model = cluster.model_
criter = cluster.criter_
k = cluster.n_components_
pred = cluster.predict(X)
if self.delta_criter > 0:
single_cluster = AutoGMMCluster(
min_components=1, max_components=1, **self.cluster_kws
)
single_cluster.fit(X)
criter_single_cluster = single_cluster.criter_
if k > 1:
# check whether the difference between the criterion
# of "split" and "not split" is greater than
# the threshold, delta_criter
if criter_single_cluster - criter < self.delta_criter:
pred = np.zeros((len(X), 1), dtype=int)
elif self.cluster_method == "kmeans":
cluster = KMeansCluster(
max_clusters=self.max_components, **self.cluster_kws
)
cluster.fit(X)
model = cluster.model_
pred = cluster.predict(X)
self.model_ = model
return pred
def _fit(self, X):
pred = self._cluster_and_decide(X)
self.children = []
uni_labels = np.unique(pred)
labels = pred.reshape((-1, 1)).copy()
if len(uni_labels) > 1:
for ul in uni_labels:
inds = pred == ul
new_X = X[inds]
dc = DivisiveCluster(
cluster_method=self.cluster_method,
max_components=self.max_components,
min_split=self.min_split,
max_level=self.max_level,
cluster_kws=self.cluster_kws,
delta_criter=self.delta_criter,
)
dc.parent = self
if (
len(new_X) > self.max_components
and len(new_X) >= self.min_split
and self.depth + 1 < self.max_level
):
child_labels = dc._fit(new_X)
while labels.shape[1] <= child_labels.shape[1]:
labels = np.column_stack(
(labels, np.zeros((len(X), 1), dtype=int))
)
labels[inds, 1 : child_labels.shape[1] + 1] = child_labels
else:
# make a "GaussianMixture" model for clusters
# that were not fitted
if self.cluster_method == "gmm":
cluster_idx = len(dc.parent.children) - 1
parent_model = dc.parent.model_
model = GaussianMixture()
model.weights_ = np.array([1])
model.means_ = parent_model.means_[cluster_idx].reshape(1, -1)
model.covariance_type = parent_model.covariance_type
if model.covariance_type == "tied":
model.covariances_ = parent_model.covariances_
model.precisions_ = parent_model.precisions_
model.precisions_cholesky_ = (
parent_model.precisions_cholesky_
)
else:
cov_types = ["spherical", "diag", "full"]
n_features = model.means_.shape[-1]
cov_shapes = [
(1,),
(1, n_features),
(1, n_features, n_features),
]
cov_shape_idx = cov_types.index(model.covariance_type)
model.covariances_ = parent_model.covariances_[
cluster_idx
].reshape(cov_shapes[cov_shape_idx])
model.precisions_ = parent_model.precisions_[
cluster_idx
].reshape(cov_shapes[cov_shape_idx])
model.precisions_cholesky_ = (
parent_model.precisions_cholesky_[cluster_idx].reshape(
cov_shapes[cov_shape_idx]
)
)
dc.model_ = model
return labels
def predict(self, X, fcluster=False, level=None):
"""
Predicts a hierarchy of labels based on fitted models
Parameters
----------
X : array-like, shape (n_samples, n_features)
fcluster: bool, default=False
if True, returned labels will be re-numbered so that each column
of labels represents a flat clustering at current level,
and each label corresponds to a cluster indexed the same as
the corresponding node in the overall clustering dendrogram
level: int, optional (default=None)
the level of a single flat clustering to generate
only available if ``fcluster`` is True
Returns
-------
labels : array-like, shape (n_samples, n_levels)
if no level specified; otherwise, shape (n_samples,)
"""
check_is_fitted(self, ["model_"], all_or_any=all)
X = check_array(X, dtype=[np.float64, np.float32], ensure_min_samples=1)
_check_fcluster(fcluster, level)
labels = self._predict_labels(X)
if (level is not None) and (level > labels.shape[1]):
msg = "input exceeds max level = {}".format(labels.shape[1])
raise ValueError(msg)
if fcluster:
# convert labels to stacked flat clusterings
# based on the flat clusterings on fitted data
inds = [(labels == row).all(1).any() for row in self._labels]
labels = self._new_labels[inds]
if level is not None:
labels = labels[:, level - 1]
return labels
def _predict_labels(self, X):
if not self.is_leaf:
pred_labels = np.zeros((len(X), self.height), dtype=int)
current_pred_labels = self.model_.predict(X)
pred_labels[:, 0] = current_pred_labels
for label in np.unique(current_pred_labels):
current_child = self.children[label]
if not current_child.is_leaf:
child_pred_labels = current_child._predict_labels(
X[current_pred_labels == label]
)
pred_labels[
current_pred_labels == label, 1 : child_pred_labels.shape[1] + 1
] = child_pred_labels
else:
# only for cases where root is a | |
"""
The MIT License (MIT)
Copyright (c) 2018 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from collections import defaultdict
from collections import namedtuple
from itertools import zip_longest
import aiofiles
import aiohttp
import discord
import json
import os
import re
import socket
import yaml
from cogs.utils import checks
from cogs.utils.chat_formatting import bold
from cogs.utils.chat_formatting import inline
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from random import choice
PATH = os.path.join("data", "brawlstars")
JSON = os.path.join(PATH, "settings.json")
BAND_CONFIG_YML = os.path.join(PATH, "club.config.yml")
CACHE_PATH = os.path.join(PATH, "cache")
CACHE_PLAYER_PATH = os.path.join(CACHE_PATH, "player")
CACHE_CLUB_PATH = os.path.join(CACHE_PATH, "club")
MANAGE_ROLE_ROLES = ['Bot Commander']
from box import Box
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
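# Brief illustrative note (added for clarity, not in the original source):
# nested_dict() allows assignment at arbitrary depth without pre-creating
# intermediate keys, e.g.
#     d = nested_dict()
#     d['server']['player']['tag'] = 'ABC123'   # no KeyError at any level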
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def clean_tag(tag):
"""Clean supercell tag"""
t = tag
t = t.upper()
t = t.replace('O', '0')
t = t.replace('B', '8')
t = t.replace('#', '')
return t
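# Illustrative examples of the cleaning rules above (added for clarity,
# tag values are hypothetical):
#     clean_tag('#p0lyq')  -> 'P0LYQ'   (uppercased, '#' stripped)
#     clean_tag('8ob')     -> '808'     (O -> 0 and B -> 8 substitutions)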
def remove_color_tags(s):
"""Clean string and remove color tags from string"""
return re.sub("<[^>]*>", "", s)
def print_json(d):
print(json.dumps(d))
class BSPlayer(Box):
"""Player model"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class BSClub(Box):
"""Player model"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class APIError(Exception):
pass
class APIRequestError(APIError):
pass
class APIServerError(APIError):
pass
class APITimeoutError(APIError):
pass
class MissingServerConfig(Exception):
pass
ClubResults = namedtuple("ClubResults", ["results", "club_tags"])
def random_discord_color():
"""Return random color as an integer."""
color = ''.join([choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
return discord.Color(value=color)
async def api_fetch(url=None, auth=None):
"""Fetch from BS API"""
conn = aiohttp.TCPConnector(
family=socket.AF_INET,
verify_ssl=False,
)
try:
async with aiohttp.ClientSession(connector=conn) as session:
async with session.get(url, headers=dict(Authorization=auth), timeout=10) as resp:
if str(resp.status).startswith('4'):
raise APIRequestError()
if str(resp.status).startswith('5'):
raise APIServerError()
data = await resp.json()
except asyncio.TimeoutError:
raise APITimeoutError()
return data
async def api_fetch_player(tag=None, auth=None, **kwargs):
"""Fetch player"""
url = 'https://api.starlist.pro/v1/player?tag={}'.format(clean_tag(tag))
fn = os.path.join(CACHE_PLAYER_PATH, "{}.json".format(tag))
try:
data = await api_fetch(url=url, auth=auth)
except APIServerError:
if os.path.exists(fn):
async with aiofiles.open(fn, mode='r') as f:
                data = json.loads(await f.read())
else:
raise
else:
async with aiofiles.open(fn, mode='w') as f:
            await f.write(json.dumps(data))
return BSPlayer(data)
async def api_fetch_club(tag=None, auth=None, **kwargs):
"""Fetch player"""
url = 'https://api.starlist.pro/v1/club?tag={}'.format(clean_tag(tag))
fn = os.path.join(CACHE_CLUB_PATH, "{}.json".format(tag))
try:
data = await api_fetch(url=url, auth=auth)
except APIServerError:
if os.path.exists(fn):
async with aiofiles.open(fn, mode='r') as f:
                data = json.loads(await f.read())
else:
raise
else:
async with aiofiles.open(fn, mode='w') as f:
            await f.write(json.dumps(data))
return BSClub(data)
def normalized_trophy_by_level(trophy, level, count=1):
"""Calculate trophy per level using specific formula.
In BS, levels have the following multiplier:
1 100
2 105
3 110
4 115
5 120
6 125
7 130
8 135
9 140
relative level = (100 + 5 * level)/100
Add 100 per count
"""
return trophy / (1 * count + 0.05 * (level - 1))
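# Worked example of the formula above (added for illustration, the numbers are
# hypothetical): a single 500-trophy brawler at power level 9 gives
#     500 / (1 * 1 + 0.05 * (9 - 1)) = 500 / 1.40 ≈ 357.1
# i.e. trophies are discounted by the relative-level multiplier from the table.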
class BrawlStars:
"""Brawl Stars API"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
self._club_config = None
def _save_settings(self):
dataIO.save_json(JSON, self.settings)
return True
def get_emoji(self, name):
for emoji in self.bot.get_all_emojis():
if emoji.name == str(str(name).replace('-', '').replace('.', '')):
return '<:{}:{}>'.format(emoji.name, emoji.id)
return ''
def get_avatar(self, player):
avatar_id = player.get('avatarId') or 28000000
avatar = self.get_emoji(avatar_id)
return avatar
def _player_embed(self, player: BSPlayer):
if player.avatarId:
avatar = self.get_avatar(player)
description = '{} #{}'.format(avatar, player.tag.upper())
else:
description = '#{}'.format(player.tag.upper())
em = discord.Embed(
title=player.name,
description=description,
color=random_discord_color()
)
# club
em.add_field(name=player.club.name, value=player.club.role, inline=False)
# fields
em.add_field(name='Trophies', value="{} / {} PB".format(player.trophies, player.highestTrophies))
em.add_field(name='Boss', value="{}".format(player.bestTimeAsBoss or ''))
em.add_field(name='Robo Rumble', value="{}".format(player.bestRoboRumbleTime or ''))
em.add_field(name='XP', value="{}".format(player.totalExp or ''))
em.add_field(name='Victories', value="{}".format(player.victories or ''))
em.add_field(name='Solo SD', value="{}".format(player.soloShowdownVictories or ''))
em.add_field(name='Duo SD', value="{}".format(player.duoShowdownVictories or ''))
# brawlers
em.add_field(name="Brawlers Unlocked", value=player.brawlersUnlocked, inline=False)
for b in player.brawlers or []:
em.add_field(
name="{} {}".format(self.get_emoji(b.name.lower().replace(' ', '').replace('-', '')), b.name),
value="{} / {} Lvl {}".format(b.trophies, b.highestTrophies, b.power)
)
# footer
em.set_footer(
text="Data by BrawlAPI https://api.starlist.pro"
)
return em
def _player_embed_2(self, player: BSPlayer):
"""New player embed."""
if player.avatarId:
avatar = self.get_avatar(player)
description = '{} #{}'.format(avatar, player.tag.upper())
else:
description = '#{}'.format(player.tag.upper())
em = discord.Embed(
title=player.name,
description=description,
color=random_discord_color()
)
# club
em.add_field(name=player.club.name, value=player.club.role, inline=False)
# fields
em.add_field(name='Trophies', value="{} / {} PB".format(player.trophies, player.highestTrophies))
em.add_field(name='Boss', value="{}".format(player.bestTimeAsBoss or ''))
em.add_field(name='Robo Rumble', value="{}".format(player.bestRoboRumbleTime or ''))
em.add_field(name='XP', value="{}".format(player.totalExp or ''))
em.add_field(name='Victories', value="{}".format(player.victories or ''))
em.add_field(name='Solo SD', value="{}".format(player.soloShowdownVictories or ''))
em.add_field(name='Duo SD', value="{}".format(player.duoShowdownVictories or ''))
# brawlers
em.add_field(name="Brawlers Unlocked", value=player.brawlersUnlocked, inline=False)
o = []
for b in player.brawlers or []:
o.append(
'{emoji} `{trophies: >3} / {pb: >3} Lvl {level: >2}\u2800` {name}'.format(
emoji=self.get_emoji(b.name.lower().replace(' ', '').replace('-', '')),
trophies=b.trophies,
pb=b.highestTrophies,
level=b.power,
name=b.name
)
)
em.add_field(name="Brawlers {}/22".format(len(player.brawlers)), value='\n'.join(o))
# footer
em.set_footer(
text="Data by BrawlAPI https://api.starlist.pro"
)
return em
def _player_mini_str(self, player: BSPlayer):
"""Minimal player profile for verification."""
avatar = self.get_avatar(player)
o = [
'{}'.format(avatar),
'{} #{}'.format(bold(player.name), player.tag),
'{}, {} #{}'.format(player.club.role, player.club.name, player.club.tag) if player.club else 'No Clan',
'{} {} / {}'.format(self.get_emoji('bstrophy'), player.trophies, player.highestTrophies),
]
return "\n".join(o)
def _player_str(self, player: BSPlayer, sort='trophies'):
"""Player profile as plain text."""
avatar = self.get_avatar(player)
o = [
'{}'.format(avatar),
'{} #{}'.format(bold(player.name), player.tag),
'{}, {} #{}'.format(player.club.role, player.club.name, player.club.tag) if player.club else 'No Clan',
'{} {} / {}'.format(self.get_emoji('bstrophy'), player.trophies, player.highestTrophies),
'{emoji} {time} Best time as Big Brawler'.format(
emoji=self.get_emoji('bossfight'),
time=inline(player.bestTimeAsBigBrawler)),
'{emoji} {time} Best Robo Rumble time'.format(
emoji=self.get_emoji('roborumble'),
time=inline(player.bestRoboRumbleTime)),
# victories
'{normal} {solo} {duo}'.format(
normal='{emoji} {value} {name}'.format(
emoji=self.get_emoji('battlelog'),
value=inline(player.victories),
name='Victories'
),
solo='{emoji} {value} {name}'.format(
emoji=self.get_emoji('showdown'),
value=inline(player.soloShowdownVictories),
name='Solo SD'
),
duo='{emoji} {value} {name}'.format(
emoji=self.get_emoji('duoshowdown'),
value=inline(player.duoShowdownVictories),
name='Duo SD'
),
),
# brawler stats
'Brawlers: {}'.format(len(player.brawlers)),
'Normalized Trophies per Level {:.2f}'.format(
normalized_trophy_by_level(player.trophies, sum([b.power for b in player.brawlers]),
count=len(player.brawlers))
# player.trophies / sum([b.level for b in player.brawlers])
),
'Trophies per Brawler: {:.2f}'.format(
player.trophies / len(player.brawlers)
),
]
# brawlers
brawlers = player.brawlers.copy()
if sort == 'level':
            brawlers.sort(key=lambda x: x.power, reverse=True)
elif sort == 'trophy_by_level':
            brawlers.sort(key=lambda x: normalized_trophy_by_level(x.trophies, x.power), reverse=True)
for b in brawlers or []:
o.append(
'{emoji} `\u2800{trophies: >3} Lvl {level: >2} {trophy_per_level: >2.2f}\u2800` {name}'.format(
emoji=self.get_emoji(b.name.lower().replace(' ', '')),
trophies=b.trophies,
pb=b.highestTrophies,
level=b.power,
name=b.name,
# trophy_per_level=b.trophies / b.level,
trophy_per_level=normalized_trophy_by_level(b.trophies, b.power)
)
)
return '\n'.join(o)
async def _get_club_config(self, force_update=False):
if force_update or self._club_config is None:
async with aiofiles.open(BAND_CONFIG_YML) as f:
contents = await f.read()
            self._club_config = yaml.safe_load(contents)
return self._club_config
async def _get_server_config(self, server_id=None):
cfg = await self._get_club_config()
for server in cfg.get('servers', []):
if str(server.get('id')) == str(server_id):
return server
return None
async def send_error_message(self, ctx):
channel = ctx.message.channel
await self.bot.send_message(channel, "BrawlAPI Error. Please try again later…")
async def _api_fetch(self, section=None, **kwargs):
data = dict()
auth = self.settings.get('brawlapi_token')
if section == 'player':
data = await api_fetch_player(auth=auth, **kwargs)
if section == 'club':
data = await api_fetch_club(auth=auth, **kwargs)
return data
@commands.group(pass_context=True, no_pm=True)
@checks.serverowner_or_permissions()
async def bsset(self, ctx):
"""Set Brawl Stars API settings.
        Requires https://brawlapi.cf/api
"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@bsset.command(name="init", pass_context=True)
async def _bsset_init(self, ctx):
"""Init BS Band settings."""
server = ctx.message.server
self.settings[server.id] = {}
        if self._save_settings():
await self.bot.say("Server settings initialized.")
@bsset.command(name="auth", pass_context=True)
async def _bsset_auth(self, ctx, token):
"""Authorization (token)."""
self.settings['brawlapi_token'] = token
if self._save_settings():
await self.bot.say("Authorization (token) updated.")
await self.bot.delete_message(ctx.message)
@bsset.command(name="config", pass_context=True)
async def _bsset_config(self, ctx):
"""Band config"""
if len(ctx.message.attachments) == 0:
await self.bot.say(
"Please attach config yaml with this command. "
"See config.example.yml for how to format it."
)
return
attach = ctx.message.attachments[0]
url = attach["url"]
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
with open(BAND_CONFIG_YML, "wb") as f:
f.write(await resp.read())
await self.bot.say(
"Attachment received and saved as {}".format(BAND_CONFIG_YML))
self.settings['config'] = BAND_CONFIG_YML
dataIO.save_json(JSON, self.settings)
await self.bot.delete_message(ctx.message)
@commands.group(pass_context=True, no_pm=True)
async def bs(self, ctx):
"""Brawl Stars."""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
# @bs.command(name="settag", alias=['st'], pass_context=True)
# async def bs_settag(self, ctx, tag):
# """Assign tag to self."""
# tag = clean_tag(tag)
# server = ctx.message.server
# author = | |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
from awkward._v2.tmp_for_testing import v1_to_v2, v1v2_equal
pytestmark = pytest.mark.skipif(
ak._util.py27, reason="No Python 2.7 support in Awkward 2.x"
)
def test_NumpyArray():
a = ak._v2.contents.RegularArray(
v1_to_v2(ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout), 3
)
assert ak.to_list(a[1]) == [
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29],
]
assert ak.to_list(a[1, -2]) == [20, 21, 22, 23, 24]
assert a.typetracer[1, -2].form == a[1, -2].form
assert a[1, -2, 2] == 22
with pytest.raises(IndexError):
a[1, -2, 2, 0]
assert ak.to_list(a[1, -2, 2:]) == [22, 23, 24]
assert a.typetracer[1, -2, 2:].form == a[1, -2, 2:].form
with pytest.raises(IndexError):
a[1, -2, 2:, 0]
with pytest.raises(IndexError):
a[1, -2, "hello"]
with pytest.raises(IndexError):
a[1, -2, ["hello", "there"]]
assert ak.to_list(a[1, -2, np.newaxis, 2]) == [22]
assert ak.to_list(a[1, -2, np.newaxis, np.newaxis, 2]) == [[22]]
assert ak.to_list(a[1, -2, ...]) == [20, 21, 22, 23, 24]
assert a.typetracer[1, -2, ...].form == a[1, -2, ...].form
assert a.typetracer[1, ..., -2].form == a[1, ..., -2].form
assert a[1, -2, ..., 2] == 22
with pytest.raises(IndexError):
a[1, -2, ..., 2, 2]
b = ak.layout.RegularArray(
ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout, 3
)
assert ak.to_list(b[1, -2, [3, 1, 1, 2]]) == [23, 21, 21, 22]
assert ak.to_list(a[1, -2, [3, 1, 1, 2]]) == [23, 21, 21, 22]
with pytest.raises(IndexError):
a[1, -2, [3, 1, 1, 2], 2]
def test_RegularArray():
old = ak.layout.RegularArray(
ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout, 3
)
new = v1_to_v2(old)
assert ak.to_list(old[1, 1:]) == [[20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]
assert ak.to_list(new[1, 1:]) == [[20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]
assert v1v2_equal(old[0, 1:], new[0, 1:])
assert new.typetracer[1, 1:].form == new[1, 1:].form
with pytest.raises(IndexError):
new[1, "hello"]
with pytest.raises(IndexError):
new[1, ["hello", "there"]]
assert ak.to_list(new[1, np.newaxis, -2]) == [[20, 21, 22, 23, 24]]
assert ak.to_list(new[1, np.newaxis, np.newaxis, -2]) == [[[20, 21, 22, 23, 24]]]
assert new.typetracer[1, np.newaxis, -2].form == new[1, np.newaxis, -2].form
assert old.minmax_depth == (3, 3)
assert new.minmax_depth == (3, 3)
assert ak.to_list(old[1, ..., -2]) == [18, 23, 28]
assert ak.to_list(new[1, ..., -2]) == [18, 23, 28]
assert new.typetracer[1, ..., -2].form == new[1, ..., -2].form
expectation = [
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
]
assert (
ak.to_list(
old[
[1, 0],
]
)
== expectation
)
assert (
ak.to_list(
new[
[1, 0],
]
)
== expectation
)
assert (
new.typetracer[
[1, 0],
].form
== new[
[1, 0],
].form
)
assert ak.to_list(new[[1, 0]]) == expectation
assert ak.to_list(old[1, [2, 0]]) == [[25, 26, 27, 28, 29], [15, 16, 17, 18, 19]]
assert ak.to_list(new[1, [2, 0]]) == [[25, 26, 27, 28, 29], [15, 16, 17, 18, 19]]
def test_RecordArray():
old = ak.layout.RecordArray(
[
ak.layout.NumpyArray(
np.array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], np.int64)
),
ak.layout.NumpyArray(
np.array(
[[0.0, 1.1, 2.2, 3.3, 4.4, 5.5], [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]]
)
),
],
["x", "y"],
)
new = v1_to_v2(old)
assert ak.to_list(old[:, 3:]) == [
{"x": [3, 4], "y": [3.3, 4.4, 5.5]},
{"x": [3, 4], "y": [3.3, 4.4, 5.5]},
]
assert ak.to_list(new[:, 3:]) == [
{"x": [3, 4], "y": [3.3, 4.4, 5.5]},
{"x": [3, 4], "y": [3.3, 4.4, 5.5]},
]
assert new.typetracer[:, 3:].form == new[:, 3:].form
with pytest.raises(IndexError):
new[1, "hello"]
with pytest.raises(IndexError):
new[1, ["hello", "there"]]
assert ak.to_list(new[1, np.newaxis]) == [
{"x": [0, 1, 2, 3, 4], "y": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]}
]
assert ak.to_list(old[1, np.newaxis]) == [
{"x": [0, 1, 2, 3, 4], "y": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]}
]
assert new.typetracer[1, np.newaxis].form == new[1, np.newaxis].form
assert old.minmax_depth == (2, 2)
assert new.minmax_depth == (2, 2)
assert ak.to_list(old[0, ..., 0]) == {"x": 0, "y": 0.0}
assert ak.to_list(new[0, ..., 0]) == {"x": 0, "y": 0.0}
assert new.typetracer[0, ..., 0].array.form == new[0, ..., 0].array.form
expectation = [
{"x": [0, 1, 2, 3, 4], "y": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]},
{"x": [0, 1, 2, 3, 4], "y": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]},
]
assert (
ak.to_list(
old[
[1, 0],
]
)
== expectation
)
assert (
ak.to_list(
new[
[1, 0],
]
)
== expectation
)
assert (
new.typetracer[
[1, 0],
].form
== new[
[1, 0],
].form
)
assert ak.to_list(new[[1, 0]]) == expectation
assert ak.to_list(old[1, [1, 0]]) == [{"x": 1, "y": 1.1}, {"x": 0, "y": 0.0}]
assert ak.to_list(new[1, [1, 0]]) == [{"x": 1, "y": 1.1}, {"x": 0, "y": 0.0}]
def test_UnmaskedArray():
old = ak.layout.UnmaskedArray(
ak.layout.NumpyArray(np.array([[0.0, 1.1, 2.2, 3.3], [0.0, 1.1, 2.2, 3.3]]))
)
new = v1_to_v2(old)
assert ak.to_list(old[0, 1:]) == [1.1, 2.2, 3.3]
assert ak.to_list(new[0, 1:]) == [1.1, 2.2, 3.3]
assert v1v2_equal(old[0, 1:], new[0, 1:])
assert new.typetracer[0, 1:].form == new[0, 1:].form
with pytest.raises(IndexError):
new[1, "hello"]
with pytest.raises(IndexError):
new[1, ["hello", "there"]]
assert ak.to_list(new[1, np.newaxis, -2]) == [2.2]
assert ak.to_list(new[1, np.newaxis, np.newaxis, -2]) == [[2.2]]
assert new.typetracer[1, np.newaxis, -2].form == new[1, np.newaxis, -2].form
assert old.minmax_depth == (2, 2)
assert new.minmax_depth == (2, 2)
assert ak.to_list(old[1, ..., -2]) == 2.2
assert ak.to_list(new[1, ..., -2]) == 2.2
expectation = [[0.0, 1.1, 2.2, 3.3], [0.0, 1.1, 2.2, 3.3]]
assert (
ak.to_list(
old[
[1, 0],
]
)
== expectation
)
assert (
ak.to_list(
new[
[1, 0],
]
)
== expectation
)
assert ak.to_list(new[[1, 0]]) == expectation
assert v1v2_equal(old[1, ...], new[1, ...])
assert ak.to_list(old[1, [1, 0]]) == [1.1, 0.0]
assert ak.to_list(new[1, [1, 0]]) == [1.1, 0.0]
assert (
new.typetracer[
[1, 0],
].form
== new[
[1, 0],
].form
)
def test_UnionArray():
old = ak.layout.UnionArray8_64(
ak.layout.Index8(np.array([1, 1], np.int8)),
ak.layout.Index64(np.array([1, 0], np.int64)),
[
ak.layout.RegularArray(
ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout, 3
),
ak.layout.RegularArray(
ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout, 3
),
],
)
new = v1_to_v2(old)
assert new.typetracer[1, [1, 0]].form == new[1, [1, 0]].form
assert ak.to_list(old[0, :]) == [
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29],
]
assert ak.to_list(new[0, :]) == [
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29],
]
assert v1v2_equal(old[0, :], new[0, :])
with pytest.raises(IndexError):
new[1, "hello"]
with pytest.raises(IndexError):
new[1, ["hello", "there"]]
assert ak.to_list(new[0, np.newaxis]) == [
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]
]
assert ak.to_list(old[0, np.newaxis]) == [
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]
]
assert new.typetracer[0, np.newaxis].form == new[0, np.newaxis].form
assert old.minmax_depth == (3, 3)
assert new.minmax_depth == (3, 3)
assert ak.to_list(old[1, ...]) == [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
]
assert ak.to_list(new[1, ...]) == [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
]
expectation = [
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
]
assert (
ak.to_list(
old[
[1, 0],
]
)
== expectation
)
assert (
ak.to_list(
new[
[1, 0],
]
)
== expectation
)
assert (
new.typetracer[
[1, 0],
].form
== new[
[1, 0],
].form
)
assert ak.to_list(old[[1, 0]]) == expectation
assert ak.to_list(new[[1, 0]]) == expectation
assert ak.to_list(old[1, [1, 0]]) == [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]
assert ak.to_list(new[1, [1, 0]]) == [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]
def test_IndexedArray():
old = ak.layout.IndexedArray64(
ak.layout.Index64(np.array([1, 0], np.int64)),
ak.layout.RegularArray(
ak.from_numpy(np.arange(2 * 3 * 5).reshape(-1, 5)).layout, 3
),
)
new = v1_to_v2(old)
assert v1v2_equal(old[1, 1:], new[1, 1:])
assert ak.to_list(old[1, 1:]) == [[5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]
assert ak.to_list(new[1, 1:]) == [[5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]
assert new.typetracer[1, 1:].form == new[1, 1:].form
with pytest.raises(IndexError):
new[1, "hello"]
with pytest.raises(IndexError):
new[1, ["hello", "there"]]
assert ak.to_list(new[0, np.newaxis]) == [
        [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]
    ]
color, label
for experiment in exps:
col = 0 if experiment.params['mdp'] == 'bandits' else 1
ax = get_ax(axes, 0, 1, num_columns, col)
params = experiment.params
if args.exclude:
if any(str(params[key]) in args.exclude for key in params.keys()):
continue
means = experiment.means_data
num_iter = len(means[y_var])
cum_regret = means[y_var][num_iter - 1]
x_pos, color, label = params_to_x_pos_and_color_and_label(params)
if x_pos is not None:
ax.bar([x_pos], [cum_regret], yerr=[1], color=color, label=label)
labels.append(label)
# shift = -0.175
# plt.xticks([0.0 + shift + 0.5*barwidth, 1.125 + shift + barwidth, 1.125 + shift + 2*barwidth, 1.125 + shift + 3*barwidth,
# 1.125 + shift + 5*barwidth, 1.125 + shift + 6*barwidth, 1.125 + shift + 7*barwidth],
# [2,3,5,10,1,2,3],fontsize=12)
shift = -0.1
barwidth = 1.0 + shift
plt.xticks(
[0.0 + shift + 0.5 * barwidth, 1. + shift + 0.5 * barwidth, 2. + shift + 0.5 * barwidth,
3. + shift + 0.5 * barwidth,
5. + shift + 0.5 * barwidth - 0.3, 6. + shift + 0.5 * barwidth - 0.3, 7. + shift + 0.5 * barwidth - 0.3],
[2, 3, 5, 10, 1, 2, 3], fontsize=12)
ax.set_xlim([-0.25, 7.7])
ax_left = get_ax(axes, 1, 1, num_columns, 0)
ax_left.set_ylabel(var_to_label(y_var), fontsize=17)
# Set title
title = get_title(col)
ax_top = get_ax(axes, 0, 1, num_columns, col)
ax_top.set_title(title, fontsize=17, fontweight='normal')
'Make legend'
try:
ax = axes[-1][-1]
except TypeError:
try:
ax = axes[-1]
except TypeError:
ax = axes
# for ax in flatten(axes):
plt.sca(ax)
handles, labels = ax.get_legend_handles_labels()
# try: legend_order = sorted([int(label) for label in labels])
legend_order = [2, 3, 0, 1] # for outperform_IRD
# legend_order = range(len(labels)) # for discrete
# legend_order = [1,0,2] # for continuous
hl = sorted(zip(handles, labels, legend_order), # Sorts legend by putting labels 0:k to place as specified
key=lambda elem: elem[2])
hl = [[handle, label] for handle, label, idx in hl]
try:
handles2, labels2 = zip(*hl)
except ValueError:
handles2, labels2 = [], []
print(Warning('Warning: Possibly data only exists for one environment'))
# ax.legend(handles2, labels2, fontsize=10)
plt.legend(handles2, labels2, fontsize=13)
'Change global layout'
sns.despine(fig) # Removes top and right graph edges
plt.suptitle('Number of queries asked', y=0.0, x=0.52, fontsize=17, verticalalignment='bottom')
fig.subplots_adjust(left=0.09, right=.96, top=0.92, bottom=0.12)
# plt.tight_layout(w_pad=0.02, rect=[0, 0.03, 1, 0.95]) # w_pad adds horizontal space between graphs
# plt.subplots_adjust(top=1, wspace=0.35) # adds space at the top or bottom
# plt.subplots_adjust(bottom=.2)
fig.set_figwidth(7.5) # Can be adjusted by resizing window
'Save file'
subtitle = ','.join(['{0}={1}'.format(k, v) for k, v in controls])
subtitle = '{0},{1}'.format(subtitle, other_vals).strip(',')
folder = concat('graph', folder)
filename = '{0}-vs-{1}-for-{2}-with-{3}.png'.format(
','.join(dependent_vars), x_var, ','.join(independent_vars), subtitle)
if not os.path.exists(folder):
os.makedirs(folder)
# plt.show()
plt.savefig(concat(folder, filename))
plt.close()
def graph(exps, x_var, dependent_vars, independent_vars, controls,
other_vals, folder, args):
"""Creates and saves a single graph.
Arguments are almost the same as for graph_all.
- other_vals: String of the form "{var}={val},..." specifying values of
variables not in x_var, dependent_vars, independent_vars, or
controls.
"""
# Whole figure layout setting
set_style()
num_rows = len(dependent_vars)
num_columns = 2 if args.double_envs else 1
fig, axes = plt.subplots(num_rows, num_columns, sharex=True)
    sns.set_context(rc={'lines.markeredgewidth': 1.0})  # Thickness of error bars
capsize = 0. # length of horizontal line on error bars
spacing = 100.0
# Draw all lines and labels
for row, y_var in enumerate(dependent_vars):
labels = []
for experiment in exps:
col = 0 if experiment.params['mdp'] == 'bandits' else 1
ax = get_ax(axes, row, num_rows, num_columns, col)
params = experiment.params
if args.exclude:
if any(str(params[key]) in args.exclude for key in params.keys()):
continue
means, sterrs = experiment.means_data, experiment.sterrs_data
i_var_val = ', '.join([str(params[k]) for k in independent_vars])
# if 'full' in i_var_val:
# means, sterrs = constant_data_full_IRD(means, sterrs, y_var)
label = i_var_to_label(i_var_val) + (', ' + str(params['qsize'])) * args.compare_qsizes # name in legend
color = chooser_to_color(i_var_val, args, params)
x_data = np.array(means[x_var]) + 1
try:
ax.errorbar(x_data, means[y_var], yerr=sterrs[y_var], color=color,
capsize=capsize, capthick=1, label=label) # ,
# marker='o', markerfacecolor='white', markeredgecolor=chooser_to_color(chooser),
# markersize=4)
except:
pass
labels.append(label)
ax.set_xlim([0, 21])
# ylim = ax.get_ylim()
# ax.set_ylim(ylim) #-0.15)
# Set ylabel
ax_left = get_ax(axes, row, num_rows, num_columns, 0)
ax_left.set_ylabel(var_to_label(y_var), fontsize=17)
# Set title
# title = 'Data for {0}'.format(', '.join(independent_vars))
title = get_title(col)
ax_top = get_ax(axes, 0, num_rows, num_columns, col)
ax_top.set_title(title, fontsize=17, fontweight='normal')
'Make legend'
try:
ax = axes[-1][-1]
except TypeError:
try:
ax = axes[-1]
except TypeError:
ax = axes
# for ax in flatten(axes):
plt.sca(ax)
handles, labels = ax.get_legend_handles_labels()
# try: legend_order = sorted([int(label) for label in labels])
legend_order = [2, 3, 0, 1] # for outperform_IRD
# legend_order = range(len(labels)) # for discrete
# legend_order = [1,0,2] # for continuous
hl = sorted(zip(handles, labels, legend_order), # Sorts legend by putting labels 0:k to place as specified
key=lambda elem: elem[2])
hl = [[handle, label] for handle, label, idx in hl]
try:
handles2, labels2 = zip(*hl)
except ValueError:
handles2, labels2 = [], []
print(Warning('Warning: Possibly data only exists for one environment'))
# ax.legend(handles2, labels2, fontsize=10)
plt.legend(handles2, labels2, fontsize=13)
'Change global layout'
sns.despine(fig) # Removes top and right graph edges
plt.suptitle('Number of queries asked', y=0.0, x=0.52, fontsize=17, verticalalignment='bottom')
fig.subplots_adjust(left=0.09, right=.96, top=0.92, bottom=0.12)
# plt.tight_layout(w_pad=0.02, rect=[0, 0.03, 1, 0.95]) # w_pad adds horizontal space between graphs
# plt.subplots_adjust(top=1, wspace=0.35) # adds space at the top or bottom
# plt.subplots_adjust(bottom=.2)
fig.set_figwidth(7.5) # Can be adjusted by resizing window
'Save file'
subtitle = ','.join(['{0}={1}'.format(k, v) for k, v in controls])
subtitle = '{0},{1}'.format(subtitle, other_vals).strip(',')
folder = concat('graph', folder)
filename = '{0}-vs-{1}-for-{2}-with-{3}.png'.format(
','.join(dependent_vars), x_var, ','.join(independent_vars), subtitle)
if not os.path.exists(folder):
os.makedirs(folder)
# plt.show()
plt.savefig(concat(folder, filename))
plt.close()
def flatten(l):
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in flatten(el):
yield sub
else:
yield el
def get_ax(axes, row, num_rows, num_columns, col):
onecol = num_columns == 1
onerow = num_rows == 1
# col = 0 if experiment.params['mdp'] == 'bandits' or num_columns == 1 else 1
if not onerow and not onecol:
ax = axes[row, col]
elif onecol and not onerow:
ax = axes[row]
elif not onecol and onerow:
ax = axes[col]
elif onecol and onerow:
ax = axes
else:
raise ValueError('Number of dependent vars and envs each must be 1 or 2')
return ax
def get_title(axnum):
if axnum == 0:
return 'Shopping'
elif axnum == 1:
return 'Chilly World'
else:
return str(axnum)
def create_legend(ax):
lines = [
('nominal', {'color': '#f79646', 'linestyle': 'solid'}),
('risk-averse', {'color': '#f79646', 'linestyle': 'dashed'}),
('nominal', {'color': '#cccccc', 'linestyle': 'solid'}),
('IRD-augmented', {'color': '#cccccc', 'linestyle': 'dotted'}),
('risk-averse', {'color': '#cccccc', 'linestyle': 'dashed'})
]
def create_dummy_line(**kwds):
return mpl.lines.Line2D([], [], **kwds)
ax.legend([create_dummy_line(**l[1]) for l in lines],
[l[0] for l in lines],
loc='upper right',
ncol=1,
fontsize=10,
bbox_to_anchor=(1.1, 1.0)) # adjust horizontal and vertical legend position
def set_style():
mpl.rcParams['text.usetex'] = True
mpl.rc('font', family='serif', serif=['Palatino']) # Makes font thinner
sns.set(font='serif', font_scale=1.4) # Change font size of (sub) title and legend. Serif seems to have no effect.
# Make the background a dark grid, and specify the
# specific font family
sns.set_style("white", { # Font settings have no effect
"font.family": "serif",
"font.weight": "normal",
"font.serif": ["Times", "Palatino", "serif"]})
# 'axes.facecolor': 'darkgrid'})
# 'lines.markeredgewidth': 1})
def plot_sig_line(ax, x1, x2, y1, h, padding=0.3):
'''
    Plots the bracket denoting statistical significance. h controls the vertical
    height of the bracket. Only one y coordinate (y1) is needed since the bracket
    is parallel to the x-axis.
'''
ax.plot([x1, x1, x2, x2], [y1, y1 + h, y1 + h, y1], linewidth=1, color='k')
ax.text(0.5 * (x1 + x2), y1 + h + padding * h, '*', color='k', fontsize=16, fontweight='normal')
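# Illustrative call (hypothetical axes and values): draw a significance bracket
# between the bars at x=0 and x=1, starting at height y=2.0 with a bracket
# 0.1 units tall:
#
#     plot_sig_line(ax, x1=0, x2=1, y1=2.0, h=0.1)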
def var_to_label(varname):
if varname in ['true_entropy']:
return 'Entropy $\mathcal{H}[w^*|\mathcal{D}]$'
if varname in ['test_regret']:
return 'Regret in test envs'
if varname in ['post_regret']:
return 'Regret in training environment'
if varname in ['cum_test_regret']:
return 'Cumulative test regret'
if varname in ['time']:
return 'Seconds per iteration'
if varname in ['norm post_avg-true']:
return 'Distance of posterior average to true reward'
if varname in ['iteration']:
return 'Number of queries asked'
return varname
def chooser_to_color(chooser, args, params):
greedy_color = 'darkorange' # 'lightblue'
exhaustive_color = 'crimson' # 'peachpuff', 'crimson'
random_color = 'darkgrey' # 'darkorange'
full_color = 'lightgrey' # 'grey'
# Colors to distinguish optimizers
# feature_color = 'blue'
# feature_color_random = 'lightblue'
# search_color = 'olivedrab'
    # both_color =
# nengo/transforms.py
import warnings
import numpy as np
from nengo.base import FrozenObject
from nengo.dists import Distribution, DistOrArrayParam, Uniform
from nengo.exceptions import ValidationError
from nengo.params import (
BoolParam,
EnumParam,
IntParam,
NdarrayParam,
Parameter,
ShapeParam,
)
from nengo.rc import rc
from nengo.utils.numpy import is_array_like, scipy_sparse
class Transform(FrozenObject):
"""A base class for connection transforms.
.. versionadded:: 3.0.0
"""
def sample(self, rng=np.random):
"""Returns concrete weights to implement the specified transform.
Parameters
----------
rng : `numpy.random.RandomState`, optional
Random number generator state.
Returns
-------
array_like
Transform weights
"""
raise NotImplementedError()
@property
def size_in(self):
"""Expected size of input to transform."""
raise NotImplementedError()
@property
def size_out(self):
"""Expected size of output from transform."""
raise NotImplementedError()
class ChannelShapeParam(ShapeParam):
"""A parameter where the value must be a shape with channels.
.. versionadded:: 3.0.0
"""
def coerce(self, transform, shape):
if isinstance(shape, ChannelShape):
if shape.channels_last != transform.channels_last:
raise ValidationError(
"transform has channels_last=%s, but input shape has "
"channels_last=%s" % (transform.channels_last, shape.channels_last),
attr=self.name,
obj=transform,
)
super().coerce(transform, shape.shape)
else:
super().coerce(transform, shape)
shape = ChannelShape(shape, channels_last=transform.channels_last)
return shape
class Dense(Transform):
"""A dense matrix transformation between an input and output signal.
.. versionadded:: 3.0.0
Parameters
----------
shape : tuple of int
The shape of the dense matrix: ``(size_out, size_in)``.
init : `.Distribution` or array_like, optional
A Distribution used to initialize the transform matrix, or a concrete
instantiation for the matrix. If the matrix is square we also allow a
scalar (equivalent to ``np.eye(n) * init``) or a vector (equivalent to
``np.diag(init)``) to represent the matrix more compactly.
"""
shape = ShapeParam("shape", length=2, low=1)
init = DistOrArrayParam("init")
def __init__(self, shape, init=1.0):
super().__init__()
self.shape = shape
if is_array_like(init):
init = np.asarray(init, dtype=rc.float_dtype)
# check that the shape of init is compatible with the given shape
# for this transform
expected_shape = None
if shape[0] != shape[1]:
# init must be 2D if transform is not square
expected_shape = shape
elif init.ndim == 1:
expected_shape = (shape[0],)
elif init.ndim >= 2:
expected_shape = shape
if expected_shape is not None and init.shape != expected_shape:
raise ValidationError(
"Shape of initial value %s does not match expected "
"shape %s" % (init.shape, expected_shape),
attr="init",
)
self.init = init
@property
def _argreprs(self):
return ["shape=%r" % (self.shape,)]
def sample(self, rng=np.random):
if isinstance(self.init, Distribution):
return self.init.sample(*self.shape, rng=rng)
return self.init
@property
def init_shape(self):
"""The shape of the initial value."""
return self.shape if isinstance(self.init, Distribution) else self.init.shape
@property
def size_in(self):
return self.shape[1]
@property
def size_out(self):
return self.shape[0]
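# Usage sketch (illustrative shapes, not part of the public docs): a Dense
# transform can be sampled directly to obtain its concrete weight matrix.
#
#     dense = Dense((3, 2), init=np.ones((3, 2)))
#     weights = dense.sample()      # (3, 2) ndarray
#     assert dense.size_in == 2 and dense.size_out == 3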
class SparseInitParam(Parameter):
def coerce(self, instance, value):
if not (
isinstance(value, SparseMatrix)
or (scipy_sparse is not None and isinstance(value, scipy_sparse.spmatrix))
):
raise ValidationError(
"Must be `nengo.transforms.SparseMatrix` or "
"`scipy.sparse.spmatrix`, got %s" % type(value),
attr="init",
obj=instance,
)
return super().coerce(instance, value)
class SparseMatrix(FrozenObject):
"""Represents a sparse matrix.
.. versionadded:: 3.0.0
Parameters
----------
indices : array_like of int
An Nx2 array of integers indicating the (row,col) coordinates for the
N non-zero elements in the matrix.
data : array_like or `.Distribution`
An Nx1 array defining the value of the nonzero elements in the matrix
(corresponding to ``indices``), or a `.Distribution` that will be
used to initialize the nonzero elements.
shape : tuple of int
Shape of the full matrix.
"""
indices = NdarrayParam("indices", shape=("*", 2), dtype=np.int64)
data = DistOrArrayParam("data", sample_shape=("*",))
shape = ShapeParam("shape", length=2)
def __init__(self, indices, data, shape):
super().__init__()
self.indices = indices
self.shape = shape
# if data is not a distribution
if is_array_like(data):
data = np.asarray(data)
# convert scalars to vectors
if data.size == 1:
data = data.item() * np.ones(self.indices.shape[0], dtype=data.dtype)
if data.ndim != 1 or data.shape[0] != self.indices.shape[0]:
raise ValidationError(
"Must be a vector of the same length as `indices`",
attr="data",
obj=self,
)
self.data = data
self._allocated = None
self._dense = None
@property
def dtype(self):
return self.data.dtype
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return self.indices.shape[0]
def allocate(self):
"""Return a `scipy.sparse.csr_matrix` or dense matrix equivalent.
We mark this data as readonly to be consistent with how other
data associated with signals are allocated. If this allocated
data is to be modified, it should be copied first.
"""
if self._allocated is not None:
return self._allocated
if scipy_sparse is None:
warnings.warn(
"Sparse operations require Scipy, which is not "
"installed. Using dense matrices instead."
)
self._allocated = self.toarray().view()
else:
self._allocated = scipy_sparse.csr_matrix(
(self.data, self.indices.T), shape=self.shape
)
self._allocated.data.setflags(write=False)
return self._allocated
def sample(self, rng=np.random):
"""Convert `.Distribution` data to fixed array.
Parameters
----------
rng : `.numpy.random.RandomState`
Random number generator that will be used when
sampling distribution.
Returns
-------
matrix : `.SparseMatrix`
A new `.SparseMatrix` instance with `.Distribution` converted to
array if ``self.data`` is a `.Distribution`, otherwise simply
returns ``self``.
"""
if isinstance(self.data, Distribution):
return SparseMatrix(
self.indices,
self.data.sample(self.indices.shape[0], rng=rng),
self.shape,
)
else:
return self
def toarray(self):
"""Return the dense matrix equivalent of this matrix."""
if self._dense is not None:
return self._dense
self._dense = np.zeros(self.shape, dtype=self.dtype)
self._dense[self.indices[:, 0], self.indices[:, 1]] = self.data
# Mark as readonly, if the user wants to modify they should copy first
self._dense.setflags(write=False)
return self._dense
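# Construction sketch (illustrative): two nonzero entries in a 3x3 matrix.
#
#     m = SparseMatrix(indices=[[0, 0], [2, 1]], data=[1.0, -2.0], shape=(3, 3))
#     m.toarray()
#     # -> array([[ 1.,  0.,  0.],
#     #           [ 0.,  0.,  0.],
#     #           [ 0., -2.,  0.]])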
class Sparse(Transform):
"""A sparse matrix transformation between an input and output signal.
.. versionadded:: 3.0.0
Parameters
----------
shape : tuple of int
The full shape of the sparse matrix: ``(size_out, size_in)``.
indices : array_like of int
An Nx2 array of integers indicating the (row,col) coordinates for the
N non-zero elements in the matrix.
init : `.Distribution` or array_like, optional
A Distribution used to initialize the transform matrix, or a concrete
instantiation for the matrix. If the matrix is square we also allow a
scalar (equivalent to ``np.eye(n) * init``) or a vector (equivalent to
``np.diag(init)``) to represent the matrix more compactly.
"""
shape = ShapeParam("shape", length=2, low=1)
init = SparseInitParam("init")
def __init__(self, shape, indices=None, init=1.0):
super().__init__()
self.shape = shape
if scipy_sparse and isinstance(init, scipy_sparse.spmatrix):
assert indices is None
assert init.shape == self.shape
self.init = init
elif indices is not None:
self.init = SparseMatrix(indices, init, shape)
else:
raise ValidationError(
"Either `init` must be a `scipy.sparse.spmatrix`, "
"or `indices` must be specified.",
attr="init",
)
@property
def _argreprs(self):
return ["shape=%r" % (self.shape,)]
def sample(self, rng=np.random):
if scipy_sparse and isinstance(self.init, scipy_sparse.spmatrix):
return self.init
else:
return self.init.sample(rng=rng)
@property
def size_in(self):
return self.shape[1]
@property
def size_out(self):
return self.shape[0]
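# Usage sketch (illustrative): the same nonzero pattern expressed as a Sparse
# transform; either pass `indices` plus `init` values, or a ready-made
# `scipy.sparse` matrix as `init`.
#
#     sp = Sparse((3, 3), indices=[[0, 0], [2, 1]], init=[1.0, -2.0])
#     sp.sample()                   # SparseMatrix (or the scipy matrix, if given)
#     assert sp.size_in == 3 and sp.size_out == 3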
class Convolution(Transform):
"""An N-dimensional convolutional transform.
The dimensionality of the convolution is determined by the input shape.
.. versionadded:: 3.0.0
Parameters
----------
n_filters : int
The number of convolutional filters to apply
input_shape : tuple of int or `.ChannelShape`
Shape of the input signal to the convolution; e.g.,
``(height, width, channels)`` for a 2D convolution with
``channels_last=True``.
kernel_size : tuple of int, optional
Size of the convolutional kernels (1 element for a 1D convolution,
2 for a 2D convolution, etc.).
strides : tuple of int, optional
Stride of the convolution (1 element for a 1D convolution, 2 for
a 2D convolution, etc.).
padding : ``"same"`` or ``"valid"``, optional
Padding method for input signal. "Valid" means no padding, and
convolution will only be applied to the fully-overlapping areas of the
input signal (meaning the output will be smaller). "Same" means that
the input signal is zero-padded so that the output is the same shape
as the input.
channels_last : bool, optional
If ``True`` (default), the channels are the last dimension in the input
signal (e.g., a 28x28 image with 3 channels would have shape
``(28, 28, 3)``). ``False`` means that channels are the first
dimension (e.g., ``(3, 28, 28)``).
init : `.Distribution` or `~numpy:numpy.ndarray`, optional
A predefined kernel with shape
``kernel_size + (input_channels, n_filters)``, or a ``Distribution``
that will be used to initialize the kernel.
Notes
-----
As is typical in neural networks, this is technically correlation rather
than convolution (because the kernel is not flipped).
"""
n_filters = IntParam("n_filters", low=1)
input_shape = ChannelShapeParam("input_shape", low=1)
kernel_size = ShapeParam("kernel_size", low=1)
strides = ShapeParam("strides", low=1)
padding = EnumParam("padding", values=("same", "valid"))
channels_last = BoolParam("channels_last")
init = DistOrArrayParam("init")
_param_init_order = ["channels_last", "input_shape"]
def __init__(
self,
n_filters,
input_shape,
kernel_size=(3, 3),
strides=(1, 1),
padding="valid",
channels_last=True,
init=Uniform(-1, 1),
):
super().__init__()
self.n_filters = n_filters
self.channels_last = channels_last # must be set before input_shape
self.input_shape = input_shape
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
| |
ext_attr_cont """
if len(p) == 3:
p[0] = ListFromConcat(self.BuildExtAttribute(p[1], 'True'), p[2])
if len(p) == 5:
p[0] = ListFromConcat(self.BuildExtAttribute(p[1], p[3]), p[4])
if len(p) == 6:
p[0] = ListFromConcat(self.BuildExtAttribute(p[1], p[3]), p[5])
if self.parse_debug: DumpReduction('ext_attribute_list', p)
def p_ext_attr_cont(self, p):
"""ext_attr_cont : ',' ext_attr_list
|"""
if len(p) > 1:
      p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attribute_cont', p)
def p_attr_arg_list(self, p):
"""attr_arg_list : SYMBOL attr_arg_cont
| value attr_arg_cont """
p[0] = ','.join(ListFromConcat(p[1], p[2]))
if self.parse_debug: DumpReduction('attr_arg_list', p)
def p_attr_arg_cont(self, p):
"""attr_arg_cont : ',' attr_arg_list
| """
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('attr_arg_cont', p)
def p_attr_arg_error(self, p):
"""attr_arg_cont : error attr_arg_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('attr_arg_error', p)
#
# Describe
#
# A describe block is defined at the top level. It provides a mechanism for
# attributing a group of ext_attr to a describe_list. Members of the
# describe list are language specific 'Type' declarations
#
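  # A hypothetical fragment matched by these rules:
  #
  #   describe {
  #     int32_t;
  #     mem_t;
  #   };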
def p_describe_block(self, p):
"""describe_block : modifiers DESCRIBE '{' describe_list '}' ';'"""
children = ListFromConcat(p[1], p[2])
p[0] = self.BuildProduction('Describe', p, 2, children)
if self.parse_debug: DumpReduction('describe_block', p)
def p_describe_list(self, p):
"""describe_list : modifiers SYMBOL ';' describe_list
| modifiers ENUM ';' describe_list
| modifiers STRUCT ';' describe_list
| modifiers TYPEDEF ';' describe_list
| """
if len(p) > 1:
Type = self.BuildProduction('Type', p, 2, p[1])
p[0] = ListFromConcat(Type, p[4])
def p_describe_error(self, p):
"""describe_list : error describe_list"""
p[0] = p[2]
#
# Constant Values (integer, value)
#
  # Constant values can be found at various levels. A constant value is returned
  # as a string after being validated against a FLOAT, HEX, INT, OCT or
  # STRING pattern as appropriate.
#
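  # Illustrative literals accepted here: 42, 0x2A, 052, 4.2, "text", and shifted
  # integers such as 1 << 4 (reduced to the string "(1 << 4)").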
def p_value(self, p):
"""value : FLOAT
| HEX
| INT
| OCT
| STRING"""
p[0] = p[1]
if self.parse_debug: DumpReduction('value', p)
def p_value_lshift(self, p):
"""value : integer LSHIFT INT"""
p[0] = "(%s << %s)" % (p[1], p[3])
if self.parse_debug: DumpReduction('value', p)
  # Integers are numeric values which may not be floats, used in cases like array sizes.
def p_integer(self, p):
"""integer : HEX
| INT
| OCT"""
p[0] = p[1]
#
# Parameter List
#
# A parameter list is a collection of arguments which are passed to a
# function. In the case of a PPAPI, it is illegal to have a function
# which passes no parameters.
#
  # NOTE: We currently do not support functions which take no arguments in PPAPI.
def p_param_list(self, p):
"""param_list : modifiers typeref SYMBOL param_cont"""
children = ListFromConcat(p[1], p[2])
param = self.BuildProduction('Param', p, 3, children)
p[0] = ListFromConcat(param, p[4])
if self.parse_debug: DumpReduction('param_list', p)
def p_param_cont(self, p):
"""param_cont : ',' param_list
| """
if len(p) > 1:
p[0] = p[2]
if self.parse_debug: DumpReduction('param_cont', p)
def p_param_error(self, p):
"""param_cont : error param_cont"""
p[0] = p[2]
#
# Typeref
#
# A typeref is a reference to a type definition. The type definition may
# be a built in such as int32_t or a defined type such as an enum, or
# struct, or typedef. Part of the reference to the type is how it is
# used, such as directly, a fixed size array, or unsized (pointer). The
# reference is reduced and becomes a property of the parent Node.
#
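  # Hypothetical declarations showing the three reference forms (direct,
  # fixed-size array, unsized array) as this grammar expects them:
  #
  #   int32_t count;
  #   int32_t[4] values;
  #   int32_t[] samples;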
def p_typeref_data(self, p):
"""typeref : SYMBOL typeref_arrays"""
Type = self.BuildExtAttribute('TYPEREF', p[1])
p[0] = ListFromConcat(Type, p[2])
if self.parse_debug: DumpReduction('typeref', p)
def p_typeref_arrays(self, p):
"""typeref_arrays : '[' ']' typeref_arrays
| '[' integer ']' typeref_arrays
| """
if len(p) == 1: return
if len(p) == 5:
count = self.BuildExtAttribute('FIXED', p[2])
array = self.BuildProduction('Array', p, 2, ListFromConcat(p[4], count))
else:
array = self.BuildProduction('Array', p, 1, p[3])
p[0] = [array]
if self.parse_debug: DumpReduction('arrays', p)
#
# Enumeration
#
# An enumeration is a set of named integer constants. An enumeration
  # is a valid type which can be referenced in other definitions.
#
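  # A hypothetical enum matched by these rules (each item needs an explicit value):
  #
  #   enum PP_Status {
  #     PP_OK = 0,
  #     PP_FAILED = 1
  #   };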
def p_enum_block(self, p):
"""enum_block : modifiers ENUM SYMBOL '{' enum_list '}' ';'"""
p[0] = self.BuildProduction('Enum', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('enum_block', p)
def p_enum_list(self, p):
"""enum_list : comments SYMBOL '=' value enum_cont"""
val = self.BuildExtAttribute('VALUE', p[4])
enum = self.BuildProduction('EnumItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(enum, p[5])
if self.parse_debug: DumpReduction('enum_list', p)
def p_enum_cont(self, p):
"""enum_cont : ',' enum_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('enum_cont', p)
def p_enum_cont_error(self, p):
"""enum_cont : error enum_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('enum_error', p)
#
# Interface
#
# An interface is a named collection of functions.
#
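  # A hypothetical interface matched by these rules (note that every member
  # function must declare at least one parameter):
  #
  #   interface PPB_Widget {
  #     int32_t Resize(int32_t width, int32_t height);
  #   };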
def p_interface_block(self, p):
"""interface_block : modifiers INTERFACE SYMBOL '{' member_list '}' ';'"""
p[0] = self.BuildProduction('Interface', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('interface_block', p)
def p_member_list(self, p):
"""member_list : member_function member_list
| """
if len(p) > 1 :
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('member_list', p)
def p_member_function(self, p):
"""member_function : modifiers typeref SYMBOL '(' param_list ')' ';'"""
params = self.BuildProduction('Callspec', p, 4, p[5])
p[0] = self.BuildProduction('Function', p, 3, ListFromConcat(p[1], params))
if self.parse_debug: DumpReduction('member_function', p)
def p_member_error(self, p):
"""member_list : error member_list"""
p[0] = p[2]
#
# Struct
#
# A struct is a named collection of members which in turn reference other
# types. The struct is a referencable type.
#
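  # A hypothetical struct matched by these rules:
  #
  #   struct PP_Point {
  #     int32_t x;
  #     int32_t y;
  #   };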
def p_struct_block(self, p):
"""struct_block : modifiers STRUCT SYMBOL '{' struct_list '}' ';'"""
p[0] = self.BuildProduction('Struct', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('struct_block', p)
def p_struct_list(self, p):
"""struct_list : modifiers typeref SYMBOL ';' struct_list
| """
if len(p) > 1:
member = self.BuildProduction('Member', p, 3, ListFromConcat(p[1], p[2]))
p[0] = ListFromConcat(member, p[5])
if self.parse_debug: DumpReduction('struct_list', p)
#
# Typedef
#
  # A typedef creates a new referencable type. The typedef can specify an array
# definition as well as a function declaration.
#
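  # Hypothetical fragments for the two forms handled below: a plain typedef and
  # a function-style typedef.
  #
  #   typedef int32_t PP_Bool;
  #   typedef void PP_CompletionCallback(int32_t result);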
def p_typedef_data(self, p):
"""typedef_def : modifiers TYPEDEF typeref SYMBOL ';' """
p[0] = self.BuildProduction('Typedef', p, 4, ListFromConcat(p[1], p[3]))
if self.parse_debug: DumpReduction('typedef_data', p)
def p_typedef_func(self, p):
"""typedef_def : modifiers TYPEDEF typeref SYMBOL '(' param_list ')' ';'"""
params = self.BuildProduction('Callspec', p, 5, p[6])
children = ListFromConcat(p[1], p[3], params)
p[0] = self.BuildProduction('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_func', p)
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered logging an error, and parsing recover happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
filename = self.lexobj.filename
self.parse_errors += 1
if t:
lineno = t.lineno
pos = t.lexpos
prev = self.yaccobj.symstack[-1]
if type(prev) == lex.LexToken:
msg = "Unexpected %s after %s." % (
TokenTypeName(t), TokenTypeName(prev))
else:
msg = "Unexpected %s." % (t.value)
else:
lineno = self.last.lineno
pos = self.last.lexpos
msg = "Unexpected end of file after %s." % TokenTypeName(self.last)
self.yaccobj.restart()
# Attempt to remap the error to a friendlier form
if msg in ERROR_REMAP:
msg = ERROR_REMAP[msg]
# Log the error
self.Logger(filename, lineno, pos, msg)
def __init__(self, builder, logger, options = {}):
global PARSER_OPTIONS
IDLLexer.__init__(self, options)
self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=False,
optimize=0, write_tables=0)
for k in options:
PARSER_OPTIONS[k] = options[k]
self.build_debug = PARSER_OPTIONS['build_debug']
self.parse_debug = PARSER_OPTIONS['parse_debug']
self.token_debug = PARSER_OPTIONS['token_debug']
self.verbose = PARSER_OPTIONS['verbose']
self.Builder = builder
self.Logger = logger
self.parse_errors = 0
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
  # against the leaf patterns.
#
def token(self):
tok = self.lexobj.token()
if tok:
self.last = tok
if self.token_debug:
PrintInfo("TOKEN %s(%s)" % (tok.type, tok.value))
return tok
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
  # cls - The type of item being produced
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist):
name = p[index]
filename = self.lexobj.filename
lineno = p.lineno(index)
pos = p.lexpos(index)
if self.build_debug:
PrintInfo("Building %s(%s)" % (cls, name))
return self.Builder(cls, name, filename, lineno, pos, childlist)
#
# BuildExtAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildExtAttribute(self, name, value):
if self.build_debug:
PrintInfo("Adding ExtAttribute %s = %s" % (name, str(value)))
return self.Builder('ExtAttribute', '%s=%s' % (name,value),
self.lexobj.filename, self.last.lineno, self.last.lexpos, [])
#
# ParseData
#
# Attempts to parse | |
εσωστρέφεια εσωτερίκευση εσωτερικοποίηση
εσωτερικό εσωτερικότης εσωτερικότητα εσωτερισμός εσωτρόπιο εσώρουχο εσώφυλλο
εταίρος εταζέρα εταιρία εταιρεία εταιρισμός εταλονάζ εταλονέρ ετεραρχία
ετεροίωσις ετεροαπασχόληση ετεροβίωτος ετεροβιωματικός ετεροβιωματικότητα
ετερογένεια ετερογένεση ετερογαμία ετερογενές ετερογονία ετεροδημότης
ετεροδημότισσα ετεροδικία ετεροδοξία ετεροεπαγγελματίας ετεροκαθορισμός
ετερομέρεια ετερομορφία ετερομορφισμός ετερονομία ετεροπροσδιορισμός
ετεροσκεδαστικότητα ετεροσωματικός ετεροφυλία ετεροφυλοφιλία ετεροφυλόφιλος
ετεροχρονισμός ετερόκλιτο ετερότης ετερότητα ετησίαι ετιά ετικέτα ετικετάρισμα
ετοιμασία ετοιματζίδικο ετοιμολογία ετοιμότης ετοιμότητα ετρουσκικά ετρούσκος
ετυμηγορία ετυμολογία ετυμολόγημα ετυμολόγηση ετυμολόγος ευήθεια ευαγγέλιο
ευαγγελισμός ευαγγελιστής ευαισθησία ευαισθητοποίηση ευαισθητοποίησις
ευαρέσκεια ευαρέστηση ευαρέστησις ευβοιώτης ευβοιώτισσα ευβουλία ευβραδύπορα
ευγενής ευγενικότητα ευγευσία ευγηρία ευγλωττία ευγνωμοσύνη ευγονία ευγονική
ευγραμμία ευδία ευδαίμονας ευδαιμονία ευδαιμονίστρια ευδαιμονισμός
ευδιαθεσία ευδιαλυτότητα ευδοκία ευδοκίμηση ευδοκίμησις ευδόκηση ευελιξία
ευεργέτημα ευεργέτης ευεργέτιδα ευεργέτις ευεργέτισσα ευεργέτρια ευεργεσία
ευεργετικότητα ευερεθιστότης ευερεθιστότητα ευετηρία ευζωία ευζωνάκι ευζωνικό
ευημερία ευθέτησις ευθανασία ευθεία ευθιξία ευθραυστότης ευθραυστότητα
ευθυγράμμιση ευθυγράμμισις ευθυδικία ευθυκρισία ευθυμία ευθυμογράφημα
ευθυμογραφία ευθυμολογία ευθυμολόγημα ευθυμολόγος ευθυνοφοβία ευθύαυλος ευθύνη
ευθύτητα ευκάλυπτος ευκή ευκαιρία ευκαλυπτέλαιο ευκαμψία ευκινησία ευκοίλια
ευκοιλιότης ευκοιλιότητα ευκολάκι ευκολία ευκοσμία ευκρίνεια ευκρασία ευκτική
ευλαλία ευληπτότητα ευλογία ευλογητάρια ευλογητάριο ευλογιά ευλογοφάνεια
ευλόγηση ευλόγησις ευλύγιστος ευμάθεια ευμάρεια ευμένεια ευμεταβλησία
ευνή ευνήκτης ευνοιοκρατία ευνομία ευνουχισμός ευνούχος ευορκία ευοσμία
ευπατρίδης ευπείθεια ευπεψία ευπιστία ευποιία ευπορία ευπρέπεια ευπραγία
ευπροσηγορία ευπώλητο ευρέτης ευραπηλιώτης ευρειαγγεία ευρεσιτέχνης
ευρεσιτυχία ευρετήριο ευρετηρίαση ευρετηριασμός ευρετική ευρηματικότητα
ευρυαγγεία ευρυεκπομπή ευρυθμία ευρυμάθεια ευρυχωρία ευρωαστυνομία ευρωβουλή
ευρωβουλευτίνα ευρωβουλεύτρια ευρωδίπλωμα ευρωδιαβατήριο ευρωδολάριο
ευρωεπιταγή ευρωεταίρος ευρωκοινοβουλευτής ευρωκοινοβούλιο ευρωκοινοβούλιον
ευρωκομουνιστής ευρωκράτης ευρωλιμένας ευρωναζί ευρωνόμισμα ευρωομολογία
ευρωπαΐστρια ευρωπαία ευρωπαίος ευρωπαϊσμός ευρωπαϊστής ευρωπύραυλος
ευρωσκεπτικισμός ευρωσκεπτικιστής ευρωστία ευρωστρατός ευρωτίαση ευρωτίασις
ευρωχώρος ευρύτης ευρύτητα ευρώ ευρώπιο ευρώπιον ευρώπουλο ευρώς ευσέβεια
ευσεβισμός ευσπλαχνία ευστάθεια ευστατισμός ευστοχία ευστροφία ευσυγκινησία
ευσχημοσύνη ευτέλεια ευταξία ευτελισμός ευτηξία ευτολμία ευτονία ευτρεπισμός
ευτροφισμός ευτυχία ευτύχημα ευφημισμός ευφλογιστία ευφορία ευφράδεια
ευφυΐα ευφυολογία ευφυολόγημα ευφυολόγος ευφωνία ευφώνιο ευχέλαιο ευχέρεια
ευχέτις ευχή ευχαρίστηση ευχαριστία ευχαριστώ ευχολόγιο ευχρηστία ευψυχία
ευωδιά ευωχία ευόδωση ευόδωσις εφάπαξ εφάπλωμα εφέ εφέδρανο εφένδης εφέντης
εφήβαιο εφίδρωση εφίδρωσις εφίππιον εφαλτήριο εφαπλωματοποιός εφαπτομένη
εφαρμοσμένα μαθηματικά εφαρμοστήριο εφαρμοστής εφαψίας εφεδρεία εφεκτικότητα
εφελκίς εφελκυσμός εφετείο εφευρέτης εφευρέτρια εφευρετικότητα εφεύρεση
εφηβεία εφηλίδα εφηλίς εφημέριος εφημερία εφημερίδα εφημεριδογράφος
εφημεριδοπώλης εφημεριδοπώλισσα εφημεριδοφάγος εφησυχασμός εφησύχαση εφιάλτης
εφικτότητα εφιός εφκιός εφοδιασμός εφοδιαστική εφοδιοπομπή εφοπλίστρια
εφοπλιστής εφοπλιστίνα εφορία εφορεία εφτάδα εφτάδυμα εφτάζυμο εφτάμηνο εφτάρι
εφτάστιχο εφταήμερο εφταετία εφτακοσαριά εφταμηνίτης εφταμηνίτισσα εφταπλέτο
εφυάλωση εφφέ εφόδιο εφόδιον εφόρμηση εφόρμησις εχέγγυο εχίνος εχεμύθεια
εχθρά εχθρικότητα εχθροπάθεια εχθροπραξία εχθρός εχθρότητα εχινοκοκκίαση
εχινόκοκκος εχταγή εχτρός εψιδίνη εωθινό εύδρομο εύελπις εύζωνας εύζωνος
εύνοια εύρεση εύρεσις εύρετρα εύρημα εύρυνση εύσημο εύσημον εἰρήνη ζάβαλης
ζάλισμα ζάλο ζάντα ζάπι ζάπινγκ ζάρα ζάρι ζάρωμα ζάφτι ζάχαρη ζάχαρις ζάχαρο
ζέβρος ζέον ζέπελιν ζέρμπερα ζέρσεϊ ζέρσεϋ ζέση ζέστα ζέσταμα ζέστη ζέφυρος
ζήλεια ζήλια ζήλος ζήση ζήτημα ζήτηση ζήτουλας ζίζιρος ζίλι ζίνια ζίννια
ζαΐφης ζαβάδα ζαβαλής ζαβαλίδικο ζαβαλού ζαβαρακατρανέμια ζαβλάκωμα
ζαβολιά ζαβομάρα ζαγάρι ζαγαρομάτης ζαζάκι ζακέτα ζακετάκι ζακετούλα
ζακόνι ζαλάδα ζαλίκα ζαλίκι ζαλιά ζαμάνι ζαμάνια ζαμανφουτίστας ζαμανφουτισμός
ζαμανφουτιστικός ζαμενής ο μαύρος ζαμενής της ρόδου ζαμπάκι ζαμπίτης
ζαμπαράς ζαμπονοπατατοκροκέτα ζαμπονοτυρόπιτα ζαμπονόπιτα ζαμπόν ζαμπόνια
ζαρίφης ζαρίφισσα ζαργάνα ζαρζαβάτι ζαρζαβατικό ζαριά ζαριφλίκι ζαρκάδι
ζαρταλούδι ζαρτιέρα ζαρωματιά ζατρίκιο ζατρίκιον ζαφείρι ζαφειρόπετρα ζαφορά
ζαχάρωμα ζαχαράσβεστος ζαχαρένια ζαχαρί ζαχαρίνη ζαχαριέρα ζαχαροδιάλυμα
ζαχαροκάλαμο ζαχαροκεφιρόκοκκος ζαχαρομάζα ζαχαρομύκητας ζαχαρονερόκοκκος
ζαχαροπλάστης ζαχαροπλάστισσα ζαχαροπλάστρια ζαχαροπλαστείο ζαχαροπλαστική
ζαχαρουργείο ζαχαρωτό ζαχαρόζη ζαχαρόνερο ζαχαρόπιτα ζαχαρότευτλο ζαϊφλίκι
ζεβζεκιά ζεδοάρειο ζεια ζελέ ζελές ζελατίνα ζελατίνη ζελεδάκι ζεμάν φου
ζεμανφουτίστας ζεμανφουτίστρια ζεμανφουτισμός ζεμανφουτιστικός ζεμπίλι
ζεν πρεμιέ ζενίθ ζερβοκουτάλα ζερβοκουτάλας ζερζεβούλης ζερνεκαδές ζερό
ζεσεοσκόπιο ζεσεόμετρο ζεστασιά ζεστοκόπημα ζεστούλα ζεστό ζετέ ζευγάρι
ζευγάρωμα ζευγάς ζευγίτης ζευγαράκι ζευγαρονήσι ζευγηλάτης ζευγηλατρίς
ζευγολάτισσα ζευγολατειό ζευγολατιό ζευγού ζευγόλουρο ζευκτήρ ζευκτήρας
ζεόλιθος ζεύγλα ζεύγμα ζεύγος ζεύκι ζεύξη ζηλαδέρφια ζηλιαρόγατα ζηλιαρόγατος
ζηλοφθονία ζηλωτής ζηλώτρια ζημία ζημιά ζην ζητακισμός ζητεία ζητητής ζητιάνα
ζητιανάκι ζητιανιά ζητούμενο ζητωκραυγή ζιαμέτι ζιαφέτι ζιβάγκο ζιβέτ ζιβανία
ζιγκ-ζαγκ ζιγκλέρ ζιγκλεράκι ζιγκολέτα ζιγκολό ζιγκουράτ ζιζάνιο ζιζανιοκτόνο
ζιλέ ζιλές ζιλεδάκι ζιμπελίνα ζιμπούλι ζιπ κιλότ ζιρκόνιο ζιτούνι ζιφιός ζλάπι
ζνίχι ζο ζογκλέρ ζολότα ζορζέτα ζοριλίκι ζορμπάς ζορμπαλής ζορμπαλίκι ζουάβος
ζουζουνάκι ζουζουνίτσα ζουζουνιά ζουζούνα ζουζούνι ζουζούνισμα ζουλάπι
ζουλού ζουλού ζουμ ζουμί ζουμπάς ζουμπουλάκι ζουμπούλι ζουνάρι ζουρίδα
ζουρλοκαμπέρω ζουρλομανδύας ζουρλοπαντιέρα ζουρνάς ζουφάδα ζοφερότητα ζοχάδα
ζοχαδιάρης ζοχός ζούγκλα ζούδι ζούδος ζούζουλο ζούλα ζούληγμα ζούλημα ζούλια
ζούμπερο ζούπηγμα ζούπισμα ζούρα ζούρια ζούριασμα ζούρλα ζούρλια ζυγαριά ζυγιά
ζυγιστής ζυγιστικά ζυγολούρι ζυγολόγιο ζυγοστάθμιση ζυγοταινία ζυγούρι
ζυγός ζυθεστιατόριο ζυθοζύμη ζυθοποιία ζυθοποιείο ζυθοποιός ζυθοποσία
ζυθοπώλης ζυμάρι ζυμαράκι ζυμαρικό ζυμοκαλλιέργεια ζυμομυκητίαση ζυμομύκητας
ζυμωτήριο ζυμωτής ζυμωτικά ζυμώτρα ζυμώτρια ζυφτήρι ζω ζωάκι ζωάνθρωπος ζωάριο
ζωέμπορας ζωέμπορος ζωή ζωανθρωπία ζωγράφισμα ζωγράφος ζωγραφιά ζωγραφική
ζωηράδα ζωηρότητα ζωμάρι ζωμός ζωνάρι ζωνάτο ζωντάνεμα ζωντάνια ζωντανό
ζωντοχήρος ζωντόβολο ζωοαγορά ζωοανθρωπονόσος ζωοβένθος ζωογεωγραφία ζωογόνηση
ζωοδότρα ζωοδόχος πηγή ζωοθεραπευτική ζωοθεϊσμός ζωοκλέφτης ζωοκλοπή ζωοκομία
ζωολάτρης ζωολάτρισσα ζωολατρία ζωολογία ζωολόγος ζωομορφισμός ζωονομία
ζωοπανήγυρη ζωοπλαγκτόν ζωοποίηση ζωοτεχνία ζωοτεχνικός ζωοτοκία ζωοτομία
ζωοτροφή ζωοτροφία ζωοτροφείο ζωοτρόφος ζωοφαγία ζωοφιλία ζωοφοβία ζωοφυσική
ζωοχημεία ζωοψία ζωούλα ζωροαστρισμός ζωστήρα ζωστήρας ζωτικότητα ζωφόρος
ζωόγλοια ζωύφιο ζόλος ζόμπι ζόρε ζόρι ζόρισμα ζόρκος ζόφος ζύγι ζύγιασμα
ζύγισμα ζύγωμα ζύθος ζύμη ζύμωμα ζύμωση ζώδιο ζώμη ζώνη ζώο ζώον ζώπυρο ζώσιμο
ηγέτης ηγέτιδα ηγήτορας ηγεμονία ηγεμονίδα ηγεμονίσκος ηγεμονικότητα
ηγεμόνας ηγεμόνευση ηγερία ηγεσία ηγετίσκος ηγουμένη ηγουμένισσα ηγουμενία
ηγουμενιάρης ηγουμενιτσιώτης ηγουμενοσυμβούλιο ηγούμενος ηδονή ηδονίστρια
ηδονιστής ηδονοβλεψία ηδονοβλεψίας ηδονοθήρας ηδονολάτρης ηδονολάτρισσα
ηδυλογία ηδυπάθεια ηδύοσμος ηδύποτο ηδύτητα ηθική ηθικοδιδάσκαλος ηθικοκρατία
ηθικολόγος ηθικοποίηση ηθικό ηθικότητα ηθμοσωλήνες ηθμός ηθογράφημα ηθογράφηση
ηθογραφία ηθολογία ηθολόγος ηθοποιία ηθοποιός ηλάγρα ηλέκτριση ηλίανθος ηλίαση
ηλακάτη ηλεκτράμαξα ηλεκτρικός ηλεκτρισμός ηλεκτροακουστική ηλεκτροακτινολογία
ηλεκτροαμφιβληστροειδογραφία ηλεκτροβιογένεση ηλεκτροβιολογία ηλεκτρογεννήτρια
ηλεκτροδυναμική ηλεκτροδυναμόμετρο ηλεκτροδότηση ηλεκτροεγκεφαλογράφημα
ηλεκτροεγκεφαλογραφία ηλεκτροθεραπεία ηλεκτροκάμινος ηλεκτροκίνηση
ηλεκτροκαρδιογράφος ηλεκτροκαρδιογραφία ηλεκτροκεφαλογράφημα ηλεκτροκινητήρας
ηλεκτροληψία ηλεκτρολογία ηλεκτρολογείο ηλεκτρολόγος ηλεκτρολύτης
ηλεκτροματσάκονο ηλεκτρομεταλλουργία ηλεκτρομετρία ηλεκτρομηχανή
ηλεκτρομηχανικός ηλεκτρομυογράφημα ηλεκτρομυογραφία ηλεκτρονική
ηλεκτρονικός ηλεκτρονιοβόλτ ηλεκτρονόμος ηλεκτροπαραγωγή ηλεκτροπληξία
ηλεκτροπόρωση ηλεκτροσκόπιο ηλεκτροστατική ηλεκτροσυγκολλητής
ηλεκτροσυσσωρευτής ηλεκτροσόκ ηλεκτροσύντηξη ηλεκτροτεχνία ηλεκτροτεχνίτης
ηλεκτροφωταύγεια ηλεκτροφωτισμός ηλεκτροφόρηση ηλεκτροφώτιση ηλεκτροχημεία
ηλεκτρόλυση ηλεκτρόμετρο ηλεκτρόνιο ηλεκτρόφωνο ηλεκτρώσμωση ηλεμήνυμα
ηλιέλαιο ηλιακός ηλιανθόμελο ηλιασμός ηλιαστήριο ηλιαχτίδα ηλιθιότητα ηλικία
ηλιοβασίλεμα ηλιοβολή ηλιοβολία ηλιογράφος ηλιογραφία ηλιοθεραπεία ηλιολάτρης
ηλιολατρία ηλιοπληξία ηλιοροφή ηλιοσκοπία ηλιοσκόπιο ηλιοστάσιο ηλιοσυλλέκτης
ηλιοτροπία ηλιοτροπισμός ηλιοτρόπιο ηλιοτυπία ηλιοφάνεια ηλιοφοβία ηλιόβγαλμα
ηλιόγερμα ηλιόκαμα ηλιόλουτρο ηλιόπετρα ηλιόσκονη ηλιόσπορος ηλιόσφαιρα
ημέιλ ημέρα ημέρευση ημέρωμα ημέρωση ημίθεος ημίμετρο ημίονος ημίτονο ημίφωνο
ημίχρονο ημίψηλο ημίωρο ημεράδα ημερίδα ημεραλωπία ημεραργία ημεροδείκτης
ημερολόγιο ημερομήνια ημερομίσθιο ημερομηνία ημερονύκτιο ημερονύχτιο
ημερόπλοιο ημερότητα ημιέκταση ημιαγωγός ημιαθροιστής ημιαμινάλη ημιανάπαυση
ημιαποθετικό ημιαργία ημιδιατήρηση ημιδιατροφή ημιεπεξεργαστής ημικίονας
ημικύκλιο ημικύκλιο ημιμάθεια ημιμόριο ημιοκτάβα ημιολία ημιονηγός ημιπερίοδος
ημιπληγία ημιπληγικός ημισέληνος ημιστίχιο ημιστύλιο ημισυντήρηση ημισφαίριο
ημιταυτοχρονισμός ημιτελικά ημιτελικός ημιτονισμός ημιτόνιο ημιφορτηγό
ημιχρόνιο ημιχόριο ημιώροφος ηνίο ηνίοχος ηπάτωμα ηπατίτιδα ηπατίτις ηπαταλγία
ηπατισμός ηπατοκήλη ηπατολογία ηπατομεγαλία ηπατοπάθεια ηπατορραγία ηπατοτομία
ηπειρωτικά ηπειρώτης ηπειρώτισσα ηπιότητα ηρέμηση ηραίο ηρακλειώτης ηρεμία
ηρεμότητα ηρωίδα ηρωίνη ηρωινισμός ηρωισμός ηρωολατρία ηρωολατρεία ηρωοποίηση
ηρώο ησυχία ησυχασμός ησυχαστήριο ησυχαστής ηττοπάθεια ηφαίστειο
ηφαιστειολόγος ηφαιστειότητα ηχείο ηχηρότητα ηχοαίσθημα ηχοβολή ηχοβολίδα
ηχοβόλιση ηχογράφημα ηχογράφηση ηχογράφος ηχοεντοπισμός ηχοεπεξεργασία
ηχοκαταστολή ηχοκινησία ηχοκυματική ηχολήπτης ηχολήπτρια ηχολαλία ηχοληψία
ηχομετρία ηχομιμία ηχομόνωση ηχοπέτασμα ηχορύπανση ηχοσκόπιο ηχοτοπίο
ηχωεντοπισμός ηχωκαρδιογραφία ηχόμετρο ηχόχρωμα ηχώ ηωσίνη ηώς θάλαμος θάλασσα
θάλπος θάμα θάμασμα θάμβος θάμβωμα θάμβωση θάμνος θάμπος θάμπωμα θάνατος
θάψιμο θέα θέμα θέαινα θέαμα θέαση θέατρο θέλγητρο θέλγητρον θέλημα θέληση θέλησις
θέμελο θέρετρο θέριεμα θέρισμα θέρμανση θέρμη θέση θέσμιο θέσμιση θέσπιση
θέσπισμα θέσφατο θήκη θήλασμα θήλαστρο θήλαστρον θήλεια θήλιασμα θήλυ θήλωμα
θήραμα θήρευμα θήτα θήτης θίασος θίνα θίξιμο θα θαλάμη θαλάμι θαλάσσερμα
θαλαμάρχης θαλαμίσκος θαλαμηγός θαλαμηπόλος θαλαμοντόγκ θαλαμοφύλακας
θαλασσάκι θαλασσίλα θαλασσίτσα θαλασσαετός θαλασσαιμία θαλασσασφάλεια
θαλασσινά θαλασσινομανιταρόσουπα θαλασσινός θαλασσινόσουπα θαλασσογράφος
θαλασσοδάνειο θαλασσοδαρμός θαλασσοθεραπεία θαλασσοκαλλιέργεια θαλασσοκράτειρα
θαλασσοκρατία θαλασσοκρατορία θαλασσομάχος θαλασσομαχία θαλασσομαχητό
θαλασσοπνίξιμο θαλασσοπνίχτης θαλασσοποίηση θαλασσοπούλι θαλασσοπόρος
θαλασσοταξιδευτής θαλασσοταξιδιώτης θαλασσοταραχή θαλασσοφοβία θαλασσοχελώνα
θαλασσόβραχος θαλασσόλυκος θαλασσόνερο θαλασσόχορτο θαλερότητα θαλιδομίδη
θαλλόφυτα θαλπερότητα θαλπωρή θαμνόφιδο θαμπάδα θαμπόγυαλο θαμώνας θανάσης
θανή θανατάς θανατολογία θανατοπαγίδα θανατοποινίτης θανατοποινίτισσα
θανατοφοβία θαρθουέλα θασίτης θαυμάστρια θαυμασμός θαυμαστής θαυμαστικό
θαυματοποιός θαυματουργία θαύμα θεά θεάνθρωπος θεία θεία θείο θείον θείος
θείωση θεαθήναι θεαματικότητα θεανθρωπισμός θεατής θεατράκι θεατράνθρωπος
θεατρίνος θεατρικογράφος θεατρινισμός θεατρισμός θεατρολογία θεατρολόγος
θεατρώνης θειάφι θειάφισμα θεια θειαφιστήρι θειαφοκέρι θειικοκάλι θειοπηγή
θειότητα θεληματάρης θεληματίας θελιά θελκτικότητα θεμέλιο θεμέλιωμα
θεματοθέτης θεματοθέτρια θεματολογία θεματολόγιο θεματοφυλακή θεματοφύλακας
θεμελίωση θεμελίωσις θεμελιωτής θεμιστοπόλος θεογεννήτορας θεογεννήτρα
θεογνωσία θεογονία θεοδικία θεοδόλιχος θεοκαπηλία θεοκράτης θεοκρασία
θεοκρισία θεοκτονία θεολογία θεολογείο θεολόγος θεομαχία θεομηνία θεομπαίχτης
θεοπνευστία θεοποίηση θεοσέβεια θεοσκόταδο θεοσοφία θεοσοφίστρια θεοσοφισμός
θεοσύνη θεοτόκιο θεουργία θεουργός θεοφάνεια θεοφαγία θεούσα θεράπαινα
θερίστρια θεραπαινίδα θεραπαινίς θεραπεία θεραπευτήριο θεραπευτής θεραπευτική
θεριακή θεριακλής θεριακλίδισσα θεριακλίκι θεριακλού θερισμός θεριστής
θεριό θερμάστρα θερμίδα θερμίστορ θερμαισθησία θερμαλισμός θερμαντήρας
θερμασιά θερμαστής θερμηλασία θερμιδομετρία θερμιδόμετρο θερμοαίσθηση
θερμοαισθησία θερμοβαθογράφος θερμογέφυρα θερμογονία θερμογράφος
θερμοδιαμόρφωση θερμοδιαχυτότητα θερμοδυναμική θερμοηλεκτρισμός θερμοθεραπεία
θερμοκαυτήρας θερμοκαυτηρίαση θερμοκλιματισμός θερμοκοιτίδα θερμοκρασία
θερμομέτρηση θερμομαγνητισμός θερμομετρία θερμομηχανική θερμομόνωση
θερμοπίδακας θερμοπεριοδισμός θερμοπηγή θερμοπληξία θερμοπομπός | |
# repo: hlebuschek/l2
import logging
import threading
import time
import re
from collections import defaultdict
from typing import Optional, Union
from utils.response import status_response
from django.db.utils import IntegrityError
from utils.data_verification import as_model, data_parse
import simplejson as json
import yaml
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.core.cache import cache
from django.db import connections, transaction
from django.db.models import Q, Prefetch
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
import api.models as models
import directions.models as directions
import users.models as users
from contracts.models import Company
from api import fias
from appconf.manager import SettingManager
from barcodes.views import tubes
from clients.models import CardBase, Individual, Card, Document, District
from context_processors.utils import menu
from directory.models import Fractions, ParaclinicInputField, ParaclinicUserInputTemplateField, ResearchSite, Culture, Antibiotic, ResearchGroup, Researches as DResearches, ScreeningPlan
from doctor_call.models import DoctorCall
from external_system.models import FsliRefbookTest
from hospitals.models import Hospitals
from laboratory.decorators import group_required
from laboratory.utils import strdatetime
from pharmacotherapy.models import Drugs
from podrazdeleniya.models import Podrazdeleniya
from slog import models as slog
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, StatisticsTicket, Outcomes, ExcludePurposes
from tfoms.integration import match_enp
from utils.common import non_selected_visible_type
from utils.dates import try_parse_range, try_strptime
from utils.nsi_directories import NSI
from .sql_func import users_by_group, users_all
from laboratory.settings import URL_RMIS_AUTH, URL_ELN_MADE, URL_SCHEDULE
import urllib.parse
logger = logging.getLogger("API")
def translit(locallangstring):
"""
Translit func
    :param locallangstring: original string (may contain Cyrillic characters)
:return: translit of locallangstring
"""
conversion = {
u'\u0410': 'A',
u'\u0430': 'a',
u'\u0411': 'B',
u'\u0431': 'b',
u'\u0412': 'V',
u'\u0432': 'v',
u'\u0413': 'G',
u'\u0433': 'g',
u'\u0414': 'D',
u'\u0434': 'd',
u'\u0415': 'E',
u'\u0435': 'e',
u'\u0401': 'Yo',
u'\u0451': 'yo',
u'\u0416': 'Zh',
u'\u0436': 'zh',
u'\u0417': 'Z',
u'\u0437': 'z',
u'\u0418': 'I',
u'\u0438': 'i',
u'\u0419': 'Y',
u'\u0439': 'y',
u'\u041a': 'K',
u'\u043a': 'k',
u'\u041b': 'L',
u'\u043b': 'l',
u'\u041c': 'M',
u'\u043c': 'm',
u'\u041d': 'N',
u'\u043d': 'n',
u'\u041e': 'O',
u'\u043e': 'o',
u'\u041f': 'P',
u'\u043f': 'p',
u'\u0420': 'R',
u'\u0440': 'r',
u'\u0421': 'S',
u'\u0441': 's',
u'\u0422': 'T',
u'\u0442': 't',
u'\u0423': 'U',
u'\u0443': 'u',
u'\u0424': 'F',
u'\u0444': 'f',
u'\u0425': 'H',
u'\u0445': 'h',
u'\u0426': 'Ts',
u'\u0446': 'ts',
u'\u0427': 'Ch',
u'\u0447': 'ch',
u'\u0428': 'Sh',
u'\u0448': 'sh',
u'\u0429': 'Sch',
u'\u0449': 'sch',
u'\u042a': '',
u'\u044a': '',
u'\u042b': 'Y',
u'\u044b': 'y',
u'\u042c': '',
u'\u044c': '',
u'\u042d': 'E',
u'\u044d': 'e',
u'\u042e': 'Yu',
u'\u044e': 'yu',
u'\u042f': 'Ya',
u'\u044f': 'ya',
}
translitstring = []
for c in locallangstring:
translitstring.append(conversion.setdefault(c, c))
return ''.join(translitstring)
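# Illustrative mapping (derived from the table above):
#
#     translit("Иванов")   # -> "Ivanov"
#     translit("Lab-123")  # -> "Lab-123" (non-Cyrillic characters pass through)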
@csrf_exempt
def send(request):
"""
Sysmex save results
:param request:
:return:
"""
result = {"ok": False}
try:
if request.method == "POST":
resdict = yaml.load(request.POST["result"])
appkey = request.POST.get("key", "")
else:
resdict = yaml.load(request.GET["result"])
appkey = request.GET.get("key", "")
astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
app = models.Application.objects.filter(key=appkey, active=True).first()
resdict["pk"] = int(resdict.get("pk", -111))
if "LYMPH%" in resdict["result"]:
resdict["orders"] = {}
dpk = -1
if ("bydirection" in request.POST or "bydirection" in request.GET) and not app.tube_work:
dpk = resdict["pk"]
if dpk >= 4600000000000:
dpk -= 4600000000000
dpk //= 10
tubes(request, direction_implict_id=dpk)
if directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk, issledovaniya__time_confirmation__isnull=True).exists():
resdict["pk"] = directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk, issledovaniya__time_confirmation__isnull=True).order_by("pk").first().pk
else:
resdict["pk"] = False
result["A"] = appkey
direction = None
if resdict["pk"] and app:
if app.tube_work:
direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=resdict["pk"]).first()
elif directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
tubei = directions.TubesRegistration.objects.get(pk=resdict["pk"])
direction = tubei.issledovaniya_set.first().napravleniye
pks = []
for key in resdict["result"].keys():
if models.RelationFractionASTM.objects.filter(astm_field=key).exists():
fractionRels = models.RelationFractionASTM.objects.filter(astm_field=key)
for fractionRel in fractionRels:
fraction = fractionRel.fraction
if directions.Issledovaniya.objects.filter(napravleniye=direction, research=fraction.research, time_confirmation__isnull=True).exists():
issled = directions.Issledovaniya.objects.filter(napravleniye=direction, research=fraction.research, time_confirmation__isnull=True).order_by("pk")[0]
if directions.Result.objects.filter(issledovaniye=issled, fraction=fraction).exists():
fraction_result = directions.Result.objects.filter(issledovaniye=issled, fraction__pk=fraction.pk).order_by("-pk")[0]
else:
fraction_result = directions.Result(issledovaniye=issled, fraction=fraction)
                            fraction_result.value = str(resdict["result"][key]).strip()  # set the value
if 'Non-React' in fraction_result.value:
fraction_result.value = 'Отрицательно'
if fraction_result.value.isdigit():
fraction_result.value = "%s.0" % fraction_result.value
find = re.findall(r"\d+.\d+", fraction_result.value)
if len(find) > 0:
val = float(find[0]) * fractionRel.get_multiplier_display()
val = app.auto_set_places(fractionRel, val)
fraction_result.value = fraction_result.value.replace(find[0], str(val))
                            fraction_result.iteration = 1  # set the iteration
ref = fractionRel.default_ref
if ref:
fraction_result.ref_title = ref.title
fraction_result.ref_about = ref.about
fraction_result.ref_m = ref.m
fraction_result.ref_f = ref.f
                            fraction_result.save()  # save the result
issled.api_app = app
issled.save()
fraction_result.get_ref(re_save=True)
                            fraction_result.issledovaniye.doc_save = astm_user  # who saved the result
                            fraction_result.issledovaniye.time_save = timezone.now()  # when it was saved
fraction_result.issledovaniye.save()
if issled not in pks:
pks.append(issled)
slog.Log(key=appkey, type=22, body=json.dumps(resdict), user=None).save()
result["ok"] = True
elif not directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
if dpk > -1:
resdict["pk"] = dpk
slog.Log(key=resdict["pk"], type=23, body=json.dumps(resdict), user=None).save()
except Exception as e:
result = {"ok": False, "Exception": True, "MSG": str(e)}
return JsonResponse(result)
@csrf_exempt
def endpoint(request):
result = {"answer": False, "body": "", "patientData": {}}
data = json.loads(request.POST.get("result", request.GET.get("result", "{}")))
api_key = request.POST.get("key", request.GET.get("key", ""))
message_type = data.get("message_type", "C")
pk_s = str(data.get("pk", "")).strip()
iss_s = str(data.get("iss_pk", "-1")).strip()
pk = -1 if not pk_s.isdigit() else int(pk_s)
iss_pk = -1 if not iss_s.isdigit() else int(iss_s)
data["app_name"] = "API key is incorrect"
# pid = data.get("processing_id", "P")
if models.Application.objects.filter(key=api_key).exists():
astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
if astm_user is None:
astm_user = users.DoctorProfile.objects.filter(user__is_staff=True).order_by("pk").first()
app = models.Application.objects.get(key=api_key)
if app.active:
data["app_name"] = app.name
if message_type == "R" or data.get("result") or message_type == "R_BAC":
if pk != -1 or iss_pk != -1:
direction: Union[directions.Napravleniya, None] = None
dw = app.direction_work or message_type == "R_BAC"
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
dw = True
if pk == -1:
iss = directions.Issledovaniya.objects.filter(pk=iss_pk)
if iss.exists():
direction = iss[0].napravleniye
elif dw:
direction = directions.Napravleniya.objects.filter(pk=pk).first()
else:
direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=pk).first()
pks = []
oks = []
if direction is not None:
if message_type == "R" or (data.get("result") and message_type == 'C'):
result["patientData"] = {
"fio": direction.client.individual.fio(short=True),
"card": direction.client.number_with_type(),
}
result["patientData"]["fioTranslit"] = translit(result["patientData"]["fio"])
result["patientData"]["cardTranslit"] = translit(result["patientData"]["card"])
results = data.get("result", {})
for key in results:
ok = False
q = models.RelationFractionASTM.objects.filter(astm_field=key)
if q.filter(application_api=app).exists():
q = q.filter(application_api=app)
ok = True
elif q.filter(application_api__isnull=True).exists():
q = q.filter(application_api__isnull=True)
ok = True
if ok:
for fraction_rel in q:
save_state = []
issleds = []
for issled in directions.Issledovaniya.objects.filter(
napravleniye=direction, research=fraction_rel.fraction.research, time_confirmation__isnull=True
):
if directions.Result.objects.filter(issledovaniye=issled, fraction=fraction_rel.fraction).exists():
fraction_result = directions.Result.objects.filter(issledovaniye=issled, fraction=fraction_rel.fraction).order_by("-pk")[0]
else:
fraction_result = directions.Result(issledovaniye=issled, fraction=fraction_rel.fraction)
fraction_result.value = str(results[key]).strip()
if 'Non-React' in fraction_result.value:
fraction_result.value = 'Отрицательно'
                                            find = re.findall(r"\d+\.\d+", fraction_result.value)
if len(find) == 0 and fraction_result.value.isdigit():
find = [fraction_result.value]
if len(find) > 0:
val_str = fraction_result.value
for f in find:
try:
val = float(f) * fraction_rel.get_multiplier_display()
val = app.auto_set_places(fraction_rel, val)
val_str = val_str.replace(f, str(val))
except Exception as e:
logger.exception(e)
fraction_result.value = val_str
fraction_result.iteration = 1
ref = fraction_rel.default_ref
if ref:
fraction_result.ref_title = ref.title
fraction_result.ref_about = ref.about
fraction_result.ref_m = ref.m
fraction_result.ref_f = ref.f
fraction_result.save()
issled.api_app = app
issled.save()
fraction_result.get_ref(re_save=True)
fraction_result.issledovaniye.doc_save = astm_user
fraction_result.issledovaniye.time_save = timezone.now()
fraction_result.issledovaniye.save()
save_state.append({"fraction": fraction_result.fraction.title, "value": fraction_result.value})
issleds.append({"pk": issled.pk, "title": issled.research.title})
if issled not in pks:
pks.append(issled)
oks.append(ok)
elif message_type == "R_BAC":
mo = data.get('mo')
if mo:
code = mo.get('code')
name = mo.get('name')
anti = data.get('anti', {})
comments = data.get('comments', [])
if code:
culture = Culture.objects.filter(lis=code).first()
iss = directions.Issledovaniya.objects.filter(napravleniye=direction, time_confirmation__isnull=True, research__is_microbiology=True)
if iss.filter(pk=iss_pk).exists():
iss = iss.filter(pk=iss_pk)
iss = iss.first()
if not culture:
print('NO CULTURE', code, name) # noqa: T001
elif not iss:
print('IGNORED') # noqa: T001
else:
directions.MicrobiologyResultCulture.objects.filter(issledovaniye=iss, culture=culture).delete()
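                                    # Keep only free-text comment lines; lines prefixed with
                                    # S:/R:/I: appear to be per-antibiotic sensitivity flags
                                    # (assumption) and are handled through the anti dict below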
comments = '\n'.join(
[c["text"] for c in comments if not c["text"].startswith('S:') and not c["text"].startswith('R:') and not c["text"].startswith('I:')]
)
culture_result = directions.MicrobiologyResultCulture(issledovaniye=iss, culture=culture, comments=comments)
culture_result.save()
for a in anti:
anti_r = anti[a]
anti_obj = Antibiotic.objects.filter(lis=a).first()
if anti_obj and anti_r.get('RSI'):
a_name = anti_r.get('name', '').replace('µg', 'мг')
a_name_parts = a_name.split()
a_name = a_name_parts[-2] + ' ' + a_name_parts[-1]
anti_result = directions.MicrobiologyResultCultureAntibiotic(
result_culture=culture_result,
antibiotic=anti_obj,
sensitivity=anti_r.get('RSI'),
dia=anti_r.get('dia', ''),
antibiotic_amount=a_name,
)
anti_result.save()
result["body"] = "{} {} {} {} {}".format(dw, pk, iss_pk, json.dumps(oks), direction is not None)
else:
result["body"] = "pk '{}' is not exists".format(pk_s)
elif message_type == "Q":
result["answer"] = True
pks = [int(x) for x in data.get("query", [])]
researches = defaultdict(list)
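                    # For every queried pk, collect the ASTM field codes of all visible
                    # fractions that this application is mapped to via RelationFractionASTM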
for row in app.get_issledovaniya(pks):
k = row["pk"]
i = row["iss"]
result["patientData"] = {
"fio": i.napravleniye.client.individual.fio(short=True),
"card": i.napravleniye.client.number_with_type(),
}
result["patientData"]["fioTranslit"] = translit(result["patientData"]["fio"])
result["patientData"]["cardTranslit"] = translit(result["patientData"]["card"])
for fraction in Fractions.objects.filter(research=i.research, hide=False):
rel = models.RelationFractionASTM.objects.filter(fraction=fraction, application_api=app)
if not rel.exists():
continue
# rel = models.RelationFractionASTM.objects.filter(fraction=fraction)
# if not rel.exists():
# continue
rel = rel[0]
researches[k].append(rel.astm_field)
result["body"] = researches
else:
pass
else:
data["app_name"] = "API app banned " + api_key
result["body"] = "API app banned " + api_key
else:
result["body"] = "API key is incorrect"
slog.Log(key=pk, type=6000, body=json.dumps({"data": data, "answer": result}), user=None).save()
return JsonResponse(result)
@login_required
def departments(request):
req = json.loads(request.body)
method = req.get('method', 'GET')
without_default = req.get('withoutDefault', False)
current_user_hospital_id = request.user.doctorprofile.get_hospital_id() or -1
hospital_pk = req.get('hospital', current_user_hospital_id)
su = request.user.is_superuser
if hospital_pk == -1:
hospital_pk = None
if hospital_pk != current_user_hospital_id and not su:
return JsonResponse({"ok": False})
can_edit = su or request.user.doctorprofile.has_group('Создание и редактирование пользователей')
if method == "GET":
if without_default:
qs = Podrazdeleniya.objects.filter(hospital_id=hospital_pk).order_by("pk")
else:
qs = Podrazdeleniya.objects.filter(Q(hospital_id=hospital_pk) | Q(hospital__isnull=True)).order_by("pk")
deps = [{"pk": x.pk, "title": x.get_title(), "type": str(x.p_type), "oid": x.oid} for x in qs]
en = SettingManager.en()
more_types = []
# openstack/cloud/openstackcloud.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import queue
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import types # noqa
import warnings
import dogpile.cache
import keystoneauth1.exceptions
import keystoneauth1.session
import munch
import requests.models
import requestsexceptions
from openstack import _log
from openstack.cloud import _floating_ip
from openstack.cloud import _object_store
from openstack.cloud import _utils
from openstack.cloud import exc
from openstack.cloud import meta
import openstack.config
from openstack.config import cloud_region as cloud_region_mod
from openstack import proxy
from openstack import utils
DEFAULT_SERVER_AGE = 5
DEFAULT_PORT_AGE = 5
DEFAULT_FLOAT_AGE = 5
_CONFIG_DOC_URL = _floating_ip._CONFIG_DOC_URL
DEFAULT_OBJECT_SEGMENT_SIZE = _object_store.DEFAULT_OBJECT_SEGMENT_SIZE
# This halves the current default for Swift
DEFAULT_MAX_FILE_SIZE = _object_store.DEFAULT_MAX_FILE_SIZE
OBJECT_CONTAINER_ACLS = _object_store.OBJECT_CONTAINER_ACLS
class _OpenStackCloudMixin:
"""Represent a connection to an OpenStack Cloud.
OpenStackCloud is the entry point for all cloud operations, regardless
of which OpenStack service those operations may ultimately come from.
The operations on an OpenStackCloud are resource oriented rather than
REST API operation oriented. For instance, one will request a Floating IP
and that Floating IP will be actualized either via neutron or via nova
depending on how this particular cloud has decided to arrange itself.
:param bool strict: Only return documented attributes for each resource
as per the Data Model contract. (Default False)
"""
_OBJECT_MD5_KEY = '<KEY>'
_OBJECT_SHA256_KEY = 'x-object-meta-x-sdk-sha256'
_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-sdk-autocreated'
_OBJECT_AUTOCREATE_CONTAINER = 'images'
# NOTE(shade) shade keys were x-object-meta-x-shade-md5 - we need to check
# those in freshness checks so that a shade->sdk transition
# doesn't result in a re-upload
_SHADE_OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5'
_SHADE_OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256'
_SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated'
def __init__(self):
super(_OpenStackCloudMixin, self).__init__()
self.log = _log.setup_logging('openstack')
self.name = self.config.name
self.auth = self.config.get_auth_args()
self.default_interface = self.config.get_interface()
self.force_ipv4 = self.config.force_ipv4
(self.verify, self.cert) = self.config.get_requests_verify_args()
# Turn off urllib3 warnings about insecure certs if we have
# explicitly configured requests to tell it we do not want
# cert verification
if not self.verify:
self.log.debug(
"Turning off Insecure SSL warnings since verify=False")
category = requestsexceptions.InsecureRequestWarning
if category:
# InsecureRequestWarning references a Warning class or is None
warnings.filterwarnings('ignore', category=category)
self._disable_warnings = {}
cache_expiration_time = int(self.config.get_cache_expiration_time())
cache_class = self.config.get_cache_class()
cache_arguments = self.config.get_cache_arguments()
self._resource_caches = {}
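        # 'dogpile.cache.null' disables caching entirely; otherwise build one shared
        # region plus a per-resource region for every resource that both has a
        # list_* method and a configured expiration time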
if cache_class != 'dogpile.cache.null':
self.cache_enabled = True
self._cache = self._make_cache(
cache_class, cache_expiration_time, cache_arguments)
expirations = self.config.get_cache_expirations()
for expire_key in expirations.keys():
# Only build caches for things we have list operations for
if getattr(
self, 'list_{0}'.format(expire_key), None):
self._resource_caches[expire_key] = self._make_cache(
cache_class, expirations[expire_key], cache_arguments)
self._SERVER_AGE = DEFAULT_SERVER_AGE
self._PORT_AGE = DEFAULT_PORT_AGE
self._FLOAT_AGE = DEFAULT_FLOAT_AGE
else:
self.cache_enabled = False
def _fake_invalidate(unused):
pass
class _FakeCache:
def invalidate(self):
pass
# Don't cache list_servers if we're not caching things.
# Replace this with a more specific cache configuration
# soon.
self._SERVER_AGE = 0
self._PORT_AGE = 0
self._FLOAT_AGE = 0
self._cache = _FakeCache()
# Undecorate cache decorated methods. Otherwise the call stacks
# wind up being stupidly long and hard to debug
for method in _utils._decorated_methods:
meth_obj = getattr(self, method, None)
if not meth_obj:
continue
if (hasattr(meth_obj, 'invalidate')
and hasattr(meth_obj, 'func')):
new_func = functools.partial(meth_obj.func, self)
new_func.invalidate = _fake_invalidate
setattr(self, method, new_func)
# If server expiration time is set explicitly, use that. Otherwise
# fall back to whatever it was before
self._SERVER_AGE = self.config.get_cache_resource_expiration(
'server', self._SERVER_AGE)
self._PORT_AGE = self.config.get_cache_resource_expiration(
'port', self._PORT_AGE)
self._FLOAT_AGE = self.config.get_cache_resource_expiration(
'floating_ip', self._FLOAT_AGE)
self._container_cache = dict()
self._file_hash_cache = dict()
# self.__pool_executor = None
self._raw_clients = {}
self._local_ipv6 = (
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False)
def connect_as(self, **kwargs):
"""Make a new OpenStackCloud object with new auth context.
Take the existing settings from the current cloud and construct a new
OpenStackCloud object with some of the auth settings overridden. This
is useful for getting an object to perform tasks with as another user,
or in the context of a different project.
.. code-block:: python
conn = openstack.connect(cloud='example')
# Work normally
servers = conn.list_servers()
conn2 = conn.connect_as(username='different-user', password='')
# Work as different-user
servers = conn2.list_servers()
:param kwargs: keyword arguments can contain anything that would
normally go in an auth dict. They will override the same
settings from the parent cloud as appropriate. Entries
            that do not want to be overridden can be omitted.
"""
if self.config._openstack_config:
config = self.config._openstack_config
else:
# TODO(mordred) Replace this with from_session
config = openstack.config.OpenStackConfig(
app_name=self.config._app_name,
app_version=self.config._app_version,
load_yaml_config=False)
params = copy.deepcopy(self.config.config)
        # Remove profile from current cloud so that overriding works
params.pop('profile', None)
# Utility function to help with the stripping below.
def pop_keys(params, auth, name_key, id_key):
if name_key in auth or id_key in auth:
params['auth'].pop(name_key, None)
params['auth'].pop(id_key, None)
# If there are user, project or domain settings in the incoming auth
# dict, strip out both id and name so that a user can say:
# cloud.connect_as(project_name='foo')
# and have that work with clouds that have a project_id set in their
# config.
for prefix in ('user', 'project'):
if prefix == 'user':
name_key = 'username'
else:
name_key = 'project_name'
id_key = '{prefix}_id'.format(prefix=prefix)
pop_keys(params, kwargs, name_key, id_key)
id_key = '{prefix}_domain_id'.format(prefix=prefix)
name_key = '{prefix}_domain_name'.format(prefix=prefix)
pop_keys(params, kwargs, name_key, id_key)
for key, value in kwargs.items():
params['auth'][key] = value
cloud_region = config.get_one(**params)
# Attach the discovery cache from the old session so we won't
# double discover.
cloud_region._discovery_cache = self.session._discovery_cache
# Override the cloud name so that logging/location work right
cloud_region._name = self.name
cloud_region.config['profile'] = self.name
        # Use self.__class__ so that we return whatever this is, like if it's
# a subclass in the case of shade wrapping sdk.
return self.__class__(config=cloud_region)
def connect_as_project(self, project):
"""Make a new OpenStackCloud object with a new project.
Take the existing settings from the current cloud and construct a new
OpenStackCloud object with the project settings overridden. This
is useful for getting an object to perform tasks with as another user,
or in the context of a different project.
.. code-block:: python
cloud = openstack.connect(cloud='example')
# Work normally
servers = cloud.list_servers()
cloud2 = cloud.connect_as_project('different-project')
# Work in different-project
servers = cloud2.list_servers()
:param project: Either a project name or a project dict as returned by
`list_projects`.
"""
auth = {}
if isinstance(project, dict):
auth['project_id'] = project.get('id')
auth['project_name'] = project.get('name')
if project.get('domain_id'):
auth['project_domain_id'] = project['domain_id']
else:
auth['project_name'] = project
return self.connect_as(**auth)
def global_request(self, global_request_id):
"""Make a new Connection object with a global request id set.
Take the existing settings from the current Connection and construct a
new Connection object with the global_request_id overridden.
.. code-block:: python
from oslo_context import context
cloud = openstack.connect(cloud='example')
# Work normally
servers = cloud.list_servers()
cloud2 = cloud.global_request(context.generate_request_id())
# cloud2 sends all requests with global_request_id set
servers = cloud2.list_servers()
Additionally, this can be used as a context manager:
.. code-block:: python
from oslo_context import context
c = openstack.connect(cloud='example')
# Work normally
servers = c.list_servers()
with c.global_request(context.generate_request_id()) as c2:
# c2 sends all requests with global_request_id set
servers = c2.list_servers()
:param global_request_id: The `global_request_id` to send.
"""
params = copy.deepcopy(self.config.config)
cloud_region = cloud_region_mod.from_session(
session=self.session,
app_name=self.config._app_name,
app_version=self.config._app_version,
discovery_cache=self.session._discovery_cache,
**params)
# Override the cloud name so that logging/location work right
cloud_region._name = self.name
cloud_region.config['profile'] = self.name
# Use self.__class__ so that we return whatever this is, like if it's
# a subclass in the case of shade wrapping sdk.
new_conn = self.__class__(config=cloud_region)
new_conn.set_global_request_id(global_request_id)
return new_conn
def _make_cache(self, cache_class, expiration_time, arguments):
return dogpile.cache.make_region(
function_key_generator=self._make_cache_key
).configure(
cache_class,
expiration_time=expiration_time,
arguments=arguments)
def _make_cache_key(self, namespace, fn):
fname = fn.__name__
if namespace is None:
name_key = self.name
else:
name_key = '%s:%s' % (self.name, namespace)
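        # Resulting keys look like "<cloud>[:<namespace>]_<fn>__k1:v1,k2:v2";
        # positional args are ignored for now (see the TODO below)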
def generate_key(*args, **kwargs):
# TODO(frickler): make handling arg keys actually work
arg_key = ''
kw_keys = sorted(kwargs.keys())
kwargs_key = ','.join(
['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache'])
ans = "_".join(
[str(name_key), fname, arg_key, kwargs_key])
return ans
return generate_key
def _get_cache(self, resource_name):
        if
from collections import OrderedDict
import torch
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.utils import load_state_dict_from_url
from torchvision.models.detection.faster_rcnn import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.anchor_utils import AnchorGenerator
from torchvision.models.detection.faster_rcnn import TwoMLPHead
from torchvision.models.detection.image_list import ImageList
__all__ = [
"MaskRCNN"
]
class MaskRCNN(FasterRCNN):
"""
Implements Mask R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
    The behavior of the model changes depending on whether it is in training or evaluation mode.
    During training, the model expects both the input tensors and a targets argument (a list of
    dictionaries) containing:
- boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x
between 0 and W and values of y between 0 and H
- labels (Int64Tensor[N]): the class label for each ground-truth box
- masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN, and the mask loss.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x
between 0 and W and values of y between 0 and H
- labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
- masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
obtain the final segmentation masks, the soft masks can be thresholded, generally
with a value of 0.5 (mask >= 0.5)
Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
image_mean (Tuple[float, float, float]): mean values used for input normalization.
They are generally the mean values of the dataset on which the backbone has been trained
on
image_std (Tuple[float, float, float]): std values used for input normalization.
They are generally the std values of the dataset on which the backbone has been trained on
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes, which will be used for the mask head.
mask_head (nn.Module): module that takes the cropped feature maps as input
mask_predictor (nn.Module): module that takes the output of the mask_head and returns the
segmentation mask logits
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import MaskRCNN
>>> from torchvision.models.detection.anchor_utils import AnchorGenerator
>>>
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
>>> # MaskRCNN needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
>>> # let's define what are the feature maps that we will
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=14,
>>> sampling_ratio=2)
>>> # put the pieces together inside a MaskRCNN model
>>> model = MaskRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler,
>>> mask_roi_pool=mask_roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(self, backbone, num_classes=None,
# transform parameters
min_size=320, max_size=1333,
image_mean=None, image_std=None,
# RPN parameters
rpn_anchor_generator=None, rpn_head=None,
rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
# Box parameters
box_roi_pool=None, box_head=None, box_predictor=None,
box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
box_batch_size_per_image=512, box_positive_fraction=0.25,
bbox_reg_weights=None,
# Mask parameters
mask_roi_pool=None, mask_head=None, mask_predictor=None):
assert isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None)))
if num_classes is not None:
if mask_predictor is not None:
raise ValueError("num_classes should be None when mask_predictor is specified")
out_channels = backbone.out_channels
if mask_roi_pool is None:
mask_roi_pool = MultiScaleRoIAlign(
featmap_names=['0', '1', '2', '3'],
# featmap_names=[0, 1, 2, 3],
output_size=14,
sampling_ratio=2)
if mask_head is None:
mask_layers = (256, 256, 256, 256)
mask_dilation = 1
mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)
if mask_predictor is None:
mask_predictor_in_channels = 256 # == mask_layers[-1]
mask_dim_reduced = 256
mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels,
mask_dim_reduced, num_classes)
# faster rcnn header
if rpn_anchor_generator is None:
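            # NOTE: these anchor sizes (16-256) are one octave smaller than the
            # torchvision FasterRCNN defaults (32-512), presumably tuned for smaller objects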
anchor_sizes = ((16,), (32,), (64,), (128,), (256,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
box_output_size = (7, 7)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(
featmap_names=['0', '1', '2', '3'],
# featmap_names=[0, 1, 2, 3],
output_size=box_output_size,
sampling_ratio=2)
print(box_roi_pool)
            if
None currCtr")
if not seqInd in barriersBySeqByCtr:
# FIXME: Error?
logger.warning("getBarrierForSeqCtr got seqInd %s not in barriersBySeqByCtr", seqInd)
return -1
# For a wait, could look it up by the index
# But the wait should be on the ByCtr list too
# if str(currCtr).startswith('with-'):
# # It was a with, so this has the index in it
# return barriersBySeqByPos[seqInd].get(int(currCtr[5:]), -1)
# elif currCtr == -1:
# # start - should really be -1, but if the sequence has something, why not?
def getLengthBetweenBarriers(seqInd, currCtr, prevCtr='-1', iterCnt=0):
'''For the given sequence, find the length between the given 2 barriers.
Return float('NaN') if indeterminate.
Recurses up the list of barriers adding up the lengths we previously
calculated for each pair of Barriers.
So if the length between any 2 barriers within that chain are indeterminate,
the whole thing is indeterminate.
'''
# ctr of '-1' means start
# ctr of 'wait-' means a Wait of some kind: Format is 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
# seqInd is the index of the sequence
import math
if currCtr == prevCtr:
logger.debug("%sgetLengthBetweenBarriers asked for length to self '%s' (0)", " "*iterCnt, currCtr)
return 0
# find the barrier lengths for this channel
# follow the previous pointers, adding lengths
currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
if currBarrier == -1:
logger.debug("%sgetLengthBetweenBarriers from current -1 (start or error), use length 0", " "*iterCnt)
# from start
return 0
logger.debug("%sgetLengthBetweenBarriers: currBarrier: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", " "*iterCnt, currBarrier['counter'], currBarrier['type'], currBarrier['seqIndex'], currBarrier['lengthSince'], currBarrier['prevBarrierCtr'], currBarrier['lengthCalculated'])
# Basic case: the previous barrier is the one we're looking for
prevBarrierCtr = currBarrier['prevBarrierCtr']
prevLen = currBarrier['lengthSince']
# FIXME: Guard against barrier not having these fields?
if prevBarrierCtr == prevCtr:
logger.debug("%sDesired previous barrier '%s' is actual previous from current '%s', so use stored length: %s", " "*iterCnt, prevCtr, currCtr, prevLen)
return prevLen
if not currBarrier['lengthCalculated'] and iterCnt>0:
        logger.warning("%slength from '%s' to '%s' is not reliable because it was not calculated", " "*iterCnt, currCtr, prevBarrierCtr)
# Old code stored firstPrev and prevPrev to handle repeat with barrier inside
# But now realize that doesn't make sense; any barrier inside a repeat (if allowed at all)
    # must be treated as indeterminate / a Wait
# If the length so far is indeterminate, no use in recursing -
# the whole thing will be indeterminate
if math.isnan(prevLen):
logger.debug("%sLength to previous from current '%s' was NaN, return that", " "*iterCnt, currCtr)
return prevLen
logger.debug("%sLength from curr '%s' to prev '%s' will start with length from curr to next '%s': %s", " "*iterCnt, currCtr, prevCtr, prevBarrierCtr, prevLen)
# If this barrier doesn't store the desired length, then recurse
return prevLen + getLengthBetweenBarriers(seqInd, prevBarrierCtr, prevCtr, iterCnt+1)
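# In effect, getLengthBetweenBarriers() sums the stored per-segment lengths along the
# prevBarrierCtr chain: length(curr -> prev) = lengthSince(curr) + length(curr.prev -> prev),
# and a NaN for any segment makes the whole distance indeterminate.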
def isReplaceableBarrier(barrier, seqs):
    '''Is the given barrier object replaceable on its sequence?
    Start, Wait, WaitSome, and barriers that are no longer in
    their sequence are not replaceable; only a Barrier() with the correct id (counter) is.
    HOWEVER: We now pretend Wait/WaitSome are replaceable so that later we calculate the real length, though
we don't actually do the replacement.
'''
# Is the given barrier something we can replace?
# Not a Wait or WaitSome, and still in its sequence
# return boolean
ind = barrier['seqPos']
nextCtr = barrier['counter']
nextType = barrier['type']
seqInd = barrier['seqIndex']
logger.debug("Checking if barrier '%s' is replaceable: %s", nextCtr, barrier)
if ind < 0:
logger.debug("Barrier '%s' is start, not replaceable", nextCtr)
return False
# 7/8: Don't bail here; so we can calculate the length later
# if nextType in ('wait', 'waitsome'):
# logger.debug("Barrier %s is a wait, not replaceable", nextCtr)
# return False
if seqs:
if not (seqInd >= 0 and seqInd < len(seqs)):
            logger.warning("Barrier '%s' claims to be on sequence %d which doesn't exist (can't replace)", nextCtr, seqInd)
return False
if len(seqs[seqInd]) <= ind:
            logger.warning("Barrier '%s' claims to be at position %d on sequence %d; the sequence has only %d items (can't replace)", nextCtr, ind, seqInd, len(seqs[seqInd]))
return False
if hasattr(seqs[seqInd][ind], 'value') and seqs[seqInd][ind].value == nextCtr:
# This is a barrier with the desired counter on the proper sequence
return True
if isID(seqs[seqInd][ind]):
# Expected when we've already done a replacement
logger.debug("Barrier '%s' actual element is (now) %s on sequence %d (don't replace)", nextCtr, seqs[seqInd][ind], seqInd)
return False
# 7/8: We want to let it go through if it's a Wait or WaitSome for now
if isWait(seqs[seqInd][ind]) or isWaitSome(seqs[seqInd][ind]):
logger.debug("Barrier '%s' on sequence %d is a Wait or WaitSome - pretend it's replaceable so we calculate the length: %s", nextCtr, seqInd, seqs[seqInd][ind])
return True
if not isBarrier(seqs[seqInd][ind]):
# We don't think we've replaced any barriers with waits, so this is unexpected
logger.debug("Barrier '%s' claims type %s but actual element is (now) %s on sequence %d (not replaceable)", nextCtr, nextType, seqs[seqInd][ind], seqInd)
return False
else:
# It's a barrier but the wrong barrier ID?
logger.warning("Barrier '%s' should be at %d on sequence %d, but instead found %s (can't replace)", nextCtr, ind, seqInd, seqs[seqInd][ind])
return False
return False
def getNextBarrierCtr(seqs, seqInd, currCtr, positions):
''' Find the id (counter) of the next Barrier after currCtr on the given sequence
that we could (still) replace. So skip barriers no longer in the sequence.
positions is sorted indices in sequence seqInd in barriersBySeqByPos.
Return '-1' if there is none.
'''
# Walk to the next barrier past currCtr on sequence seqInd and return the counter of that barrier
# Return '-1' if no more
# This is just iterating over barriers on this channel
# This is for following execution path of a sequence to find
# all the barriers and swap them all
# seqInd is the sequence index
global barriersBySeqByPos, barriersBySeqByCtr
logger.debug("Looking for next barrier to replace on sequence %d after '%s'", seqInd, currCtr)
# Handle case where there's no current - we're looking for the first
if str(currCtr) == '-1':
logger.debug("Looking for 1st barrier on sequence %d", seqInd)
for i in positions:
barrier = barriersBySeqByPos[seqInd][i]
# Make sure that barrier is actually still in the sequence it claims to be in;
# we might have already removed it
if isReplaceableBarrier(barrier, seqs):
# return this ctr
if str(barrier) == '-1':
return '-1'
elif barrier['lengthCalculated']:
logger.debug("... but this barrier length already calculated, continue")
continue
else:
logger.debug("First replaceable barrier on sequence %d: %s\n", seqInd, barrier['counter'])
return barrier['counter']
else:
# logger.debug("%s not (no longer) replaceable", barrier)
# keep looping
continue
# If we get here, there are no replaceable barriers
logger.debug("No (more) replaceable barriers on sequence %d\n", seqInd)
return '-1'
# Find this barrier object in barriersBySeqByCtr or -1
currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
found = False
try:
currPos = currBarrier['seqPos']
for pos in positions:
# Start looking at things after curBarrier
if pos < currPos:
continue
barrier = barriersBySeqByPos[seqInd][pos]
nextCtr = barrier['counter']
# logger.debug("getNextBarrier after '%s' on seq %d: looking at barrier '%s'", currCtr, seqInd, barrier['counter'])
# Could use barriersEqual but we know both use same seqInd and curr uses currCtr
if not found and nextCtr == currCtr:
# If we hadn't yet found the desired barrier but did now, say so
# logger.debug("found current")
if pos != currPos:
logger.warning("Huh? Barrier ctrs are same (%s) but positions in sequence are diff (curr %d != pos %d)", nextCtr, currPos, pos)
found = True
continue
if found:
logger.debug("'%s' is barrier after '%s' on sequence %d", nextCtr, currCtr, seqInd)
# But if we had found it, then the next one we found is next
# NOW....
# Before blindly returning this barrier, see if it is actually still in the sequence
# Make sure that barrier is actually still in the sequence it claims to be in;
# we might have already removed it
if isReplaceableBarrier(barrier, seqs):
# return this ctr
if str(barrier) == '-1':
logger.debug("... returning it as next\n")
return '-1'
elif barrier['lengthCalculated']:
logger.debug("... but this barrier length already calculated, continue")
continue
else:
logger.debug("... returning it as next\n")
from xml.etree import ElementTree
import svgwrite
import enum, math
################
# Tree utility functions
#
# For generically handling the various kinds of tree-like things that can be
# passed to this module
################
def treelet_split_str(t):
# treat strings as leaf nodes.
if isinstance(t, str):
return (t, tuple())
else:
return None
def treelet_split_nltk(t):
# nltk.Tree API: the parent is in label(). The children are elements of the
# object itself, which inherits from list.
try:
h = t.label()
return (h, list(t))
except:
return None
def treelet_split_list(t):
# treat a list as a lisp-like tree structure, i.e. each subtree is a list
# of the parent followed by any children.
# A 0-length list is treated as an empty leaf node.
try:
if (len(t) == 0):
return ("", tuple())
else:
return (t[0], t[1:])
except:
return None
def treelet_split_fallback(t):
# fallback to str(). TODO: enhance, or remove?
return (str(t), tuple())
def tree_split(t, fallback=treelet_split_fallback):
"""Splits `t` into a parent and an iterable of children, possibly empty."""
if isinstance(t, ElementTree.Element):
        # we do this explicitly because otherwise it gets parsed as an iterable
raise NotImplementedError(
"svgling.core does not support trees constructed with ElementTree objects.")
split = treelet_split_str(t)
if split is not None:
return split
split = treelet_split_nltk(t)
if split is not None:
return split
split = treelet_split_list(t)
if split is not None:
return split
return fallback(t)
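# Illustrative examples of the dispatch above:
#   tree_split("dog")            -> ("dog", ())
#   tree_split(["NP", "D", "N"]) -> ("NP", ["D", "N"])
#   tree_split([])               -> ("", ())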
def tree_cxr(t, i):
return tree_split(t)[i]
def tree_car(t):
"""What is the parent of a tree-like object `t`?
Try to adapt to various possibilities, including nltk.Tree."""
return tree_cxr(t, 0)
def tree_cdr(t):
"""What are the children of a tree-like object `t`?
Try to adapt to various possibilities, including nltk.Tree."""
return tree_cxr(t, 1)
def is_leaf(t):
return len(tree_cdr(t)) == 0
def tree_depth(t):
"""What is the max depth of t?"""
# n.b. car is always length 1 the way trees are currently parsed
subdepth = 0
for subtree in tree_cdr(t):
subdepth = max(subdepth, tree_depth(subtree))
return subdepth + 1
def leaf_iter(t):
parent, children = tree_split(t)
if len(children) == 0:
yield parent
for c in children:
yield from leaf_iter(c)
def common_parent(path1, path2):
for i in range(min(len(path1), len(path2))):
if path1[i] != path2[i]:
return tuple(path1[0:i])
if len(path1) < len(path2):
return path1
else:
return path2
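# For example, common_parent((0, 1, 0), (0, 1, 1)) == (0, 1) and
# common_parent((0,), (0, 1)) == (0,), a path being a tuple of child indices.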
################
# Tree layout options
################
SERIF = "font-family: times, serif; font-weight:normal; font-style: normal;"
# n.b. Lucida Console is more like 1.5 average glyph width
MONO = "font-family: \"Lucida Console\", Monaco, monospace; font-weight:normal; font-style: normal;"
SANS = "font-family: Arial, Helvetica, sans-serif; font-weight:normal; font-style: normal;"
# either EVEN or NODES usually looks best with abstract trees; TEXT usually
# looks the best for trees with real node labels, and so it is the default.
class HorizSpacing(enum.Enum):
TEXT = 0 # Space daughter nodes proportional to label width
EVEN = 1 # Space daughter nodes evenly
NODES = 2 # Space daughter nodes based on number of leaf nodes
HorizOptions = HorizSpacing # backwards compatibility
class VertAlign(enum.Enum):
TOP = 0 # align nodes at the top of the level's height
CENTER = 1 # align nodes to the center of the level's height. Default.
BOTTOM = 2 # align nodes with the bottom of the level's height
FULL = 3 # all nodes take up the full level height. Currently, this aligns
# text to the top, maybe would be better if centered?
def px(n):
return "%gpx" % n
def em(n, options=None):
if options is None or options.relative_units:
return "%gem" % n
else:
return px(options.em_to_px(n))
def perc(n):
return "%g%%" % n
crisp_perpendiculars = True
class TreeOptions(object):
def __init__(self, horiz_spacing=HorizSpacing.TEXT,
vert_align=VertAlign.CENTER,
leaf_padding=2,
distance_to_daughter=2,
debug=False,
leaf_nodes_align=False,
global_font_style=SERIF,
average_glyph_width=2.0,
descend_direct=True,
relative_units=True,
font_size = 16):
self.horiz_spacing = horiz_spacing
self.vert_align = vert_align
self.leaf_padding = leaf_padding
self.distance_to_daughter = distance_to_daughter
self.debug = debug
self.leaf_nodes_align = leaf_nodes_align
self.global_font_style = global_font_style
# 2.0 default value is a heuristic -- roughly, 2 chars per em
self.average_glyph_width = average_glyph_width
        # for multi-level descents, do we just draw a direct (usually sharply
# angled) line, or do we draw an angled line one level, and a straight
# line for the rest? Node position is unaffected.
self.descend_direct = descend_direct
# not technically an option, but convenient to store here for now...
self.max_depth = 0
self.relative_units = relative_units
self.font_size = font_size
def style_str(self):
return self.global_font_style + " font-size: " + px(self.font_size) + ";"
def label_width(self, label):
return (len(str(label)) + self.leaf_padding) / self.average_glyph_width
def tree_height(self, t):
"""Calculate tree height, in ems. Takes into account multi-line leaf
nodes."""
# TODO: generalize to multi-line nodes of all kinds.
parent, children = tree_split(t)
if len(children) == 0:
return len(parent.split("\n"))
subheight = 0
for subtree in children:
subheight = max(subheight, self.tree_height(subtree))
return subheight + self.distance_to_daughter + 1
def em_to_px(self, n):
return n * self.font_size
def leaf_nodecount(t, options=None):
"""How many nodes wide are all the leafs? Will add padding."""
if options is None:
options=TreeOptions()
parent, children = tree_split(t)
if len(children) == 0:
return 1 + options.leaf_padding
subwidth = 0
for subtree in children:
subwidth += leaf_nodecount(subtree, options)
return subwidth
################
# Tree layout and SVG generation
################
class NodePos(object):
def __init__(self, svg, x, y, width, height, depth, options=None):
self.x = x
self.y = y
self.width = max(width, 1) # avoid divide by 0 errors
self.inner_width = width
self.height = height
self.inner_height = height
self.depth = depth
self.svg = svg
self.text = svg
self.options = options
self.clear_edge_styles()
def set_edge_style(self, daughter, style):
self.edge_styles[daughter] = style
def get_edge_style(self, daughter):
return self.edge_styles.get(daughter, None)
def has_edge_style(self, daughter):
return daughter in self.edge_styles
def clear_edge_styles(self):
self.edge_styles = dict() # no info about surrounding tree structure...
def width(self):
return self.width
def em_height(self):
return self.height
def get_svg(self):
# TODO: generalize this / make it less hacky
self.svg["y"] = em(self.y, self.options)
return self.svg
def __repr__(self):
return self.text
@classmethod
def from_label(cls, label, depth, options):
y = 1
svg_parent = svgwrite.container.SVG(x=0, y=0, width="100%")
if len(label) == 0:
return NodePos(svg_parent, 50, 0, options.label_width(""), 0, depth, options)
for line in label.split("\n"):
svg_parent.add(svgwrite.text.Text(line, insert=("50%", em(y, options)),
text_anchor="middle"))
y += 1
width = max([options.label_width(line) for line in label.split("\n")])
result = NodePos(svg_parent, 50, 0, width, y-1, depth, options)
result.text = label
return result
class EdgeStyle(object):
def __init__(self, path=None, stroke="black", stroke_width=None):
if path:
path = tuple(path)
self.path = path
self.stroke = stroke
self.stroke_width = stroke_width
def __hash__(self):
if (self.path is not None):
return hash(self.path)
else:
            return object.__hash__(self)
def svg_opts(self):
opts = dict({"stroke": self.stroke})
if self.stroke_width:
opts["stroke_width"] = self.stroke_width
return opts
def draw(self, svg_parent, tree_layout, parent, child):
line_start = parent.y + parent.height
if parent.height > 0:
line_start += 0.2 # extra space for descenders
box_y = tree_layout.y_distance(parent.depth, child.depth)
y_target = em(box_y + child.y, tree_layout.options)
x_target = perc(child.x + child.width / 2)
svg_parent.add(svgwrite.shapes.Line(
start=("50%", em(line_start, tree_layout.options)),
end=(x_target, y_target),
**self.svg_opts()))
class IndirectDescent(EdgeStyle):
def draw(self, svg_parent, tree_layout, parent, child):
from svgwrite.shapes import Line
if child.depth > parent.depth + 1:
line_start = parent.y + parent.height
if parent.height > 0:
line_start += 0.2 # extra space for descenders
box_y = tree_layout.y_distance(parent.depth, child.depth)
y_target = em(box_y + child.y, tree_layout.options)
x_target = perc(child.x + child.width / 2)
# we are skipping level(s). Find the y position that an empty
# node on the next level would have.
intermediate_y = em(tree_layout.label_y_dodge(level=parent.depth+1,
height=0)[0]
+ tree_layout.y_distance(parent.depth, parent.depth+1),
tree_layout.options)
# TODO: do as Path?
svg_parent.add(Line(start=("50%", em(line_start, tree_layout.options)),
end=(x_target, intermediate_y),
**self.svg_opts()))
svg_parent.add(Line(start=(x_target, intermediate_y),
end=(x_target, y_target),
**self.svg_opts()))
else:
EdgeStyle.draw(self, svg_parent, tree_layout, parent, child)
class TriangleEdge(EdgeStyle):
def draw(self, svg_parent, tree_layout, parent, child):
line_start = parent.y + parent.height
if parent.height > 0:
line_start += 0.2 # extra space for descenders
box_y = tree_layout.y_distance(parent.depth, child.depth)
y_target = em(box_y + child.y, tree_layout.options)
# difference from the midpoint. 0.8 is a heuristic to account for leaf
# padding. Under normal font conditions, doesn't start to look off until
# ~60 character widths.
width_dodge = 0.8 * child.inner_width / 2.0
x_target_l = perc(child.x + child.width / 2 - width_dodge)
x_target_r = perc(child.x + child.width / 2 + width_dodge)
svg_parent.add(svgwrite.shapes.Line(start=("50%", em(line_start, tree_layout.options)),
end=(x_target_l, y_target),
**self.svg_opts()))
svg_parent.add(svgwrite.shapes.Line(start=("50%", em(line_start, tree_layout.options)),
end=(x_target_r, y_target),
**self.svg_opts()))
svg_parent.add(svgwrite.shapes.Line(start=(x_target_l, y_target),
end=(x_target_r, y_target),
**self.svg_opts()))
class TreeLayout(object):
"""Container class for storing a tree layout state."""
def __init__(self, t, options=None):
if options is None:
options = TreeOptions()
self.level_heights = dict()
self.level_ys = dict({0: 0})
self.max_width = 1
self.extra_y = 0.5
self.depth = 0
self.options = options
self.tree = t
import numpy as np
import logging
import emcee
from uncertainties import unumpy, ufloat
import matplotlib
from scipy.interpolate import interp1d, interp2d
from scipy.integrate import trapz
import time
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import corner
import VmaxLumFunc as V
import seaborn as sns
sns.set_context("talk") # options include: talk, poster, paper
sns.set_style("ticks")
sns.set_style({"xtick.direction": "in","ytick.direction": "in",
"xtick.top":True, "ytick.right":True,
"xtick.major.size":12, "xtick.minor.size":4,
"ytick.major.size":12, "ytick.minor.size":4,
})
def TrueLumFunc(logL,alpha,logLstar,logphistar):
''' Calculate true luminosity function (Schechter form)
Input
-----
logL : float or numpy 1-D array
Value or array of log luminosities in erg/s
alpha: float
Schechther alpha parameter
logLstar: float
Schechther log(Lstar) parameter
logphistar: float
Schechther log(phistar) parameter
Returns
-------
Phi(logL,z) : Float or 1-D array (same size as logL and/or z)
Value or array giving luminosity function in Mpc^-3/dex
'''
return np.log(10.0) * 10**logphistar * 10**((logL-logLstar)*(alpha+1))*np.exp(-10**(logL-logLstar))
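# Example (illustrative values only): TrueLumFunc(42.0, -1.6, 42.5, -3.0) gives the
# comoving number density of sources per dex at logL = 42 for a Schechter function
# with alpha = -1.6, log(Lstar) = 42.5 and log(phistar) = -3.0, in Mpc^-3 dex^-1.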
#
def Omega(logL,z,dLzfunc,Omega_0,Flim,alpha):
''' Calculate fractional area of the sky in which galaxies have fluxes large enough so that they can be detected
Input
-----
logL : float or numpy 1-D array
Value or array of log luminosities in erg/s
z : float or numpy 1-D array
Value or array of redshifts; logL and z cannot be different-sized arrays
dLzfunc : interp1d function
1-D interpolation function for luminosity distance in Mpc
Omega_0: float
Effective survey area in square arcseconds
Flim: float
50% completeness flux value
alpha: float
Completeness-related slope
Returns
-------
Omega(logL,z) : Float or 1-D array (same size as logL and/or z)
'''
L = 10**logL
return Omega_0/V.sqarcsec * V.p(L/(4.0*np.pi*(3.086e24*dLzfunc(z))**2),Flim,alpha)
class LumFuncMCMC:
def __init__(self,z,flux=None,flux_e=None,Flim=2.7e-17,alpha=-2.06,
line_name="OIII",line_plot_name=r'[OIII] $\lambda 5007$',
lum=None,lum_e=None,Omega_0=100.0,nbins=50,nboot=100,sch_al=-1.6,
sch_al_lims=[-3.0,1.0],Lstar=42.5,Lstar_lims=[40.0,45.0],phistar=-3.0,
phistar_lims=[-8.0,5.0],Lc=35.0,Lh=60.0,nwalkers=100,nsteps=1000,root=0.0):
''' Initialize LumFuncMCMC class
Init
----
z : numpy array (1 dim)
Array of redshifts for sample
flux : numpy array (1 dim) or None Object
Array of fluxes in 10^-17 erg/cm^2/s
flux_e : numpy array (1 dim) or None Object
Array of flux errors in 10^-17 erg/cm^2/s
Flim: float
50% completeness flux value
alpha: float
Completeness-related slope
line_name: string
Name of line or monochromatic luminosity element
line_plot_name: (raw) string
Fancier name of line or luminosity element for putting in plot labels
lum: numpy array (1 dim) or None Object
Array of log luminosities in erg/s
lum_e: numpy array (1 dim) or None Object
Array of log luminosity errors in erg/s
Omega_0: float
Effective survey area in square arcseconds
nbins: int
Number of bins for plotting luminosity function and conducting V_eff method
nboot: int
Number of iterations for bootstrap method for determining errors for V_eff method
sch_al: float
Schechther alpha parameter
sch_al_lims: two-element list
Minimum and maximum values allowed in Schechter alpha prior
Lstar: float
Schechther log(Lstar) parameter
Lstar_lims: two-element list
Minimum and maximum values allowed in Schechter log(Lstar) prior
phistar: float
Schechther log(phistar) parameter
phistar_lims: two-element list
Minimum and maximum values allowed in Schechter log(phistar) prior
Lc, Lh: floats
Minimum and maximum log luminosity, respectively, for likelihood integral
nwalkers : int
The number of walkers for emcee when fitting a model
nsteps : int
The number of steps each walker will make when fitting a model
root: Float
Minimum flux cutoff based on the completeness curve parameters and desired minimum completeness
'''
self.z = z
self.zmin, self.zmax = min(self.z), max(self.z)
self.root = root
self.setDLdVdz()
if flux is not None:
self.flux = 1.0e-17*flux
if flux_e is not None:
self.flux_e = 1.0e-17*flux_e
else:
self.lum, self.lum_e = lum, lum_e
self.getFluxes()
self.Flim = Flim
self.alpha = alpha
self.line_name = line_name
self.line_plot_name = line_plot_name
if lum is None:
self.getLumin()
self.Lc, self.Lh = Lc, Lh
self.Omega_0 = Omega_0
self.setOmegaLz()
self.nbins, self.nboot = nbins, nboot
self.sch_al, self.sch_al_lims = sch_al, sch_al_lims
self.Lstar, self.Lstar_lims = Lstar, Lstar_lims
self.phistar, self.phistar_lims = phistar, phistar_lims
self.nwalkers, self.nsteps = nwalkers, nsteps
self.setup_logging()
def setDLdVdz(self):
''' Create 1-D interpolated functions for luminosity distance (cm) and comoving volume differential (Mpc^3); also get function for minimum luminosity considered '''
self.DL = np.zeros(len(self.z))
zint = np.linspace(0.95*self.zmin,1.05*self.zmax,len(self.z))
dVdzarr, DLarr, minlum = np.zeros(len(zint)), np.zeros(len(zint)), np.zeros(len(zint))
for i,zi in enumerate(self.z):
self.DL[i] = V.dLz(zi) # In Mpc
DLarr[i] = V.dLz(zint[i])
dVdzarr[i] = V.dVdz(zint[i])
minlum[i] = np.log10(4.0*np.pi*(DLarr[i]*3.086e24)**2 * self.root)
self.DLf = interp1d(zint,DLarr)
self.dVdzf = interp1d(zint,dVdzarr)
self.minlumf = interp1d(zint,minlum)
def setOmegaLz(self):
''' Create a 2-D interpolated function for Omega (fraction of sources that can be observed) '''
logL = np.linspace(self.Lc,self.Lh,501)
zarr = np.linspace(self.zmin,self.zmax,501)
xx, yy = np.meshgrid(logL,zarr)
Omegaarr = Omega(xx,yy,self.DLf,self.Omega_0,self.Flim,self.alpha)
self.Omegaf = interp2d(logL,zarr,Omegaarr,kind='cubic')
def getLumin(self):
''' Set the sample log luminosities (and error if flux errors available)
based on given flux values and luminosity distance values
'''
if self.flux_e is not None:
ulum = unumpy.log10(4.0*np.pi*(self.DL*3.086e24)**2 * unumpy.uarray(self.flux,self.flux_e))
self.lum, self.lum_e = unumpy.nominal_values(ulum), unumpy.std_devs(ulum)
else:
self.lum = np.log10(4.0*np.pi*(self.DL*3.086e24)**2 * self.flux)
self.lum_e = None
def getFluxes(self):
''' Set sample fluxes based on luminosities if not available '''
if self.lum_e is not None:
ulum = 10**unumpy.uarray(self.lum,self.lum_e)
uflux = ulum/(4.0*np.pi*(self.DL*3.086e24)**2)
self.flux, self.flux_e = unumpy.nominal_values(uflux), unumpy.std_devs(uflux)
else:
self.flux = 10**self.lum/(4.0*np.pi*(self.DL*3.086e24)**2)
self.flux_e = None
def setup_logging(self):
'''Setup Logging for MCSED
Builds
-------
self.log : class
self.log.info() is for general print and self.log.error() is
for raise cases
'''
self.log = logging.getLogger('lumfuncmcmc')
if not len(self.log.handlers):
# Set format for logger
fmt = '[%(levelname)s - %(asctime)s] %(message)s'
fmt = logging.Formatter(fmt)
# Set level of logging
level = logging.INFO
# Set handler for logging
handler = logging.StreamHandler()
handler.setFormatter(fmt)
handler.setLevel(level)
# Build log with name, mcsed
self.log = logging.getLogger('lumfuncmcmc')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(handler)
def set_parameters_from_list(self,input_list):
''' For a given set of model parameters, set the needed class variables.
Input
-----
theta : list
list of input parameters for Schechter Fit'''
self.sch_al = input_list[0]
self.Lstar = input_list[1]
self.phistar = input_list[2]
def lnprior(self):
''' Simple, uniform prior for input variables
Returns
-------
0.0 if all parameters are in bounds, -np.inf if any are out of bounds
'''
sch_al_flag = ((self.sch_al >= self.sch_al_lims[0]) *
(self.sch_al <= self.sch_al_lims[1]))
Lstar_flag = ((self.Lstar >= self.Lstar_lims[0]) *
(self.Lstar <= self.Lstar_lims[1]))
phistar_flag = ((self.phistar >= self.phistar_lims[0]) *
(self.phistar <= self.phistar_lims[1]))
flag = sch_al_flag*Lstar_flag*phistar_flag
if not flag:
return -np.inf
else:
return 0.0
def lnlike(self):
''' Calculate the log likelihood and return the value and stellar mass
of the model as well as other derived parameters
Returns
-------
log likelihood (float)
The log likelihood includes a ln term and an integral term (based on Poisson statistics). '''
lnpart = sum(np.log(TrueLumFunc(self.lum,self.sch_al,self.Lstar,self.phistar)))
# logL = np.linspace(self.Lc,self.Lh,101)
zarr = np.linspace(self.zmin,self.zmax,101)
dz = zarr[1]-zarr[0]
zmid = np.linspace(self.zmin+dz/2.0,self.zmax-dz/2.0,len(zarr)-1)
fullint = 0.0
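        # Approximate the double integral over z and logL: midpoint rule in z, trapezoid
        # rule in logL, with the lower logL limit at each redshift taken as the larger of
        # the faintest observed luminosity and the survey detection limit (self.minlumf)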
for i, zi in enumerate(zmid):
logL = np.linspace(max(min(self.lum),self.minlumf(zi)),self.Lstar+1.75,101)
integ = TrueLumFunc(logL,self.sch_al,self.Lstar,self.phistar)*self.dVdzf(zi)*self.Omegaf(logL,zi)
fullint += trapz(integ,logL)*dz
return lnpart - fullint
def lnprob(self, theta):
''' Calculate the log probabilty
Returns
-------
log prior + log likelihood, (float)
The log probability is just the sum of the logs of the prior and likelihood. '''
self.set_parameters_from_list(theta)
lp = self.lnprior()
if np.isfinite(lp):
lnl = self.lnlike()
return lnl+lp
else:
return -np.inf
def get_init_walker_values(self, num=None):
''' Before running emcee, this function generates starting points
for each walker in the MCMC process.
Returns
-------
pos : np.array (2 dim)
Two dimensional array with Nwalker x Ndim values
'''
theta = [self.sch_al, self.Lstar, self.phistar]
theta_lims = np.vstack((self.sch_al_lims,self.Lstar_lims,self.phistar_lims))
if num is None:
num = self.nwalkers
pos = (np.random.rand(num)[:, np.newaxis] *
(theta_lims[:, 1]-theta_lims[:, 0]) + theta_lims[:, 0])
return pos
def get_param_names(self):
''' Grab the names of the parameters for plotting
Returns
-------
names : list
list of all parameter names
'''
return [r'$\alpha$',r'$\log L_*$',r'$\log \phi_*$']
def get_params(self):
        ''' Grab the parameters of the class
Returns
-------
vals : list
list of all parameter values
'''
vals = [self.sch_al,self.Lstar,self.phistar]
self.nfreeparams = len(vals)
return vals
def fit_model(self):
''' Using emcee to find parameter estimations for given set of
data measurements and errors
'''
self.log.info('Fitting Schechter model to true luminosity function using emcee')
pos = self.get_init_walker_values()
ndim = pos.shape[1]
start = time.time()
sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self.lnprob)
# Do real run
sampler.run_mcmc(pos, self.nsteps, rstate0=np.random.get_state())
end = time.time()
elapsed = end - start
self.log.info("Total time taken: %0.2f s" % elapsed)
self.log.info("Time taken per step per walker: %0.2f ms" %
(elapsed / (self.nsteps) * 1000. /
self.nwalkers))
# Calculate how long the run should last
tau = np.max(sampler.acor)
burnin_step = int(tau*3)
self.log.info("Mean acceptance fraction: %0.2f" %
| |
# --- casino_simulator/roulette/gameObjects.py (repo: ngoylufo/casino-simulator) ---
import random
from ..gameObjects import (Outcome, OutcomeFactory, Bet, Table, Player, Game,
Simulator)
from ..exceptions import InvalidObjectError
class Bin(object):
"""A numbered :class:'Bin' in a roulette wheel containing a number of
associated :class:'Outcome's.
Essentially a :class:'Bin' is a collection of :class:'Outcome's associated
with it. The number of :class:'Outcome's a :class:'Bin' holds depends on
the number of bet combinations that can be made with the :class:'Bin's
number.
    Creating a :class:'Bin' object.
>>> outcomes = Outcome('24', 35), Outcome('Split 24-25', 17)
>>> Bin(35, *outcomes)
<Bin 35>
Printing this :class:'Bin' returns something like this.
Bin(35, { <Outcome '24' 35:1>, <Outcome 'Split 24-25' 17:1> })
"""
def __init__(self, number, *outcomes):
"""Initialize an instance of a :class:'Bin' with a given number and list
of :class:'Outcome's.
Initializing a Bin object.
>>> outcomes = Outcome('24', 35), Outcome('Split 24-25', 17)
>>> bin_ = Bin(35, *outcomes)
:param number: The number of the :class:'Bin'.
:param outcomes: :class:'Outcome's to initialize the :class:'Bin' with.
"""
if not isinstance(number, int):
raise InvalidObjectError
self.number, self.outcomes = number, frozenset(outcomes)
def add(self, outcome):
"""Adds an individual :class:'Outcome' to the :class:'Bin'.
Adding an Outcome to a Bin.
>>> bin_ = Bin(2)
>>> bin_.add(Outcome('2', 35))
>>> print(bin_)
Bin(2, {<Outcome '2' 35:1>})
"""
if not isinstance(outcome, Outcome):
raise InvalidObjectError
self.outcomes |= set([outcome])
def __repr__(self):
"""The type representation of a :class:'Bin' object.
Create Bins object and get their representation.
>>> outcomes = Outcome('00', 35), Outcome('Split 23-24', 17)
>>> Bin(37, outcomes[0]), Bin(23, outcomes[1])
(<Bin 00>, <Bin 23>)
:return string: Type representation of the :class:'Outcome'.
"""
return "<Bin %s>" % ("00" if self.number == 37 else str(self.number))
def __str__(self):
"""The string representation of a :class:'Bin' object.
Create Bins object and get their representation.
>>> outcomes = Outcome('00', 35), Outcome('00-0-1-2-3', 6)
>>> bin_ = Bin(37, *outcomes)
Printing this :class:'Bin' returns something like this.
Bin(37, {<Outcome '00' 35:1>, <Outcome '00-0-1-2-3' 6:1>})
:return string: String representation of the :class:'Bin'.
"""
outcomes = ', '.join([repr(oc) for oc in self.outcomes])
return "Bin(%d, {%s})" % (self.number, outcomes)
class Wheel(object):
"""A :class:'Wheel' is collection of 38 numbered :class:'Bin's, who
themselves are a collection of :class:'Outcome's. The Wheel uses a random
number generator to select the wining :class:'Bin's.
Creating a :class:'Wheel'
>>> wheel = Wheel()
    Creating a :class:'Wheel' with a custom number generator
>>> rng = NonRandom() # Custom random number generator
>>> wheel = Wheel(rng)
"""
bins = None
builder = None
outcomes = set()
def __init__(self, rng=None):
"""Initialize a Wheel with 38 Bins and a random number generator."""
self.rng = random.Random() if rng is None else rng
self.builder = BinBuilder(self)
self.build_components()
def build_components(self):
self.builder.build_bins()
def add_outcome(self, number, outcome):
"""Adds the given Outcome to the Bin with the given number.
:param bin: The Bin (number) to add the Outcome to.
:param outcome: The Outcome to add.
"""
if not (isinstance(number, int) and 0 <= number <= 37):
return NotImplemented
self.bins[number].add(outcome)
self.outcomes.add(outcome)
def get_outcome(self, name):
"""Returns the specified outcome.
:param name: The Outcome to retrieve.
:return Outcome:
"""
for oc in self.outcomes:
if name == oc.name:
return oc
else:
return False
def get_random_outcome(self):
"""Returns a random outcome.
:return Outcome:
"""
return random.choice(list(self.outcomes))
def next(self):
"""Generates a random number between 0 and 37, and returns the
randomly selected Bin."""
choice = self.rng.choice([num for num in range(38)])
return self.bins[choice]
def get(self, number):
"""Returns the specified Bin from the internal collection."""
if isinstance(number, int) and 0 <= number <= 37:
return self.bins[number]
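# Hedged sketch: the Wheel docstring above mentions a NonRandom helper that is not
# defined in this module; the local stand-in below is an assumption used only to
# illustrate injecting a deterministic rng for repeatable tests.
def _demo_deterministic_wheel(fixed_number=17):
    class NonRandomSketch:
        """Always returns the element at a fixed index, so Wheel.next() is repeatable."""
        def __init__(self, number):
            self.number = number

        def choice(self, seq):
            return seq[self.number]

    wheel = Wheel(rng=NonRandomSketch(fixed_number))
    return wheel.next()  # always the Bin numbered fixed_number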
class BinBuilder(object):
"""Builds bins with outcomes for a wheel"""
wheel = None
fact = None
def __init__(self, wheel=None):
""""""
if wheel is not None:
self.set_wheel(wheel)
self.fact = OutcomeFactory()
def set_wheel(self, wheel):
if not isinstance(wheel, Wheel):
raise InvalidObjectError
self.wheel = wheel
def build_bins(self, wheel=None):
if wheel is not None:
self.set_wheel(wheel)
self.wheel.bins = tuple([Bin(num) for num in range(38)])
from inspect import getmembers
members = getmembers(self)
for name, method in members:
if name.startswith("generate"):
method()
def generate_zero_bets(self):
"""Generates zero bet Outcomes"""
oc = self.fact.make('00-0-1-2-3', 6)
for n in [37, 0, 1, 2, 3]:
self.wheel.add_outcome(n, oc)
def generate_straight_bets(self):
"""Generates straight bet Outcomes"""
self.wheel.add_outcome(0, self.fact.make("0", 35))
for n in range(1, 37):
oc = self.fact.make(f"{n}", (35))
self.wheel.add_outcome(n, oc)
self.wheel.add_outcome(37, self.fact.make("00", (35)))
def generate_left_right_split_bets(self):
"""Generates left right split bet Outcomes"""
for c in [1, 2]:
for r in range(12):
n = (3 * r) + c
oc = self.fact.make(f'Split {n}-{n+1}', 17)
self.wheel.add_outcome(n, oc)
self.wheel.add_outcome(n + 1, oc)
def generate_up_down_split_bets(self):
"""Generates up down split bet Outcomes"""
for n in range(1, 34):
oc = self.fact.make(f'Split {n}-{n+3}', (17))
self.wheel.add_outcome(n, oc)
self.wheel.add_outcome(n + 3, oc)
def generate_street_bets(self):
"""Generates street bet Outcomes"""
for r in range(12):
n = (3 * r) + 1
oc = self.fact.make(f"Street {n}-{n+1}-{n+2}", (11))
for i in range(3):
self.wheel.add_outcome(n + i, oc)
def generate_corner_bets(self):
"""Generates corner bet Outcomes"""
for col in (1, 2):
for r in range(11):
n = (3 * r) + col
oc = self.fact.make(f"Corner {n}-{n+1}-{n+3}-{n+4}", (8))
for i in [0, 1, 3, 4]:
self.wheel.add_outcome(n + i, oc)
def generate_line_bets(self):
"""Generates line bet Outcomes"""
for r in range(11):
n = (3 * r) + 1
oc = self.fact.make(f"Line {n}-{n+1}-{n+2}-{n+3}-{n+4}-{n+5}", (5))
for i in range(6):
self.wheel.add_outcome(n + i, oc)
def generate_dozen_bets(self):
"""Generates dozen bet Outcomes"""
for d in range(3):
oc = self.fact.make(f'Dozen {d+1}', (2))
for n in range(12):
number = (12 * d) + n + 1
self.wheel.add_outcome(number, oc)
def generate_column_bets(self):
"""Generates column bet Outcomes"""
for c in range(3):
oc = self.fact.make(f'Column {c+1}', (2))
for r in range(12):
number = (3 * r) + c + 1
self.wheel.add_outcome(number, oc)
def generate_even_money_bets(self):
"""Generates even money bet Outcomes"""
red = self.fact.make('Red', 1)
black = self.fact.make('Black', 1)
even = self.fact.make('Even', 1)
odd = self.fact.make('Odd', 1)
high = self.fact.make('High', 1)
low = self.fact.make('Low', 1)
for n in range(1, 37):
if n < 19:
self.wheel.add_outcome(n, low)
else:
self.wheel.add_outcome(n, high)
if n % 2:
self.wheel.add_outcome(n, odd)
else:
self.wheel.add_outcome(n, even)
if n in [1, 3, 5, 7, 9, 12, 14, 16, 18,
19, 21, 23, 25, 30, 32, 34, 36]:
self.wheel.add_outcome(n, red)
else:
self.wheel.add_outcome(n, black)
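# Hedged usage sketch (not part of the original module): building a Wheel runs
# BinBuilder.build_bins via Wheel.__init__, so every Bin already carries the
# straight, split, street, corner, line, dozen, column and even-money Outcomes
# generated above; this helper just lists the Outcome names attached to one number.
def _demo_bin_outcomes(number=1):
    wheel = Wheel()
    bin_ = wheel.get(number)
    return sorted(oc.name for oc in bin_.outcomes)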
class RouletteTable(Table):
"""The :class:'Table' in a game of roulette consisting of a :class:'Wheel'
and :class:'Bet's."""
def __init__(self, _min=10, _max=500):
"""Initialize a new :class:'Table' with a :class:'Wheel' and limits."""
super(RouletteTable, self).__init__()
self.min, self.max, self.bets = (_min, _max, list())
def build_components(self):
self.wheel = Wheel()
def is_valid(self, bet):
"""Determines whether the bet placed on the :class:'Table' is valid.
A :class:'Bet' is valid if the sum of the current :class:'Bet' and all
other :class:'Bet's placed on the table is greater than or equal to the
:class:'Table' minimum but less than or equal to the maximum.
"""
if not(self.max >= bet.amount >= self.min):
return False
total = 0
for current in self:
total += current.amount
return self.max >= (total + bet.amount)
class RoulettePlayer(Player):
rounds = 250
def set_rounds(self, rounds):
self.rounds = rounds
def can_continue(self):
bet = self.make_bet()
return self.rounds > 0 and self.can_bet(bet)
def place_bet(self):
self.rounds -= 1
bet = self.make_bet()
if not (self.can_continue() and self.can_bet(bet)):
return
self.stake -= bet.amount
self.table.place_bet(bet)
class Passenger57(RoulettePlayer):
"""A :class:'Player' who always bets on black."""
def __init__(self, table):
"""Instantiates a new :class:'Passenger57' :class:'Player'."""
super(Passenger57, self).__init__(table)
self.black = self.table.wheel.get_outcome('Black')
def make_bet(self):
if not (self.table.min > self.stake):
amount = random.randint(self.table.min, self.stake)
else:
amount = 50
return Bet(amount, self.black)
def win(self, bet):
super(Passenger57, self).win(bet)
print(f"Player won: ${bet.win_amount()}.00 from a ${bet.amount}.00 bet.")
def lose(self, bet):
super(Passenger57, self).lose(bet)
print(f'Player lost: ${bet.lose_amount()}.00')
class Martingale(RoulettePlayer):
loss_count = 0
def __init__(self, table, wager=10):
super(Martingale, self).__init__(table)
self.wager = wager
# define a random wager? table.min <==> table.max * 0.1
def make_bet(self):
amount = self.wager * (2**self.loss_count)
outcome = self.table.wheel.get_random_outcome()
return Bet(amount, outcome)
def win(self, bet):
super(Martingale, self).win(bet)
self.loss_count = 0
# print(self.rounds, self.loss_count, self.stake)
def lose(self, bet):
self.loss_count += 1
# print(self.rounds, self.loss_count, self.stake)
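# Hedged illustration of the Martingale progression implemented above: after k
# consecutive losses the next wager is wager * 2**k, so with the default wager of
# 10 the stakes grow 10, 20, 40, 80, ... until a win resets loss_count to 0.
def _demo_martingale_wagers(wager=10, losses=4):
    return [wager * (2 ** k) for k in range(losses + 1)]  # [10, 20, 40, 80, 160]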
class RouletteGame(Game):
"""The game"""
table = None
def __init__(self, configurations):
super(RouletteGame, self).__init__(configurations)
def build_components(self, configurations):
limits = configurations['table_limits']
self.table = RouletteTable(limits["min"], limits["max"])
def cycle(self, player):
if not isinstance(player, RoulettePlayer):
raise InvalidObjectError
player.place_bet()
win_bin = self.table.wheel.next()
for bet in self.table:
if bet.outcome in win_bin.outcomes:
player.win(bet)
else:
player.lose(bet)
self.table.clear()
class RouletteSimulator(Simulator):
"""A :class:'RouletteSimulator' responsible for simulating game sessions."""
init_duration = 250
    init_stake
subtrees must also be binary search trees.
Example 1:
2
/ \
1 3
Input: root = [2,1,3]
Output: true
Example 2:
Input: root = [5,1,4,null,null,3,6]
Output: false
Explanation: The root node's value is 5 but its right child's value is 4.
"""
def isValidBST(self, root: Optional[TreeNode]) -> bool:
def fn(root, floor=float("-inf"), ceiling=float("inf")):
if not root:
return True
if root.val <= floor or root.val >= ceiling:
return False
# in the left branch, root is the new ceiling;
# contrarily root is the new floor in right branch
return fn(root.left, floor, root.val) and fn(root.right, root.val, ceiling)
return fn(root)
def isValidBST_(self, root: Optional[TreeNode]) -> bool:
def fn(node, lo=-inf, hi=inf):
if not node:
return True
return (
fn(node.left, lo, node.val)
and lo < node.val < hi
and fn(node.right, node.val, hi)
)
return fn(root)
r"""
# - Kth Smallest Element in a BST -
# https://leetcode.com/problems/kth-smallest-element-in-a-bst/
Given the root of a binary search tree, and an integer k, return the kth
smallest value (1-indexed) of all the values of the nodes in the tree.
Example 1:
3
/ \
1 4
\
2
Input: root = [3,1,4,null,2], k = 1
Output: 1
Example 2:
5
/ \
3 6
/ \
2 4
/
1
Input: root = [5,3,6,2,4,null,null,1], k = 3
Output: 3
Follow up: If the BST is modified often (i.e., we can do insert and delete
operations) and you need to find the kth smallest frequently, how would
you optimize?
"""
def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:
# O(n) time, O(n) space
def inorder(r):
return inorder(r.left) + [r.val] + inorder(r.right) if r else []
return inorder(root)[k - 1] # nth smallest is nth element of inorder traverse 🤯
def kthSmallest_(self, root: Optional[TreeNode], k: int) -> int:
# O(H + k) time, O(H) space
# NOTE O(log N + k) time for balanced tree, O(N + k) for unbalanced on left
node = root
stack: List[TreeNode] = []
while node or stack:
if node:
stack.append(node)
node = node.left
continue
node = stack.pop()
if (k := k - 1) == 0:
return node.val
node = node.right
raise Exception
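    # Follow-up from the problem statement above: if the BST is modified often and
    # kth-smallest is queried frequently, a common approach (an assumption here, not
    # part of the original solutions) is to augment each node with the size of its
    # left subtree; inserts and deletes update the counts along the root-to-node path,
    # and each query walks down comparing k against those sizes, giving O(H) per
    # operation instead of re-running a full inorder traversal.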
r"""
# - Lowest Common Ancestor of BST -
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree/
Given a binary search tree (BST), find the lowest common ancestor (LCA) of two
given nodes in the BST.
According to the definition of LCA on Wikipedia: “The lowest common ancestor is
defined between two nodes p and q as the lowest node in T that has both p and q as
descendants (where we allow a node to be a descendant of itself).”
Example 1:
6
/ \
2 8
/ \ / \
0 4 7 9
/ \
3 5
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
Output: 6
Explanation: The LCA of nodes 2 and 8 is 6.
Example 2:
6
/ \
2 8
/ \ / \
0 4 7 9
/ \
3 5
Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
Output: 2
Explanation: The LCA of nodes 2 and 4 is 2, since a node can be a descendant of
itself according to the LCA definition.
Example 3:
Input: root = [2,1], p = 2, q = 1
Output: 2
"""
def lowestCommonAncestor(
self, root: TreeNode, p: TreeNode, q: TreeNode
) -> TreeNode: # type: ignore
# O(n) time, O(1) space
node = root
while node:
if p.val > node.val and q.val > node.val:
node = node.right # type:ignore
elif p.val < node.val and q.val < node.val:
node = node.left # type:ignore
else:
return node
raise Exception
"""
# - Implement Trie (Prefix Tree) -
# https://leetcode.com/problems/implement-trie-prefix-tree/
A trie (pronounced as "try") or prefix tree is a tree data structure used to
efficiently store and retrieve keys in a dataset of strings. There are various
applications of this data structure, such as autocomplete and spellchecker.
Implement the Trie class:
- Trie() Initializes the trie object.
- void insert(String word) Inserts the string word into the trie.
- boolean search(String word) Returns true if the string word is in the trie
(i.e., was inserted before), and false otherwise.
- boolean startsWith(String prefix) Returns true if there is a previously
inserted string word that has the prefix prefix, and false otherwise.
Example 1:
Input
["Trie", "insert", "search", "search", "startsWith", "insert", "search"]
[[], ["apple"], ["apple"], ["app"], ["app"], ["app"], ["app"]]
Output
[null, null, true, false, true, null, true]
Explanation
Trie trie = new Trie();
trie.insert("apple");
trie.search("apple"); // return True
trie.search("app"); // return False
trie.startsWith("app"); // return True
trie.insert("app");
trie.search("app"); // return True
"""
class Trie:
# O(m) time where m is string len, O(1) space
def __init__(self):
self.trie = TrieNode()
def insert(self, word):
reduce(lambda d, k: d[k], word, self.trie)["end"] = True # type: ignore
def search(self, word):
return reduce(
lambda d, k: d[k] if k in d else TrieNode(), word, self.trie
).get("end", False)
def startsWith(self, word):
return bool(
reduce(
lambda d, k: d[k] if k in d else TrieNode(), word, self.trie
).keys()
)
class Trie_:
def __init__(self):
self.root = TrieNode_()
def insert(self, word: str) -> None:
node = self.root
for i in word:
if i not in node.children:
node.children[i] = TrieNode_()
node = node.children[i]
node.word = True
def search(self, word: str) -> bool:
node = self.root
for i in word:
if i not in node.children:
return False
node = node.children[i]
return node.word
def startsWith(self, prefix: str) -> bool:
node = self.root
for i in prefix:
if i not in node.children:
return False
node = node.children[i]
return True
class Trie__:
# NOTE all inputs assumed to consist of lowercase letters a-z.
# NOTE all inputs are guaranteed to be non-empty strings.
def __init__(self):
# TODO recursive type pyright
# Node = Dict[str, "Node"]
self.root = {}
def insert(self, word: str) -> None:
node = self.root
for char in word:
node = node.setdefault(char, {})
node["$"] = word
def search(self, word: str) -> bool:
node = self.root
for char in word:
if char not in node:
return False
node = node[char]
return bool(node.get("$")) # TODO this passes without the bool???????
def startsWith(self, prefix: str) -> bool:
node = self.root
for char in prefix:
if char not in node:
return False
node = node[char]
return True
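    # Illustration (assumed example, not from the original file): after
    #   t = Trie__(); t.insert("app"); t.insert("apple")
    # t.root holds the nested dict
    #   {'a': {'p': {'p': {'$': 'app', 'l': {'e': {'$': 'apple'}}}}}}
    # search("app") walks a -> p -> p and finds the '$' sentinel, while
    # startsWith("ap") only requires that the path of characters exists.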
"""
# - Add and Search Word -
# https://leetcode.com/problems/add-and-search-word-data-structure-design/
Design a data structure that supports adding new words and finding if a string
matches any previously added string.
Implement the WordDictionary class:
WordDictionary() Initializes the object.
void addWord(word) Adds word to the data structure, it can be matched later.
bool search(word) Returns true if there is any string in the data structure that
matches word or false otherwise. word may contain dots '.' where dots can be
matched with any letter.
Example:
Input
["WordDictionary","addWord","addWord","addWord","search","search","search","search"]
[[],["bad"],["dad"],["mad"],["pad"],["bad"],[".ad"],["b.."]]
Output
[null,null,null,null,false,true,true,true]
Explanation
WordDictionary wordDictionary = new WordDictionary();
wordDictionary.addWord("bad");
wordDictionary.addWord("dad");
wordDictionary.addWord("mad");
wordDictionary.search("pad"); // return False
wordDictionary.search("bad"); // return True
wordDictionary.search(".ad"); // return True
wordDictionary.search("b.."); // return True
"""
class WordDictionary:
# NOTE slow timeouts sometimes
def __init__(self):
self.trie = {}
def addWord(self, word: str) -> None:
node = self.trie
for ch in word:
node = node.setdefault(ch, {})
node["$"] = word
def search(self, word: str) -> bool:
def fn(node, i):
if not node:
return False
if i == len(word):
return node.get("$")
if word[i] == ".":
return any(fn(node[k], i + 1) for k in node if k != "$")
return fn(node.get(word[i]), i + 1)
return fn(self.trie, 0)
"""
# - Word Search II -
# https://leetcode.com/problems/word-search-ii/
Given an m x n board of characters and a list of strings words, return all
words on the board.
Each word must be constructed from letters of sequentially adjacent cells,
where adjacent cells are horizontally or vertically neighboring. The same
letter cell may not be used more than once in a word.
Example 1:
Input: board = [["o","a","a","n"],
| |
import os
import numpy as np
import netCDF4
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib
import matplotlib.collections as mcollections
import matplotlib.patches as patches
from matplotlib.path import Path
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib._png import read_png
def add_logo(imagepath, ax, position, zoom, zorder):
"""Add an image on the figure
:param imagepath: path to the image to add
:param ax: axes object
:param position: relative position on the map
:param zoom: zoom level
:param zorder:
:return:
"""
logo2plot = read_png(imagepath)
imagebox = OffsetImage(logo2plot, zoom=zoom)
# coordinates to position this image
ab = AnnotationBbox(imagebox, position,
xybox=(0., 0.),
xycoords='data',
pad=0.0,
boxcoords="offset points")
ab.zorder = zorder
ax.add_artist(ab)
def create_rect_patch(coordinates, m, **kwargs):
"""
Create a rectangular patch to add on the map
:param coordinates:
:param m: Basemap object
:return: patch
"""
xr1, yr1 = m(coordinates[0], coordinates[2])
xr2, yr2 = m(coordinates[0], coordinates[3])
xr3, yr3 = m(coordinates[1], coordinates[3])
xr4, yr4 = m(coordinates[1], coordinates[2])
verts = [(xr1, yr1), (xr2, yr2), (xr3, yr3), (xr4, yr4), (xr1, yr1), ]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY, ]
path = Path(verts, codes)
patch = patches.PathPatch(path, **kwargs)
return patch
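# Hedged usage sketch for create_rect_patch; the bounding box, projection and
# styling below are illustrative assumptions, not values taken from this project.
def _example_rect_patch():
    coordinates = (-1.0, 1.0, 36.5, 38.0)  # lonmin, lonmax, latmin, latmax
    m = Basemap(llcrnrlon=coordinates[0], urcrnrlon=coordinates[1],
                llcrnrlat=coordinates[2], urcrnrlat=coordinates[3],
                resolution='l')
    fig, ax = plt.subplots()
    patch = create_rect_patch(coordinates, m, facecolor='none',
                              edgecolor='k', linewidth=1.5)
    ax.add_patch(patch)
    ax.set_xlim(coordinates[0], coordinates[1])
    ax.set_ylim(coordinates[2], coordinates[3])
    return fig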
def extract_coastline(coordinates, filename, res='i'):
"""
Extract the coastline in a region delimited by a bounding box
and save the result in a text file
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:param filename: name of the file where the coastline will be saved
:param res: resolution for the extraction
:return:
"""
x, y = gshhs.get_coastline(xlim=[coordinates[0], coordinates[1]],
ylim=[coordinates[2], coordinates[3]],
res=res)
# Save the coastline
np.savetxt(filename, np.ma.vstack((x, y)).T)
def load_coast(coastfile, valex=-999.):
"""
Read coastline coordinates from an existing file
:param coastfile: name of the file containing the coastline
:param valex: exclusion value
:return: lon, lat
"""
lon, lat = np.loadtxt(coastfile, usecols=(0, 1), unpack=True)
lon[lon == valex] = np.nan
lat[lat == valex] = np.nan
return lon, lat
def load_coast_gshhs(coastfile, coordinates, valex=-999.):
"""
Read coastline coordinates from an existing file in a region delimited
by a bounding box
:param coastfile: name of the file containing the coastline
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:param valex: exclusion value
:return: lon, lat
"""
lon, lat = np.loadtxt(coastfile, usecols=(0, 1), unpack=True)
goodcoord = np.where((lon >= coordinates[0]) & (lon <= coordinates[1]) &
(lat >= coordinates[2]) & (lat <= coordinates[3]))[0]
goodcoord2 = np.where(np.logical_or((lon == valex), (lat == valex)))[0]
goodcoord = np.union1d(goodcoord, goodcoord2)
lon, lat = lon[goodcoord], lat[goodcoord]
lon[lon == valex] = np.nan
lat[lat == valex] = np.nan
return lon, lat
def alborex_load_bathy(bathyfile, coordinates):
"""
Load bathymetry from a netCDF file in a select region
delimited by a list of coordinates
:param bathyfile: name of the netCDF file
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:return:
"""
with netCDF4.Dataset( bathyfile, 'r') as nc:
lon = nc.variables['longitude'][:]
lat = nc.variables['latitude'][:]
depth = nc.variables['depth'][:]
# subset
goodlon = np.where(np.logical_and((lon >= coordinates[0]),
(lon <= coordinates[1])))[0]
goodlat = np.where(np.logical_and((lat >= coordinates[2]),
(lat <= coordinates[3])))[0]
lon = lon[goodlon]
lat = lat[goodlat]
depth = depth[goodlat, :]
depth = depth[:, goodlon]
return lon, lat, depth
def load_altimetry(altimetryfile, coordinates):
"""
:param altimetryfile:
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:return:
"""
with netCDF4.Dataset(altimetryfile, 'r') as nc:
lon = nc.variables['lon'][:] - 360.
lat = nc.variables['lat'][:]
u = np.squeeze(nc.variables['u'][:])
v = np.squeeze(nc.variables['v'][:])
# subset
goodlon = np.where(np.logical_and((lon >= coordinates[0]),
(lon <= coordinates[1])))[0]
goodlat = np.where(np.logical_and((lat >= coordinates[2]),
(lat <= coordinates[3])))[0]
lon = lon[goodlon]
lat = lat[goodlat]
u = u[goodlat, :]
u = u[:, goodlon]
v = v[goodlat, :]
v = v[:, goodlon]
return lon, lat, u, v
def load_sst(sstfile, coordinates):
"""Return the coordinates, the SST, the time and the satellite name,
given a data file and a list of coordinates delimiting a bounding box
:param sstfile: name of the netCDF4 file containing the data
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:return:
"""
with netCDF4.Dataset(sstfile, 'r') as nc:
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
timesst = nc.variables['time'][:]
# subset
goodlon = np.where(np.logical_and((lon >= coordinates[0]),
(lon <= coordinates[1])))[0]
goodlat = np.where(np.logical_and((lat >= coordinates[2]),
(lat <= coordinates[3])))[0]
lon = lon[goodlon]
lat = lat[goodlat]
sst = np.squeeze(nc.variables['mcsst'][:, goodlat, goodlon])
mask = nc.variables['lsmask'][:, goodlat, goodlon].squeeze()
sst = np.ma.masked_where(mask == 1, sst)
timesst *= 60.
sat = nc.satellite
sensor = nc.sensor_name
sensorsat = sensor.upper() + ' on ' + sat.upper()
return lon, lat, sst, timesst, sensorsat
def load_sst_l2(filename):
"""
Load the SST from netCDF L2 file obtained from
https://oceancolor.gsfc.nasa.gov
:param filename: name of the netCDF file
:return: lon, lat, sst, sstflag, sstyear, sstday
"""
if os.path.exists(filename):
with netCDF4.Dataset(filename) as nc:
# Read platform
sat = nc.platform
# Read time information
# Assume all the measurements made the same day (and same year)
year = nc.groups['scan_line_attributes'].variables['year'][0]
day = nc.groups['scan_line_attributes'].variables['day'][0]
# Read coordinates
lon = nc.groups['navigation_data'].variables['longitude'][:]
lat = nc.groups['navigation_data'].variables['latitude'][:]
# Read geophysical variables
try:
sst = nc.groups['geophysical_data'].variables['sst'][:]
sstqual = nc.groups['geophysical_data'].variables['qual_sst'][:]
except KeyError:
sst = nc.groups['geophysical_data'].variables['sst4'][:]
sstqual = nc.groups['geophysical_data'].variables['qual_sst4'][:]
else:
lon, lat, sst, sstqual, year, day, sat = [], [], [], [], [], [], []
return lon, lat, sst, sstqual, year, day, sat
def load_sst_l2_old(sstfile):
"""
Load the SST from netCDF L2 file obtained from
https://oceancolor.gsfc.nasa.gov
:param sstfile: name of the netCDF file
:return: lon, lat, sst, sstflag, sstyear, sstday
"""
if 'SST4' in sstfile:
sstname = 'Geophysical_Data_sst4'
sstflagname = 'Geophysical_Data_qual_sst4'
else:
sstname = 'Geophysical_Data_sst'
sstflagname = 'Geophysical_Data_qual_sst'
with netCDF4.Dataset(sstfile, 'r') as nc:
lon = nc.variables['Navigation_Data_longitude'][:]
lat = nc.variables['Navigation_Data_latitude'][:]
sst = nc.variables[sstname][:] * 0.005
sstflag = nc.variables[sstflagname][:]
sst = np.ma.masked_where(sstflag > 1, sst)
sstyear = nc.Start_Year
sstday = nc.Start_Day
return lon, lat, sst, sstflag, sstyear, sstday
def plot_sst_leaflet(lon, lat, sst, figname, coordinates, **kwargs):
    """
    Plot the SST field without axes or frame so the png can be used as a Leaflet overlay
    :param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
    """
    m = Basemap(llcrnrlon=coordinates[0],
                llcrnrlat=coordinates[2],
                urcrnrlon=coordinates[1],
                urcrnrlat=coordinates[3], resolution='l', epsg=3857)
llon, llat = m(lon, lat)
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
m.pcolormesh(llon, llat, sst, **kwargs)
ax.axis('off')
#ax.set_xlim(lon.min(), lon.max())
#ax.set_ylim(lat.min(), lat.max())
f1 = plt.gca()
f1.axes.get_xaxis().set_ticks([])
f1.axes.get_yaxis().set_ticks([])
plt.savefig(figname, transparent=True,
bbox_inches='tight', pad_inches=0)
plt.close()
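# Hedged usage sketch for plot_sst_leaflet; the file name, bounding box and colour
# limits are illustrative assumptions.
def _example_plot_sst_leaflet(sstfile='sst_region.nc'):
    coordinates = (-2.0, 2.0, 36.0, 38.5)  # lonmin, lonmax, latmin, latmax
    lon, lat, sst, timesst, sensorsat = load_sst(sstfile, coordinates)
    llon, llat = np.meshgrid(lon, lat)
    plot_sst_leaflet(llon, llat, sst, 'sst_leaflet.png', coordinates,
                     cmap=plt.cm.viridis, vmin=14., vmax=24.)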
def load_ctd(ctdfile):
"""
Load the coordinates (lon, lat, depth), the temperature and chlorophyll concentration
from the selected netCDF file
:param ctdfile: name of the netCDF file
:return: lon, lat, depth, temp, chloro
"""
with netCDF4.Dataset(ctdfile, 'r') as nc:
lon = nc.variables['LON'][:]
lat = nc.variables['LAT'][:]
depth = nc.variables['DEPTH'][:]
time = nc.variables['time'][:]
temp = nc.variables['WTR_TEM_01'][:]
chloro = nc.variables['CHLO'][:]
chloro = np.ma.masked_where(np.isnan(chloro), chloro)
return lon, lat, depth, time, temp, chloro
def load_glider_data(gliderfile, NN=1):
"""
Load the coordinates and the temperature from a glider file
:param gliderfile: name of the netCDF file
:param NN: sub-sampling factor (keep 1 out of NN measurements)
"""
with netCDF4.Dataset(gliderfile, 'r') as nc:
lon = nc.variables['longitude'][::NN]
lat = nc.variables['latitude'][::NN]
depth = nc.variables['depth'][::NN]
temperature = nc.variables['temperature'][::NN]
return lon, lat, depth, temperature
def load_glider_coord(gliderfile):
"""
Load the coordinates from a glider file
:param gliderfile: name of the glider netCDF file
:return: lon: longitude
:return: lat: latitude
:return: depth: depth
:return: time: time
"""
with netCDF4.Dataset(gliderfile, 'r') as nc:
lon = nc.variables['longitude'][:]
lat = nc.variables['latitude'][:]
depth = nc.variables['depth'][:]
time = nc.variables['time'][:]
return lon, lat, depth, time
def change_wall_prop(ax, coordinates, depths, angles):
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_xaxis.gridlines.set_linestyles(':')
ax.w_yaxis.gridlines.set_linestyles(':')
ax.w_zaxis.gridlines.set_linestyles(':')
ax.view_init(angles[0], angles[1])
ax.set_xlim(coordinates[0], coordinates[1])
ax.set_ylim(coordinates[2], coordinates[3])
ax.set_zlim(depths[0], depths[1])
ax.set_zlabel('Depth (m)')
ax.set_zticks(np.arange(depths[0], depths[1] + 10, depths[2]))
ax.set_zticklabels(range(int(-depths[0]), -int(depths[1]) - 10, -int(depths[2])))
def read_l2_wind(windfile, coordinates):
"""
Read the L2 wind from a netCDF file
given a list of coordinates delimiting a bounding box
:param windfile: netCDF file containing the data
:param coordinates: coordinates delimiting a bounding box (lonmin, lonmax, latmin, latmax)
:return: lon, lat, uwind, vwind, windtime
"""
# Open NetCDF file
with netCDF4.Dataset(windfile) as nc:
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
windspeed = nc.variables['wind_speed'][:]
winddirection = nc.variables['wind_dir'][:]
windtime = nc.variables['time'][:]
# Change longitudes
lon[lon > 180] -= 360.0
# Reduce dimensions
lon = lon.flatten()
lat = lat.flatten()
windspeed = windspeed.flatten()
winddirection = winddirection.flatten()
# Select sub-region and check if data inside the area of interest
goodlon = np.nonzero(np.logical_and(lon <= coordinates[1],
lon >= coordinates[0]))
goodlon = goodlon[0]
if goodlon.size != 0:
lat = lat[goodlon]
lon = lon[goodlon]
windspeed = windspeed[goodlon]
winddirection = -winddirection[goodlon] + 90.
goodlat = np.nonzero(np.logical_and(lat <= coordinates[3],
lat >= coordinates[2]))
goodlat = goodlat[0]
        if goodlat.size != 0:
remote profiling of Solr instances
Returns:
hosts - A list of hosts for the SolrCloud cluster.
"""
hosts = _lookup_hosts(cluster, True)
numNodesPerHost = int(nodesPerHost)
totalNodes = numNodesPerHost * len(hosts)
_info('Setting up %d SolrCloud nodes on cluster: %s with hosts: %s' % (totalNodes, cluster, hosts))
cloud = _provider_api(cluster)
# setup/start zookeeper
zkHosts = []
if zk is None:
# just run 1 node on the first host
numZkNodes = int(zkn) if zkn is not None else 1
if numZkNodes > len(hosts):
_warn('Number of requested local ZooKeeper nodes %d exceeds number of available hosts! Using %d instead.' % (numZkNodes, len(hosts)))
zkHosts = _zk_ensemble(cluster, hosts[0:numZkNodes])
else:
zkHosts = _get_zk_hosts(cloud, zk)
if len(zkHosts) == 0:
_fatal('No ZooKeeper hosts found!')
zkHost = ','.join(zkHosts)
# chroot the znodes for this cluster
zkHost += ('/' + cluster)
_info('ZooKeeper connection string for cluster: '+zkHost)
instance_type = _get_instance_type(cloud, cluster)
if solrJavaMemOpts is None:
solrJavaMemOpts = _get_solr_java_memory_opts(instance_type, numNodesPerHost)
remoteSolrDir = _env(cluster, 'solr_tip')
remoteSolrJavaHome = _env(cluster, 'solr_java_home')
# make sure the solr-scale-tk shell scripts are up-to-date on the remote servers
exportCloudEnv = ('''#!/bin/bash
export SOLR_JAVA_HOME="%s"
if [ -z "$JAVA_HOME" ]; then
export JAVA_HOME=$SOLR_JAVA_HOME
fi
export SOLR_TOP="%s"
export ZK_HOST="%s"
export CLOUD_SCRIPTS_DIR="$SOLR_TOP/cloud84/scripts/cloud-scripts"
export SOLRCLOUD_CLUSTER="%s"
export NODES_PER_HOST=%d
export SOLR_JAVA_MEM="%s"
''' % (remoteSolrJavaHome, remoteSolrDir, zkHost, cluster, numNodesPerHost, solrJavaMemOpts))
is_private = _is_private_subnet(cluster)
# write the include file for the bin/solr script
solrInSh = _get_solr_in_sh(cluster, remoteSolrJavaHome, solrJavaMemOpts, zkHost, is_private, yjp_path=yjp_path)
solrInShPath = remoteSolrDir+'/bin/solr.in.sh'
sstkEnvScript = _env(cluster, 'SSTK_ENV')
sstkScript = _env(cluster, 'SSTK')
binSolrScript = remoteSolrDir + '/bin/solr'
cloudDir = _env(cluster, 'sstk_cloud_dir')
solrTip = _env(cluster, 'solr_tip')
solrVersion = os.path.basename(solrTip).split("-")[1]
for host in hosts:
with settings(host_string=host), hide('output', 'running'):
run('mkdir -p '+cloudDir+' || true')
run('rm -f ' + sstkEnvScript)
_fab_append(sstkEnvScript, exportCloudEnv)
run('chmod +x ' + sstkEnvScript)
put('./'+CTL_SCRIPT, cloudDir)
run('chmod +x ' + sstkScript)
run('rm -f '+solrInShPath)
_fab_append(solrInShPath, solrInSh)
#Bootstrap ZK
if StrictVersion(solrVersion) >= StrictVersion("6.4.2"):
with shell_env(JAVA_HOME=remoteSolrJavaHome), settings(host_string=hosts[0]), hide('output', 'running'):
_info(remoteSolrDir + '/bin/solr zk mkroot ' + cluster + ' -z ' + ','.join(zkHosts))
run(remoteSolrDir + '/bin/solr zk mkroot ' + cluster + ' -z ' + ','.join(zkHosts))
else:
with shell_env(JAVA_HOME=remoteSolrJavaHome), settings(host_string=hosts[0]), hide('output', 'running'):
_info(remoteSolrDir+'/server/scripts/cloud-scripts/zkcli.sh -zkhost '+zkHost+' -cmd bootstrap -solrhome '+remoteSolrDir+'/server/solr')
run(remoteSolrDir+'/server/scripts/cloud-scripts/zkcli.sh -zkhost '+zkHost+' -cmd bootstrap -solrhome '+remoteSolrDir+'/server/solr')
metaHost = None
# setup N Solr nodes per host using the script to do the actual starting
numStores = instanceStoresByType[instance_type]
if numStores is None:
numStores = 1
if numStores == 0:
# need to check if they mounted ebs vols
with settings(host_string=hosts[0]), hide('output', 'running', 'warnings'):
if _fab_exists('/vol3'):
numStores = 4
elif _fab_exists('/vol2'):
numStores = 3
elif _fab_exists('/vol1'):
numStores = 2
elif _fab_exists('/vol0'):
numStores = 1
if numStores > 0:
_status('Found %d mounted EBS volumes.' % numStores)
solrHostAndPorts = []
for host in hosts:
with settings(host_string=host), hide('output', 'running', 'warnings'):
#_integ_host_with_meta(cluster, host, metaHost)
for p in range(0, numNodesPerHost):
solrPort = str(84 + p)
solrHostAndPorts.append(host + ':89' + solrPort)
volIndex = p
if volIndex >= numStores:
volIndex = volIndex % numStores
remoteSetupCmd = '%s setup %s %d' % (sstkScript, solrPort, volIndex)
_status('Running setup on '+host+': ' + remoteSetupCmd)
run(remoteSetupCmd)
time.sleep(2)
for x in range(0, numNodesPerHost):
solrPortUniq = str(84 + x)
solrPort = '89' + solrPortUniq
solrDir = 'cloud'+solrPortUniq
remoteStartCmd = '%s start -cloud -p %s -d %s' % (binSolrScript, solrPort, solrDir)
_status('Running start on '+host+': ' + remoteStartCmd)
run(remoteSolrDir+'/bin/solr stop -p '+solrPort+' || true')
startScriptOutput = remoteSolrDir+'/cloud84/logs/solr-startup.out'
_runbg(remoteStartCmd, startScriptOutput)
time.sleep(2)
_info('Started Solr on port '+solrPort+' on '+host+'; check '+startScriptOutput+' if Solr is not running.')
# wait until the Solr servers report they are up
_status('Solr instances launched ... waiting up to %d seconds to see %d Solr servers come online.' % (180, totalNodes))
_wait_to_see_solr_up_on_hosts(solrHostAndPorts,180)
cloud.close()
return hosts
def is_solr_up(cluster):
"""
Quick check to see if Solr is responding to HTTP requests on all nodes in the cluster.
"""
hosts = _lookup_hosts(cluster)
numNodes = _num_solr_nodes_per_host(cluster)
solrHostAndPorts = []
for h in hosts:
for n in range(0,numNodes):
solrHostAndPorts.append(h + ':89' + str(84 + n))
_wait_to_see_solr_up_on_hosts(solrHostAndPorts, 5)
# create a collection
def new_collection(cluster, name, rf=1, shards=1, conf='_default', existingConfName=None):
"""
Create a new collection in the specified cluster.
This command assumes the configuration given by the conf parameter has already been uploaded
to ZooKeeper.
Arg Usage:
cluster: Identifies the SolrCloud cluster you want to create the collection on.
name: Name of the collection to create.
rf (int, optional): Replication factor for the collection (number of replicas per shard)
shards (int, optional): Number of shards to distribute this collection across
Returns:
collection stats
"""
hosts = _lookup_hosts(cluster)
numNodes = _num_solr_nodes_per_host(cluster)
remoteSolrDir = _env(cluster, 'solr_tip')
with settings(host_string=hosts[0]), hide('output', 'running', 'warnings'):
confParam = '-d '+conf
if existingConfName is not None:
confParam = '-n '+existingConfName
run(remoteSolrDir+'/bin/solr create -c %s -rf %s -shards %s %s' % (name, str(rf), str(shards), confParam))
cluster_status(cluster,name)
def delete_collection(cluster, name):
"""
Delete a collection from the specified cluster.
Arg Usage:
cluster: Identifies the SolrCloud cluster you want to delete the collection from.
name: Name of the collection to delete.
"""
hosts = _lookup_hosts(cluster, False)
deleteAction = 'http://%s:8984/solr/admin/collections?action=DELETE&name=%s' % (hosts[0], name)
_info('Delete the collection named %s using:\n%s' % (name, deleteAction))
try:
response = urllib2.urlopen(deleteAction)
solr_resp = response.read()
_info('Delete collection succeeded\n' + solr_resp)
except urllib2.HTTPError as e:
_error('Delete collection named %s failed due to: %s' % (name, str(e)) + '\n' + e.read())
def cluster_status(cluster, collection=None, shard=None):
"""
Retrieve status for the specified cluster.
Arg Usage:
cluster: Identifies the SolrCloud cluster you want to get status for.
collection (optional): restricts status info to this collection.
shard (optional, comma-separated list): restricts status info to this shard/set of shards.
"""
hosts = _lookup_hosts(cluster, False)
params = ''
    if collection is not None:
        params += '&collection=' + collection
    if shard is not None:
        params += '&shard=' + shard
listAction = 'http://%s:8984/solr/admin/collections?action=CLUSTERSTATUS%s' % (hosts[0], params)
_info('Retrieving cluster status using:\n%s' % listAction)
try:
response = urllib2.urlopen(listAction)
solr_resp = response.read()
_info(solr_resp)
except urllib2.HTTPError as e:
_error('Cluster status retrieval failed due to: %s' % str(e) + '\n' + e.read())
def new_zk_ensemble(cluster, n=3, instance_type='m3.medium', az=None, placement_group=None, customTags=None):
"""
Configures, starts, and checks the health of a ZooKeeper ensemble on one or more nodes in a cluster.
Arg Usage:
cluster (str): Cluster ID used to identify the ZooKeeper ensemble created by this command.
n (int, optional): Size of the cluster.
instance_type (str, optional):
Returns:
zkHosts: List of ZooKeeper hosts for the ensemble.
"""
paramReport = '''
*****
Launching new ZooKeeper ensemble with the following parameters:
cluster: %s
numInstances: %d
instance_type: %s
*****
''' % (cluster, int(n), instance_type)
_info(paramReport)
hosts = new_ec2_instances(cluster=cluster, n=n, instance_type=instance_type, az=az, placement_group=placement_group, customTags=customTags)
zkHosts = _zk_ensemble(cluster, hosts)
_info('Successfully launched new ZooKeeper ensemble')
return zkHosts
def setup_zk_ensemble(cluster):
"""
Configures, starts, and checks the health of a ZooKeeper ensemble in an existing cluster.
"""
cloud = _provider_api(cluster)
hosts = _cluster_hosts(cloud, cluster)
_verify_ssh_connectivity(hosts)
zkHosts = _zk_ensemble(cluster, hosts)
_info('Successfully launched new ZooKeeper ensemble')
return zkHosts
def kill(cluster):
"""
Terminate all running nodes of the specified cluster.
"""
cloud = _provider_api(cluster)
taggedInstances = _find_instances_in_cluster(cloud, cluster)
instance_ids = taggedInstances.keys()
if confirm('Found %d instances to terminate, continue? ' % len(instance_ids)):
cloud.terminate_instances(instance_ids)
cloud.close()
# update the local config to remove this cluster
sstk_cfg = _get_config()
sstk_cfg['clusters'].pop(cluster, None)
_save_config()
# pretty much just chains a bunch of commands together to create a new solr cloud cluster on demand
def new_solrcloud(cluster, n=1, zk=None, zkn=1, nodesPerHost=1, instance_type=None, ami=None, az=None, placement_group=None, yjp_path=None, auto_confirm=False, solrJavaMemOpts=None, purpose=None, project=None, customTags='{"CostCenter":"eng"}',rootEbsVolSize=None):
"""
Provisions n EC2 instances and then deploys SolrCloud; uses the new_ec2_instances and setup_solrcloud
commands internally to execute this command.
"""
if zk is None:
zkHost = '*NEW* %d instances' % int(zkn)
else:
cloud = _provider_api(cluster)
zkHosts = _get_zk_hosts(cloud, zk)
zkHost = zk + ': ' + (','.join(zkHosts))
cloud.close()
if az is None:
az = _env(cluster, 'AWS_AZ')
if ami is None:
ami = _env(cluster, 'AWS_HVM_AMI_ID')
paramReport = '''
*****
Launching new SolrCloud cluster with the following parameters:
cluster: %s
zkHost: %s
instance_type: %s
numInstances: %d
Solr nodesPerHost: %d
ami: %s
az: %s
placement_group: %s
*****
''' % (cluster, zkHost, instance_type, int(n), int(nodesPerHost), ami, az, placement_group)
_info(paramReport)
both models, check if there was a change inside the nodes
#for a deep comparison, serialize them
newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)
oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)
if newNodeSerialized != oldNodeSerialized:
#something is different, so return that node
diff["modifiedNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
#now check for deleted once, these appear in the old but not in the new
diff["deletedNodeIds"]=list(set(oldModel.keys())-set(newModel.keys()))
diff["handle"]=newHandle
return diff
def publish_status_msg(self, event):
"""
send out an event e.g. for status information
event to send looks like
event = { "id": 1123,
"event": "system.status"
"data:"{"nodeId":xx, "value":..,"function":... ...}
}
Args
event [string or dict]
"""
self.logger.debug(f"publish_status_msg ({event})")
self.modelUpdateCounter += 1
if type(event) is str:
#make sure the formatting is json compatible
event = event.replace("'",'"')# ' => "
event={"event":"system.status","data":{"text":event}}
event["id"]=self.modelUpdateCounter
for observerObject in self.observers:
observerObject.update(event)
def publish_event(self,event,desc=None,info=None):
"""
send an event out
Args
event [str] the event string
desc the node descriptor
            info [dict] additional fields to send out
"""
self.logger.debug(f"publish_event {event} : {desc}, {info}")
data = {}
if desc:
id = self.get_id(desc)
if not id: return
data["id"] = id
data["browsePath"] = self.get_browse_path(id)
data.update(info)
event = {"event":event,"data":data}
for observerObject in self.observers:
observerObject.update(event)
def disable_observers(self):
self.lock_model()
#with self.lock:
self.disableObserverCounter += 1
#self.logger.debug(f"disable_observers() {self.disableObserverCounter}")
def enable_observers(self):
self.release_model()
if self.disableObserverCounter >0:
self.disableObserverCounter -=1
else:
self.logger.error("enable_observers without disable observers")
#self.logger.debug(f"enable_observers() {self.disableObserverCounter}")
def notify_observers(self, nodeIds, properties, eventInfo={}):
"""
        public wrapper for __notify_observers, expert use only!
"""
#self.logger.info(f"notify observses(), {str_lim(nodeIds,50)}, {properties}")
return self.__notify_observers(nodeIds,properties,eventInfo)
def get_referencers(self,descList,deepLevel = 0):
"""
        get the references to this node by back-traversing the leaves algorithm:
        we first walk up through deepLevel levels of parents and from there we look back for referencers
        deepLevel is the number of extra parent levels to include: 1 means one extra level, 2 means two extra levels
Returns:
a list of referencers ids that point to the given descList nodes
"""
#convert all to nodes to ids
if type(descList) is not list:
descList = [descList]
startList = set([self.__get_id(node) for node in descList])
startList =set([node for node in startList if node]) #remove None and duplicates
referencers = set() #we collect the parents here and avoid duplicates
#in this first iteration we take the referencers pointing directly to the nodes or their parents
workList = startList.copy()
for level in range(deepLevel+1):
#from this level we take the backrefs
for id in workList:
referencers.update(self.model[id]["backRefs"])
#prepare parents for next round
parents=set()
for id in workList:
myParent=self.model[id]["parent"]
if myParent not in ["0","1"]: #root
parents.update([myParent]) #!use list to avoid break into chars
#now take the parents as currentList
workList = parents.copy()
            if not workList:
break #avoid turning cycles for nothing
#second step:
# now we take all final referencers and all referencers to those referencers with no limit
# (go back the leaves algorithm)
collectedReferencers = referencers.copy() # we take all we have so far
while True:
workList=set()
for id in referencers:
workList.update(self.model[id]["backRefs"])
collectedReferencers.update(workList)
if not workList:
break
else:
#one more round
referencers = workList.copy()
return list(collectedReferencers)
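    # Illustrative walk-through (node names assumed): for get_referencers("root.data.T",
    # deepLevel=1) the first loop collects the backRefs of root.data.T and of its parent
    # root.data; the while-loop then keeps adding the backRefs of the referencers found
    # so far ("go back the leaves"), so an observer that targets a referencer of
    # root.data is returned as well.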
def __notify_observers(self, nodeIds, properties, eventInfo={} ):
"""
this function is called internally when nodes or properties have changed. Then, we look if any
observer has to be triggered
we also increase the counter and time on the root.observers.modelObserver
Args:
nodeId: the nodeIds where a change occurred
properties: the property or list of properties of the node that has changed
"""
#exception for the progress node
if type(properties) is not list:
properties = [properties]
if type(nodeIds) is not list:
nodeIds = [nodeIds]
if self.disableObserverCounter>0:
#only one exception: progress works always
mustReturn = True
with self.lock:
for nodeId in nodeIds:
if self.model[nodeId]["name"] == "progress":
mustReturn = False
break
if mustReturn:
#self.logger.info(f"__notify_observers disable return {nodeIds} {properties}")
return
with self.lock:
# this is for the tree updates, any change is taken
self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies
collectedEvents=[]
enableTree = self.get_node("root.system.enableTreeUpdateEvents")
if enableTree and enableTree.get_value()==False:
pass
else:
# Notify all observers about the tree update, this is a standard
event = {
"id": self.modelUpdateCounter,
"event": "tree.update",
"data": ""}
collectedEvents.append(event) # send later
names =[self.model[id]["name"] for id in nodeIds]
self.logger.debug(f"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}")
            triggeredObservers=[] # used to suppress multiple triggers of the same observer; the list holds the observerIds already triggered in this call
#p=utils.Profiling("__notify.iterate_nodes")
            referencers = self.get_referencers(nodeIds,deepLevel=5)  # deepLevel 5: nodes can be organized by the user in a hierarchy
nodeId = self.__get_id(nodeIds[0])#take the first for the event string,
#p.lap(f"get refs for {nodeId}")
self.logger.debug(f"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}")
for id in referencers:
if self.model[id]["name"] == "targets" and self.model[self.model[id]["parent"]]["type"] == "observer":
# this referencers is an observer,
observerId = self.model[id]["parent"]
observer = self.get_children_dict(observerId)
# check if trigger
if observer["enabled"]["value"] == True:
#self.logger.debug(f"{self.model[nodeId]['name']} is targeted by observer {self.get_browse_path(observerId)}")
if observerId in triggeredObservers:
self.logger.debug(f"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass")
continue
#self.logger.debug(f"check properties to triggered the observer {self.get_browse_path(observerId)}")
#check if any of the observed properties matches
propertyMatch = False
for property in properties:
if property in observer["properties"]["value"]:
propertyMatch=True
break
if not propertyMatch:
#self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} no property match ")
pass
else:
self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} for change in {property}")
self.model[observer["triggerCounter"]["id"]]["value"] = self.model[observer["triggerCounter"]["id"]]["value"]+1
self.model[observer["lastTriggerTime"]["id"]]["value"] = datetime.datetime.now().isoformat()
for funcNodeId in self.get_leaves_ids(observer["onTriggerFunction"]["id"]):
self.logger.debug(f"execute ontrigger function {funcNodeId}")
self.execute_function(funcNodeId)
if "triggerSourceId" in observer:
self.model[observer["triggerSourceId"]["id"]]["value"] = nodeId
if observer["hasEvent"]["value"] == True:
#self.logger.debug(f"send event {observer['eventString']['value']}")
#also send the real event
#self.modelUpdateCounter = self.modelUpdateCounter+1
event = {
"id": self.modelUpdateCounter,
"event": observer["eventString"]["value"],
"data": {"nodeId":observerId,"sourceId":nodeId,"sourcePath":self.get_browse_path(nodeId)}}
#we directly put some changed properties in the event
if self.model[nodeId]["type"] not in ["column","file","timeseries"]:
event["data"]["value"]=self.model[nodeId]["value"]
for prop in properties:
if prop in ["children","forwardRefs"]:
event["data"][prop]=self.model[nodeId][prop]
#some special handling
try:
if event["event"] == "system.progress":
progressNode = self.get_node(self.get_leaves_ids("root.system.progress.targets")[0])
event["data"]["value"] = progressNode.get_value()
event["data"]["function"] = progressNode.get_parent().get_parent().get_browse_path()
else:
eventNode = self.get_node(observerId)
extraInfoNode = eventNode.get_child("eventData")
if extraInfoNode:
extraInfo = extraInfoNode.get_value()
if type(extraInfo) is not dict:
extraInfo={"info":extraInfo}
event["data"].update(extraInfo)
if eventInfo:
event["data"]["_eventInfo"]=eventInfo #put this only if we have info
except Exception as ex:
self.logger.error(f"error getting extra info for event {ex}, {sys.exc_info()[0]}")
#for all other events, take the event data if there is one (as json)
self.logger.debug(f"generate event {event}")
collectedEvents.append(event)
triggeredObservers.append(observerId)# next time, we don't trigger
#p.lap("complete backrefs {nodeId}, {backrefs}")
#self.logger.debug(p)
#self.logger.debug("now send the events")
#event = copy.deepcopy(event)
for event in collectedEvents:
for observerObject in self.observers:
observerObject.update(event)
self.logger.debug(f"done sending {len(collectedEvents)} events")
def create_observer(self):
# Instantiate a new observer
observer = Observer(self)
# attach it to the model
self.attach_observer(observer)
# return the observer
return observer
def attach_observer(self, observer):
# Add a new observer
self.logger.debug(f"Adding new observer: {id(observer)}")
with self.lock:
self.observers.append(observer)
def detach_observer(self, observer):
with self.lock:
try:
self.observers.remove(observer)
self.logger.debug(f"Removing observer: {id(observer)}")
except ValueError:
self.logger.exception("Trying to remove an observer which doesn't exist in the list of observers.")
def set_column_len(self,nodeDescriptor,newLen):
"""
        adjust the length of a column; extensions are NaN-padded
        Args: nodeDescriptor: the node
              newLen (int): the new length of the column
Returns:
the new value set or none if problem
"""
with self.lock:
id = self.get_id(nodeDescriptor)
if not id: return None
if self.model[id]["type"] != "column":
self.logger.error("set_column_len: not a column")
return None
#now make the adjustments
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(newLen, numpy.nan)
else:
#is already an array
if len(self.model[id]['value']) == newLen:
#nothing to do
pass
if len(self.model[id]['value']) > newLen:
self.model[id]['value'] = self.model[id]['value'][0:newLen]
elif len(self.model[id]['value']) < newLen:
                    self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))
else:
#same len
pass
return newLen
def get_upload_folder_files(self, matchFilter=None, blackList = []):
"""
Args:
            matchFilter: a string that must be contained in the files to deliver
blackList: a list of filenames which should not be delivered
Returns list of files with absolute file names, list of files with fileNames
"""
full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path
path, filename = os.path.split(full_path)
folder = path+r'\upload'
absFileNames = []
foundFileNames = []
#now iterate the uploaded files
fileNames = os.listdir(folder)
for idx,fileName in enumerate(fileNames):
if matchFilter:
if matchFilter not in fileName:
continue # this file will be ignored
if fileName in blackList:
continue
foundFileNames.append(fileName)
absFileNames = [folder+"\\"+fileName for fileName in foundFileNames]
return foundFileNames,absFileNames
def update(self):
"""
        update all known widgets to the
raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
- InputError if the given name is not a valid ENS name
"""
try:
normal_name = normalize_name(name)
except InvalidName as e:
raise InputError(str(e)) from e
resolver_addr = self._call_contract(
web3=web3,
contract_address=ENS_MAINNET_ADDR,
abi=ENS_ABI,
method_name='resolver',
arguments=[normal_name_to_hash(normal_name)],
)
if is_none_or_zero_address(resolver_addr):
return None
ens_resolver_abi = ENS_RESOLVER_ABI.copy()
arguments = [normal_name_to_hash(normal_name)]
if blockchain != SupportedBlockchain.ETHEREUM:
ens_resolver_abi.extend(ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS)
arguments.append(blockchain.ens_coin_type())
try:
deserialized_resolver_addr = deserialize_ethereum_address(resolver_addr)
except DeserializationError:
log.error(
                f'Error deserializing address {resolver_addr} while doing '
                f'ens lookup',
)
return None
address = self._call_contract(
web3=web3,
contract_address=deserialized_resolver_addr,
abi=ens_resolver_abi,
method_name='addr',
arguments=arguments,
)
if is_none_or_zero_address(address):
return None
if blockchain != SupportedBlockchain.ETHEREUM:
return HexStr(address.hex())
try:
return deserialize_ethereum_address(address)
except DeserializationError:
log.error(f'Error deserializing address {address}')
return None
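    # Summary of the lookup above (standard ENS flow): the registry contract at
    # ENS_MAINNET_ADDR is asked for the resolver of namehash(normal_name); if a
    # resolver is set, its addr(node) method (or the multichain addr(node, coinType)
    # variant for non-Ethereum chains) returns the target, which is deserialized to
    # a checksummed address for Ethereum and returned as a hex string otherwise.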
def _call_contract_etherscan(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
) -> Any:
"""Performs an eth_call to an ethereum contract via etherscan
May raise:
- RemoteError if there is a problem with
reaching etherscan or with the returned result
"""
web3 = Web3()
contract = web3.eth.contract(address=contract_address, abi=abi)
input_data = contract.encodeABI(method_name, args=arguments if arguments else [])
result = self.etherscan.eth_call(
to_address=contract_address,
input_data=input_data,
)
if result == '0x':
raise BlockchainQueryError(
f'Error doing call on contract {contract_address} for {method_name} '
f'with arguments: {str(arguments)} via etherscan. Returned 0x result',
)
fn_abi = contract._find_matching_fn_abi(
fn_identifier=method_name,
args=arguments,
)
output_types = get_abi_output_types(fn_abi)
output_data = web3.codec.decode_abi(output_types, bytes.fromhex(result[2:]))
if len(output_data) == 1:
# due to https://github.com/PyCQA/pylint/issues/4114
return output_data[0] # pylint: disable=unsubscriptable-object
return output_data
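# The encode/eth_call/decode pattern used above, shown standalone as a hedged
# sketch. TOKEN_ADDRESS, ERC20_ABI, ACCOUNT and `etherscan` are placeholders,
# not values defined in this module:
#
#     web3 = Web3()
#     contract = web3.eth.contract(address=TOKEN_ADDRESS, abi=ERC20_ABI)
#     input_data = contract.encodeABI('balanceOf', args=[ACCOUNT])
#     raw = etherscan.eth_call(to_address=TOKEN_ADDRESS, input_data=input_data)
#     fn_abi = contract._find_matching_fn_abi(fn_identifier='balanceOf', args=[ACCOUNT])
#     output_types = get_abi_output_types(fn_abi)
#     balance = web3.codec.decode_abi(output_types, bytes.fromhex(raw[2:]))[0]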
def _get_transaction_receipt(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> Dict[str, Any]:
if web3 is None:
tx_receipt = self.etherscan.get_transaction_receipt(tx_hash)
try:
# Turn hex numbers to int
block_number = int(tx_receipt['blockNumber'], 16)
tx_receipt['blockNumber'] = block_number
tx_receipt['cumulativeGasUsed'] = int(tx_receipt['cumulativeGasUsed'], 16)
tx_receipt['gasUsed'] = int(tx_receipt['gasUsed'], 16)
tx_receipt['status'] = int(tx_receipt.get('status', '0x1'), 16)
tx_index = int(tx_receipt['transactionIndex'], 16)
tx_receipt['transactionIndex'] = tx_index
for receipt_log in tx_receipt['logs']:
receipt_log['blockNumber'] = block_number
receipt_log['logIndex'] = deserialize_int_from_hex(
symbol=receipt_log['logIndex'],
location='etherscan tx receipt',
)
receipt_log['transactionIndex'] = tx_index
except (DeserializationError, ValueError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key {msg}'
log.error(
f'Couldnt deserialize transaction receipt {tx_receipt} data from '
f'etherscan due to {msg}',
)
raise RemoteError(
f'Couldnt deserialize transaction receipt data from etherscan '
f'due to {msg}. Check logs for details',
) from e
return tx_receipt
# Can raise TransactionNotFound if the user's node is pruned and transaction is old
tx_receipt = web3.eth.get_transaction_receipt(tx_hash) # type: ignore
return process_result(tx_receipt)
def get_transaction_receipt(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[str, Any]:
return self.query(
method=self._get_transaction_receipt,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def _get_transaction_by_hash(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> EthereumTransaction:
if web3 is None:
tx_data = self.etherscan.get_transaction_by_hash(tx_hash=tx_hash)
else:
tx_data = web3.eth.get_transaction(tx_hash) # type: ignore
try:
transaction = deserialize_ethereum_transaction(data=tx_data, internal=False, ethereum=self) # noqa: E501
except (DeserializationError, ValueError) as e:
raise RemoteError(
f'Couldnt deserialize ethereum transaction data from {tx_data}. Error: {str(e)}',
) from e
return transaction
def get_transaction_by_hash(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> EthereumTransaction:
return self.query(
method=self._get_transaction_by_hash,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def call_contract(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
call_order: Optional[Sequence[NodeName]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
return self.query(
method=self._call_contract,
call_order=call_order if call_order is not None else self.default_call_order(),
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
block_identifier=block_identifier,
)
def _call_contract(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
"""Performs an eth_call to an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
- BlockchainQueryError if web3 is used and there is a VM execution error
"""
if web3 is None:
return self._call_contract_etherscan(
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
)
contract = web3.eth.contract(address=contract_address, abi=abi)
try:
method = getattr(contract.caller(block_identifier=block_identifier), method_name)
result = method(*arguments if arguments else [])
except (ValueError, BadFunctionCallOutput) as e:
raise BlockchainQueryError(
f'Error doing call on contract {contract_address}: {str(e)}',
) from e
return result
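# Hypothetical caller-side usage of call_contract() (address and ABI are
# placeholders):
#
#     total_supply = manager.call_contract(
#         contract_address=TOKEN_ADDRESS,
#         abi=ERC20_ABI,
#         method_name='totalSupply',
#     )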
def get_logs(
self,
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
call_order: Optional[Sequence[NodeName]] = None,
) -> List[Dict[str, Any]]:
if call_order is None: # Default call order for logs
call_order = (NodeName.OWN, NodeName.ETHERSCAN)
return self.query(
method=self._get_logs,
call_order=call_order,
contract_address=contract_address,
abi=abi,
event_name=event_name,
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
)
def _get_logs(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
) -> List[Dict[str, Any]]:
"""Queries logs of an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
"""
event_abi = find_matching_event_abi(abi=abi, event_name=event_name)
_, filter_args = construct_event_filter_params(
event_abi=event_abi,
abi_codec=Web3().codec,
contract_address=contract_address,
argument_filters=argument_filters,
fromBlock=from_block,
toBlock=to_block,
)
if event_abi['anonymous']:
# web3.py does not handle the anonymous events correctly and adds the first topic
filter_args['topics'] = filter_args['topics'][1:]
events: List[Dict[str, Any]] = []
start_block = from_block
if web3 is not None:
events = _query_web3_get_logs(
web3=web3,
filter_args=filter_args,
from_block=from_block,
to_block=to_block,
contract_address=contract_address,
event_name=event_name,
argument_filters=argument_filters,
)
else: # etherscan
until_block = (
self.etherscan.get_latest_block_number() if to_block == 'latest' else to_block
)
blocks_step = 300000
while start_block <= until_block:
while True: # loop to continuously reduce block range if need be
end_block = min(start_block + blocks_step, until_block)
try:
new_events = self.etherscan.get_logs(
contract_address=contract_address,
topics=filter_args['topics'], # type: ignore
from_block=start_block,
to_block=end_block,
)
except RemoteError as e:
if 'Please select a smaller result dataset' in str(e):
blocks_step = blocks_step // 2
if blocks_step < 100:
raise # stop trying
# else try with the smaller step
continue
# else some other error
raise
break # we must have a result
# Turn all Hex ints to ints
for e_idx, event in enumerate(new_events):
try:
block_number = deserialize_int_from_hex(
symbol=event['blockNumber'],
location='etherscan log query',
)
log_index = deserialize_int_from_hex(
symbol=event['logIndex'],
location='etherscan log query',
)
# Try to see if the event is a duplicate that got returned
# in the previous iteration
for previous_event in reversed(events):
if previous_event['blockNumber'] < block_number:
break
same_event = (
previous_event['logIndex'] == log_index and
previous_event['transactionHash'] == event['transactionHash']
)
if same_event:
events.pop()
new_events[e_idx]['address'] = deserialize_ethereum_address(
event['address'],
)
new_events[e_idx]['blockNumber'] = block_number
new_events[e_idx]['timeStamp'] = deserialize_int_from_hex(
symbol=event['timeStamp'],
location='etherscan log query',
)
new_events[e_idx]['gasPrice'] = deserialize_int_from_hex(
symbol=event['gasPrice'],
location='etherscan log query',
)
new_events[e_idx]['gasUsed'] = deserialize_int_from_hex(
symbol=event['gasUsed'],
location='etherscan log query',
)
new_events[e_idx]['logIndex'] = log_index
new_events[e_idx]['transactionIndex'] = deserialize_int_from_hex(
symbol=event['transactionIndex'],
location='etherscan log query',
)
except DeserializationError as e:
raise RemoteError(
f'Couldnt decode an etherscan event due to {str(e)}',
) from e
# etherscan will only return 1000 events in one go. If more than 1000
# are returned such as when no filter args are provided then continue
# the query from the last block
if len(new_events) == 1000:
start_block = new_events[-1]['blockNumber']
else:
start_block = end_block + 1
events.extend(new_events)
return events
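# The etherscan branch above combines two patterns: (1) halve the queried block
# range whenever etherscan answers "Please select a smaller result dataset",
# giving up below a 100-block step, and (2) if exactly 1000 events come back
# (etherscan's page limit) resume from the last returned block so nothing is
# skipped. A minimal standalone sketch of pattern (1), with fetch() standing in
# for the remote call:
#
#     step = 300000
#     while True:
#         try:
#             result = fetch(start_block, min(start_block + step, until_block))
#         except RemoteError as e:
#             if 'Please select a smaller result dataset' in str(e):
#                 step //= 2
#                 if step < 100:
#                     raise
#                 continue
#             raise
#         break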
def get_event_timestamp(self, event: Dict[str, Any]) -> Timestamp:
"""Reads an event returned either by etherscan or web3 and gets its timestamp
Etherscan events contain a timestamp. Normal web3 events don't, so it needs to
be queried from the block number.
We could also add this to the get_logs() call but that would add an unnecessary
get_block_by_number() RPC call for each log entry. Better to have it lazily
queried like this.
TODO: Perhaps a better approach would be a log event class for this
"""
if 'timeStamp' in event:
# event from etherscan
return Timestamp(event['timeStamp'])
# event from web3
block_number = event['blockNumber']
block_data = self.get_block_by_number(block_number)
return Timestamp(block_data['timestamp'])
def _get_blocknumber_by_time_from_subgraph(self, ts: Timestamp) -> int:
"""Queries Ethereum Blocks Subgraph for closest block at or before given timestamp"""
response = self.blocks_subgraph.query(
f"""
{{
blocks(
first: 1, orderBy: timestamp, orderDirection: desc,
where: {{timestamp_lte: "{ts}"}}
) {{
id
number
timestamp
}}
}}
""",
)
try:
result = int(response['blocks'][0]['number'])
except (IndexError, KeyError) as e:
raise RemoteError(
f'Got unexpected ethereum blocks subgraph response: {response}',
) from e
else:
return result
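# Example of the subgraph exchange above (values are made up for illustration):
#
#     query:    blocks(first: 1, orderBy: timestamp, orderDirection: desc,
#                      where: {timestamp_lte: "1620000000"}) { id number timestamp }
#     response: {'blocks': [{'id': '0x...', 'number': '12345678',
#                            'timestamp': '1619999988'}]}
#     result:   12345678  (closest block at or before the given timestamp)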
def get_blocknumber_by_time(self, ts: Timestamp, etherscan: bool = True) -> int:
"""Searches for the blocknumber of a specific timestamp
- Performs the etherscan api call by default first
- If RemoteError raised or etherscan flag set to false
-> queries the blocks subgraph as a fallback
"""
#!/usr/bin/python
# Ripmaster.tools
# Module containing tools used by Ripmaster
# By <NAME>, 2013/11/10
"""
Description
-----------
This module contains all the classes that Ripmaster uses to represent the files
and processes on disk.
Classes
-------
AudioTrack
Represents a single audio track within a <Movie>. Each audio track in the mkv
gets an AudioTrack object, not just ones Handbrake can't handle.
Config
The Config object reads the Ripmaster.ini file for all the user set
configuration options.
Movie
Represents a single mkv file, contains <AudioTrack>s and <SubtitleTracks>s.
Calls all the extraction and conversion methods of its children.
SubtitleTrack
Represents a single subtitle track within a <Movie>. Each subtitle track in
the mkv gets a SubtitleTrack object, not just the ones Handbrake can't
handle. Since one subtitle track can contain both forced and non-forced
subtitles, it's possible that a single subtitle track gets split into
two subtitle tracks- one forced and the other containing every subtitle
(both forced and not forced).
Functions
---------
bdSup2Sub()
CLI command builder for converting subtitle tracks with BDSup2Sub. For all
intents and purposes, this is the BDSup2Sub application.
handbrake()
CLI command builder for converting video and audio with Handbrake. For all
intents and purposes, this is the Handbrake application.
mkvExtract()
CLI command builder for extracting tracks with mkvextract. For all intents
and purposes, this is the mkvextract application.
mkvInfo()
Uses mkvmerge to fetch names, filetypes and trackIDs for all audio, video
and subtitle tracks from a given mkv.
mkvMerge()
Merges a converted movie, converted subtitles and any extracted audio tracks
"""
#===============================================================================
# IMPORTS
#===============================================================================
# Standard Imports
from ast import literal_eval
import ConfigParser
import os
from subprocess import Popen, PIPE
#===============================================================================
# GLOBALS
#===============================================================================
# Conversion Dictionaries
BFRAMES = {
'ultrafast': 0,
'superfast': 3,
'veryfast': 3,
'faster': 3,
'fast': 3,
'medium': 3,
'slow': 3,
'slower': 3,
'veryslow': 8,
'placebo': 16
}
# Possible Config Choices
AUDIO_FALLBACK_DEFAULT = 'ffac3'
AUDIO_FALLBACKS = [
'faac',
'ffaac',
'ffac3',
'lame',
'vorbis',
'ffflac',
]
LANGUAGE_DEFAULT = 'English'
LANGUAGES = ['English']
SORTING_DEFAULT = 'alphabetical'
SORTINGS = ['alphabetical', 'quality', 'resolution']
SORTING_REVERSE_DEFAULT = True
RESOLUTION_DEFAULT = 1080
RESOLUTIONS = [1080, 720, 480]
RESOLUTION_WIDTH = {1080: 1920, 720: 1280, 480: 720}
QUALITY_DEFAULT = 20
QUALITIES = ['uq', 'hq', 'bq']
X264_SPEED_DEFAULT = 'slow'
X264_SPEEDS = BFRAMES.keys()
# Handbrake Settings
H264_PRESETS = [
'animation',
'film',
'grain',
'psnr',
'ssim',
'fastdecode',
'zerolatency'
]
FPS_PRESETS = ['30p', '25p', '24p']
EXTRACTABLE_AUDIO = ['pcm', 'truehd']
EXTRACTABLE_SUBTITLE = ['pgs']
# Generic
SAMPLE_CONFIG = """[Programs]
BDSupToSub: C://Program Files (x86)/MKVToolNix/BDSup2Sub.jar
HandbrakeCLI: C://Program Files/Handbrake/HandBrakeCLI.exe
Java: C://Program Files (x86)/Java/jre7/bin/java
mkvExtract: C://Program Files (x86)/MKVToolNix/mkvextract.exe
mkvMerge: C://Program Files (x86)/MKVToolNix/mkvmerge.exe
[Handbrake Settings]
animation_BFrames: 8
audio_Fallback: ffac3
language: English
sorting: alphabetical
sorting_Reverse: no
x264_Speed: slow
[Base Encode Quality]
1080p: 20
720p: 20
480p: 20
[High Encode Quality]
1080p: 19
720p: 19
480p: 19
[Ultra Encode Quality]
1080p: 16
720p: 16
480p: 16"""
#===============================================================================
# PRIVATE FUNCTIONS
#===============================================================================
def _stripAndRemove(string, remove=None):
"""Strips whitespace and optional chars from both sides of the target string.
Args:
string : (str)
The string to strip leading and trailing whitespace from
remove=None : (str)
Optional substring to remove from the stripped string
Raises:
N/A
Returns:
(str)
The target string after being stripped on either end.
"""
stringStripped = string.lstrip().rstrip()
stringRemoved = stringStripped.replace(remove, '') if remove else stringStripped
stringFinal = stringRemoved.lstrip().rstrip()
return stringFinal
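# Example (illustrative):
#
#     _stripAndRemove("  Java: C://some/path  ", remove="Java:")
#     # -> "C://some/path"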
def _trackInfo(line):
"""Takes a track line from mkvmerge -I and returns track information
Args:
line : (str)
A single line from mkvmerge -I's result.
Raises:
ValueError
Will be raised if line fed to _trackInfo is not a trackID line, or
known track type.
Returns:
(int), (str), (dict)
The track ID, the track type, and a dictionary
with additional information.
"""
# If we are for some reason fed a line without a TrackID, raise
if not line.startswith('Track ID'):
raise ValueError(line + ' is not a Track ID line.')
# trackID identifies which track this is of the original mkv.
trackID = int(line.split(':')[0].replace('Track ID ', ''))
# The track type is right after the trackID. We'll make sure we find the
# actual track type (and not part of a title or other text) by including
# the whitespace and punctuation that surrounds it.
if ': video (' in line:
trackType = 'video'
elif ': audio (' in line:
trackType = 'audio'
elif ': subtitles (' in line:
trackType = 'subtitles'
else:
raise ValueError(line + ' does not contain a known track type.')
# By splitting on the opening and removing the closing bracket, we'll
# be left with only the track dictionary, but it will be in string form.
trackDict = line.split('[')[-1].replace(']', '')
trackDict = trackDict.replace('\r\n', '')
# We need to add " marks around all entries, and comma-separate entries.
trackDict = trackDict.replace(' ', '", "')
trackDict = trackDict.replace(':', '": "')
trackDict = '{"' + trackDict + '"}'
trackDict = literal_eval(trackDict)
# Now we need to set some defaults. It's possible the track dictionary
# doesn't have these, and we'll be referencing them later.
trackDict.setdefault('default_track', '0')
trackDict.setdefault('forced_track', '0')
trackDict.setdefault('language', 'eng')
return trackID, trackType, trackDict
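# Example of the parsing above, using a hand-written mkvmerge -I style line (the
# exact field set varies between mkvmerge versions):
#
#     line = "Track ID 2: audio (DTS) [language:eng default_track:1 forced_track:0]"
#     trackID, trackType, trackDict = _trackInfo(line)
#     # trackID   -> 2
#     # trackType -> 'audio'
#     # trackDict -> {'language': 'eng', 'default_track': '1', 'forced_track': '0'}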
#===============================================================================
# CLASSES
#===============================================================================
class AudioTrack(object):
"""A single audio track.
Args:
movie : (<Movie>)
The parent <Movie> object that this <AudioTrack> is a child of.
trackID : (str)
The trackID of the audio track this object is to represent.
fileType : (str)
The filetype of this audiotrack.
"""
def __init__(self, movie, trackID, fileType, infoDict):
self.movie = movie
self.trackID = trackID
self.fileType = fileType
self.info = infoDict
self.extracted = False
self.extractedAudio = None
self.default = True if self.info['default_track'] == '1' else False
def extractTrack(self):
"""Extracts the audiotrack this object represents from the parent mkv"""
command = "{trackID}:".format(trackID=self.trackID)
# Derive the location to save the track to
fileName = self.movie.fileName.replace('.mkv', '')
fileName += "_Track{TrackID}_audio.{ext}".format(
TrackID=self.trackID,
ext=self.fileType
)
self.extractedAudio = os.path.join(
self.movie.root,
self.movie.subdir,
fileName
).replace('\\', '/')
print ""
print "Extracting trackID {ID} of type {type} from {file}".format(
ID=self.trackID,
type=self.fileType,
file=self.movie.path
)
print ""
mkvExtract(self.movie.path, command, self.extractedAudio)
self.extracted = True
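# Hypothetical example of what extractTrack() prepares (paths are made up):
#
#     # movie path: D:/rips/Movie.mkv, trackID 2, fileType 'truehd'
#     # command:    "2:"
#     # output:     D:/rips/Movie_Track2_audio.truehd
#     # mkvExtract() then builds and runs the corresponding mkvextract call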
class Config(object):
""" Class containing the basic encoding environment as described by the .ini
Args:
iniFile : (str)
Ripmaster's configuration file
Sample config file:
[Programs]
BDSupToSub: C://Program Files (x86)/MKVToolNix/BDSup2Sub.jar
HandbrakeCLI: C://Program Files/Handbrake/HandBrakeCLI.exe
Java: C://Program Files (x86)/Java/jre7/bin/java
mkvExtract: C://Program Files (x86)/MKVToolNix/mkvextract.exe
mkvMerge: C://Program Files (x86)/MKVToolNix/mkvmerge.exe
[Handbrake Settings]
animation_BFrames: 8
audio_Fallback: ffac3
language: English
sorting: alphabetical
sorting_Reverse: no
x264_Speed: slow
[Base Encode Quality]
1080p: 20
720p: 20
480p: 20
[High Encode Quality]
1080p: 19
720p: 19
480p: 19
[Ultra Encode Quality]
1080p: 16
720p: 16
480p: 16
Leading and trailing whitespaces are automatically removed, but all entries
are case sensitive.
"""
config = None
# Programs
handBrake = ''
java = ''
mkvExtract = ''
mkvMerge = ''
sup2Sub = ''
# Handbrake Settings
bFrames = None
audioFallback = AUDIO_FALLBACK_DEFAULT
language = LANGUAGE_DEFAULT
sorting = SORTING_DEFAULT
sortingReverse = SORTING_REVERSE_DEFAULT
x264Speed = X264_SPEED_DEFAULT
# Encode Qualities
quality = {'uq': {}, 'hq': {}, 'bq': {}}
def __init__(self, iniFile):
# This will either return True or raise an exception
if self.checkConfig(iniFile):
try:
self.getSettings(iniFile)
except (ConfigParser.NoOptionError,
ConfigParser.NoSectionError), ex:
# NoOptionError strings to:
# No option 'djkas' in section: 'Programs'
# NoSectionError strings to:
# No section: 'Programsss'
# Both are index 2
error = str(ex).split()[2].replace("'", '')
if ex.__class__ is ConfigParser.NoOptionError:
exception = 'option'
# We also want to add the section to option errors, so
# we'll pull it from the last index.
error += " from section: "
error += str(ex).split()[-1].replace("'", '')
elif ex.__class__ is ConfigParser.NoSectionError:
exception = 'section'
message = "Missing the ini {type}: {err}. Please fill the " \
"missing options and retry.".format(
type=exception,
err=error
)
raise ValueError(message)
def checkConfig(self, iniFile):
"""Checks that the iniFile provided actually exists. Creates if not.
Args:
iniFile : (str)
Config File location
Raises:
IOError
Raised if config file is missing.
Returns:
If IOError not raised, that means a config file was found and this
will return True
"""
if os.path.exists(iniFile):
return True
else:
with open(iniFile, "w") as f:
f.write(SAMPLE_CONFIG)
errorMsg = "\nPROCESS WILL FAIL\nYou were missing the .ini file. " \
"I had to create one for you. You'll find it in " \
"Ripmaster's folder, called Ripmaster.ini - You need " \
"to specify the path for the various applications\n"
raise IOError(errorMsg)
@classmethod
def getSettings(cls, iniFile):
"""Opens the ini file, splits the lines into a list, and grabs input"""
print "Reading config from:", iniFile
with open(iniFile, "r") as f:
cls.config = ConfigParser.ConfigParser()
cls.config.readfp(f)
cf = cls.config
# Grab all of our 'Programs' settings
cat = 'Programs'
cls.sup2Sub = cf.get(cat, 'BDSupToSub')
| |
requestedBBUser = usersDB.addUser(message.author.id)
itemNum = argsSplit[1]
if not bbUtil.isInt(itemNum):
await message.channel.send(":x: Invalid item number!")
return
itemNum = int(itemNum)
if itemNum > len(requestedBBUser.getInactivesByName(item)):
await message.channel.send(":x: Invalid item number! You have " + str(len(requestedBBUser.getInactivesByName(item))) + " " + item + "s.")
return
if itemNum < 1:
await message.channel.send(":x: Invalid item number! Must be at least 1.")
return
clearItems = False
if len(argsSplit) == 3:
if argsSplit[2] == "clear":
if item != "ship":
await message.channel.send(":x: `clear` can only be used when selling a ship!")
return
clearItems = True
else:
await message.channel.send(":x: Invalid argument! Please only give an item type (ship/weapon/module/turret), an item number, and optionally `clear` when selling a ship.")
return
requestedShop = guildsDB.getGuild(message.guild.id).shop
if item == "ship":
requestedShip = requestedBBUser.inactiveShips[itemNum - 1]
if clearItems:
requestedBBUser.unequipAll(requestedShip)
requestedBBUser.credits += requestedShip.getValue()
requestedBBUser.inactiveShips.remove(requestedShip)
requestedShop.shipsStock.append(requestedShip)
outStr = ":moneybag: You sold your **" + requestedShip.getNameOrNick() + "** for **" + str(requestedShip.getValue()) + " credits**!"
if clearItems:
outStr += "\nItems removed from the ship can be found in the hangar."
await message.channel.send(outStr)
elif item == "weapon":
requestedWeapon = requestedBBUser.inactiveWeapons[itemNum - 1]
requestedBBUser.credits += requestedWeapon.value
requestedBBUser.inactiveWeapons.remove(requestedWeapon)
requestedShop.weaponsStock.append(requestedWeapon)
await message.channel.send(":moneybag: You sold your **" + requestedWeapon.name + "** for **" + str(requestedWeapon.value) + " credits**!")
elif item == "module":
requestedModule = requestedBBUser.inactiveModules[itemNum - 1]
requestedBBUser.credits += requestedModule.value
requestedBBUser.inactiveModules.remove(requestedModule)
requestedShop.modulesStock.append(requestedModule)
await message.channel.send(":moneybag: You sold your **" + requestedModule.name + "** for **" + str(requestedModule.value) + " credits**!")
elif item == "turret":
requestedTurret = requestedBBUser.inactiveTurrets[itemNum - 1]
requestedBBUser.credits += requestedTurret.value
requestedBBUser.inactiveTurrets.remove(requestedTurret)
requestedShop.turretsStock.append(requestedTurret)
await message.channel.send(":moneybag: You sold your **" + requestedTurret.name + "** for **" + str(requestedTurret.value) + " credits**!")
else:
raise NotImplementedError("Valid but unsupported item name: " + item)
bbCommands.register("sell", cmd_shop_sell)
dmCommands.register("sell", err_nodm)
"""
Equip the item of the given item type, at the given index, from the user's inactive items.
if "transfer" is specified, the new ship's items are cleared, and the old ship's items attempt to fill new ship.
"transfer" is only valid when equipping a ship.
@param message -- the discord message calling the command
@param args -- string containing an item type and an index number, and optionally "transfer", separated by a single space
"""
async def cmd_equip(message, args):
argsSplit = args.split(" ")
if len(argsSplit) < 2:
await message.channel.send(":x: Not enough arguments! Please provide both an item type (ship/weapon/module/turret) and an item number from `" + bbConfig.commandPrefix + "hangar`")
return
if len(argsSplit) > 3:
await message.channel.send(":x: Too many arguments! Please only give an item type (ship/weapon/module/turret), an item number, and optionally `transfer` when equipping a ship.")
return
item = argsSplit[0].rstrip("s")
if item == "all" or item not in bbConfig.validItemNames:
await message.channel.send(":x: Invalid item name! Please choose from: ship, weapon, module or turret.")
return
if usersDB.userIDExists(message.author.id):
requestedBBUser = usersDB.getUser(message.author.id)
else:
requestedBBUser = usersDB.addUser(message.author.id)
itemNum = argsSplit[1]
if not bbUtil.isInt(itemNum):
await message.channel.send(":x: Invalid item number!")
return
itemNum = int(itemNum)
if itemNum > len(requestedBBUser.getInactivesByName(item)):
await message.channel.send(":x: Invalid item number! You have " + str(len(requestedBBUser.getInactivesByName(item))) + " " + item + "s.")
return
if itemNum < 1:
await message.channel.send(":x: Invalid item number! Must be at least 1.")
return
transferItems = False
if len(argsSplit) == 3:
if argsSplit[2] == "transfer":
if item != "ship":
await message.channel.send(":x: `transfer` can only be used when equipping a ship!")
return
transferItems = True
else:
await message.channel.send(":x: Invalid argument! Please only give an item type (ship/weapon/module/turret), an item number, and optionally `transfer` when equipping a ship.")
return
if item == "ship":
requestedShip = requestedBBUser.inactiveShips[itemNum - 1]
activeShip = requestedBBUser.activeShip
if transferItems:
requestedBBUser.unequipAll(requestedShip)
requestedBBUser.activeShip.transferItemsTo(requestedShip)
requestedBBUser.unequipAll(activeShip)
requestedBBUser.equipShipObj(requestedShip)
outStr = ":rocket: You switched to the **" + requestedShip.getNameOrNick() + "**."
if transferItems:
outStr += "\nItems that could not fit in your new ship can be found in the hangar."
await message.channel.send(outStr)
elif item == "weapon":
if not requestedBBUser.activeShip.canEquipMoreWeapons():
await message.channel.send(":x: Your active ship does not have any free weapon slots!")
return
requestedItem = requestedBBUser.inactiveWeapons[itemNum - 1]
requestedBBUser.activeShip.equipWeapon(requestedItem)
requestedBBUser.inactiveWeapons.pop(itemNum - 1)
await message.channel.send(":wrench: You equipped the **" + requestedItem.name + "**.")
elif item == "module":
if not requestedBBUser.activeShip.canEquipMoreModules():
await message.channel.send(":x: Your active ship does not have any free module slots!")
return
requestedItem = requestedBBUser.inactiveModules[itemNum - 1]
if not requestedBBUser.activeShip.canEquipModuleType(requestedItem.getType()):
await message.channel.send(":x: You already have the max of this type of module equipped!")
return
requestedBBUser.activeShip.equipModule(requestedItem)
requestedBBUser.inactiveModules.pop(itemNum - 1)
await message.channel.send(":wrench: You equipped the **" + requestedItem.name + "**.")
elif item == "turret":
if not requestedBBUser.activeShip.canEquipMoreTurrets():
await message.channel.send(":x: Your active ship does not have any free turret slots!")
return
requestedItem = requestedBBUser.inactiveTurrets[itemNum - 1]
requestedBBUser.activeShip.equipTurret(requestedItem)
requestedBBUser.inactiveTurrets.pop(itemNum - 1)
await message.channel.send(":wrench: You equipped the **" + requestedItem.name + "**.")
else:
raise NotImplementedError("Valid but unsupported item name: " + item)
bbCommands.register("equip", cmd_equip)
dmCommands.register("equip", cmd_equip)
"""
Unequip the item of the given item type, at the given index, from the user's active ship.
@param message -- the discord message calling the command
@param args -- string containing either "all", or (an item type and either an index number or "all", separated by a single space)
"""
async def cmd_unequip(message, args):
argsSplit = args.split(" ")
unequipAllItems = len(argsSplit) > 0 and argsSplit[0] == "all"
if not unequipAllItems and len(argsSplit) < 2:
await message.channel.send(":x: Not enough arguments! Please provide both an item type (all/weapon/module/turret) and an item number from `" + bbConfig.commandPrefix + "hangar` or `all`.")
return
if len(argsSplit) > 2:
await message.channel.send(":x: Too many arguments! Please only give an item type (all/weapon/module/turret), an item number or `all`.")
return
if usersDB.userIDExists(message.author.id):
requestedBBUser = usersDB.getUser(message.author.id)
else:
requestedBBUser = usersDB.addUser(message.author.id)
if unequipAllItems:
requestedBBUser.unequipAll(requestedBBUser.activeShip)
await message.channel.send(":wrench: You unequipped **all items** from your ship.")
return
item = argsSplit[0].rstrip("s")
if item not in bbConfig.validItemNames:
await message.channel.send(":x: Invalid item name! Please choose from: weapon, module or turret.")
return
if item == "ship":
await message.channel.send(":x: You can't go without a ship! Instead, switch to another one.")
return
unequipAll = argsSplit[1] == "all"
if not unequipAll:
itemNum = argsSplit[1]
if not bbUtil.isInt(itemNum):
await message.channel.send(":x: Invalid item number!")
return
itemNum = int(itemNum)
if itemNum > len(requestedBBUser.activeShip.getActivesByName(item)):
await message.channel.send(":x: Invalid item number! Your ship has " + str(len(requestedBBUser.activeShip.getActivesByName(item))) + " " + item + "s.")
return
if itemNum < 1:
await message.channel.send(":x: Invalid item number! Must be at least 1.")
return
if item == "weapon":
if not requestedBBUser.activeShip.hasWeaponsEquipped():
await message.channel.send(":x: Your active ship does not have any weapons equipped!")
return
if unequipAll:
for weapon in requestedBBUser.activeShip.weapons:
requestedBBUser.inactiveWeapons.append(weapon)
requestedBBUser.activeShip.unequipWeaponObj(weapon)
await message.channel.send(":wrench: You unequipped all **weapons**.")
else:
requestedItem = requestedBBUser.activeShip.weapons[itemNum - 1]
requestedBBUser.inactiveWeapons.append(requestedItem)
requestedBBUser.activeShip.unequipWeaponIndex(itemNum - 1)
await message.channel.send(":wrench: You unequipped the **" + requestedItem.name + "**.")
elif item == "module":
if not requestedBBUser.activeShip.hasModulesEquipped():
await message.channel.send(":x: Your active ship does not have any modules equipped!")
return
if unequipAll:
for module in requestedBBUser.activeShip.modules:
requestedBBUser.inactiveModules.append(module)
requestedBBUser.activeShip.unequipModuleObj(module)
await message.channel.send(":wrench: You unequipped all **modules**.")
else:
requestedItem = requestedBBUser.activeShip.modules[itemNum - 1]
requestedBBUser.inactiveModules.append(requestedItem)
requestedBBUser.activeShip.unequipModuleIndex(itemNum - 1)
await message.channel.send(":wrench: You unequipped the **" + requestedItem.name + "**.")
elif item == "turret":
if not requestedBBUser.activeShip.hasTurretsEquipped():
await message.channel.send(":x: Your active ship does not have any turrets equipped!")
return
if unequipAll:
for turret in requestedBBUser.activeShip.turrets:
requestedBBUser.inactiveTurrets.append(turret)
requestedBBUser.activeShip.unequipTurretObj(turret)
await message.channel.send(":wrench: You unequipped all **turrets**.")
else:
requestedItem = requestedBBUser.activeShip.turrets[itemNum - 1]
requestedBBUser.inactiveTurrets.append(requestedItem)
requestedBBUser.activeShip.unequipTurretIndex(itemNum - 1)
await message.channel.send(":wrench: You unequipped the **" + requestedItem.name + "**.")
else:
raise NotImplementedError("Valid but unsupported item name: " + item)
bbCommands.register("unequip", cmd_unequip)
dmCommands.register("unequip", cmd_unequip)
"""
Set the nickname of the active ship.
@param message -- the discord message calling the command
@param args -- string containing the new nickname.
"""
async def cmd_nameship(message, args):
if usersDB.userIDExists(message.author.id):
requestedBBUser = usersDB.getUser(message.author.id)
else:
requestedBBUser = usersDB.addUser(message.author.id)
if requestedBBUser.activeShip is None:
await message.channel.send(":x: You do not have a ship equipped!")
return
if args == "":
await message.channel.send(":x: Not enough arguments. Please give the new nickname!")
return
if len(args) > bbConfig.maxShipNickLength:
await message.channel.send(":x: Nicknames must be " + str(bbConfig.maxShipNickLength) + " characters or less!")
return
requestedBBUser.activeShip.changeNickname(args)
await message.channel.send(":pencil: You named your " + requestedBBUser.activeShip.name + ": **" + args + "**.")
bbCommands.register("nameship", cmd_nameship, forceKeepArgsCasing=True)
dmCommands.register("nameship", cmd_nameship, forceKeepArgsCasing=True)
"""
Remove the nickname of the active ship.
@param message -- the discord message calling the command
@param args -- ignored
"""
async def cmd_unnameship(message, args):
if usersDB.userIDExists(message.author.id):
requestedBBUser = usersDB.getUser(message.author.id)
else:
requestedBBUser = usersDB.addUser(message.author.id)
if